| author | Liam <byteslice@airmail.cc> | 2023-03-07 22:11:50 +0100 |
|---|---|---|
| committer | Liam <byteslice@airmail.cc> | 2023-03-13 03:09:09 +0100 |
| commit | 6bfb4c8f713323bb39b7e38a779c35583fc61bcc (patch) | |
| tree | 6208bf4bbd1c303811384c8fe3d600560a4d3bfe /src/core | |
| parent | kernel: prefer std::addressof (diff) | |
Diffstat (limited to '')

| File | Lines changed |
|---|---|
| src/core/debugger/gdbstub.cpp | 12 |
| src/core/debugger/gdbstub_arch.cpp | 4 |
| src/core/hle/kernel/k_client_session.cpp | 2 |
| src/core/hle/kernel/k_condition_variable.cpp | 2 |
| src/core/hle/kernel/k_condition_variable.h | 8 |
| src/core/hle/kernel/k_process.cpp | 1 |
| src/core/hle/kernel/k_scheduler.cpp | 16 |
| src/core/hle/kernel/k_server_session.cpp | 4 |
| src/core/hle/kernel/k_thread.cpp | 483 |
| src/core/hle/kernel/k_thread.h | 544 |
| src/core/hle/kernel/kernel.cpp | 3 |
| src/core/hle/kernel/svc/svc_thread.cpp | 5 |
| src/core/hle/service/hle_ipc.cpp | 2 |

13 files changed, 518 insertions, 568 deletions
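Editor's note: the substance of this commit is a mechanical style pass over the kernel. Private data members gain an m_ prefix, acronym-heavy accessors are normalized to PascalCase (GetTLSAddress → GetTlsAddress, GetThreadID → GetThreadId, SetTPIDR_EL0 → SetTpidrEl0), and [[nodiscard]] annotations plus dead members (thread names, is_single_core) are dropped. A minimal sketch of the target convention — the Example class and its members are hypothetical, only the convention itself comes from the diff:

```cpp
#include <cstdint>

using u64 = std::uint64_t;

// Hypothetical class illustrating the naming convention this commit applies.
class Example {
public:
    // Accessors treat acronyms as words: GetTLSAddress becomes GetTlsAddress.
    u64 GetTlsAddress() const {
        return m_tls_address;
    }
    void SetTlsAddress(u64 address) {
        m_tls_address = address;
    }

private:
    // Private members carry the m_ prefix, so parameters such as a plain
    // core_id no longer need trailing-underscore spellings (core_id_) to
    // avoid shadowing, as seen in the SetCoreMask hunks below.
    u64 m_tls_address{};
};
```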
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index 18afe97e1..f39f2ca29 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -421,7 +421,7 @@ void GDBStub::HandleBreakpointRemove(std::string_view command) {
 static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory& memory,
                                                           const Kernel::KThread* thread) {
     // Read thread type from TLS
-    const VAddr tls_thread_type{memory.Read32(thread->GetTLSAddress() + 0x1fc)};
+    const VAddr tls_thread_type{memory.Read32(thread->GetTlsAddress() + 0x1fc)};
     const VAddr argument_thread_type{thread->GetArgument()};
 
     if (argument_thread_type && tls_thread_type != argument_thread_type) {
@@ -452,7 +452,7 @@ static std::optional<std::string> GetNameFromThreadType32(Core::Memory::Memory&
 static std::optional<std::string> GetNameFromThreadType64(Core::Memory::Memory& memory,
                                                           const Kernel::KThread* thread) {
     // Read thread type from TLS
-    const VAddr tls_thread_type{memory.Read64(thread->GetTLSAddress() + 0x1f8)};
+    const VAddr tls_thread_type{memory.Read64(thread->GetTlsAddress() + 0x1f8)};
     const VAddr argument_thread_type{thread->GetArgument()};
 
     if (argument_thread_type && tls_thread_type != argument_thread_type) {
@@ -576,7 +576,7 @@ void GDBStub::HandleQuery(std::string_view command) {
         const auto& threads = system.ApplicationProcess()->GetThreadList();
         std::vector<std::string> thread_ids;
         for (const auto& thread : threads) {
-            thread_ids.push_back(fmt::format("{:x}", thread->GetThreadID()));
+            thread_ids.push_back(fmt::format("{:x}", thread->GetThreadId()));
         }
         SendReply(fmt::format("m{}", fmt::join(thread_ids, ",")));
     } else if (command.starts_with("sThreadInfo")) {
@@ -591,11 +591,11 @@ void GDBStub::HandleQuery(std::string_view command) {
         for (const auto* thread : threads) {
             auto thread_name{GetThreadName(system, thread)};
             if (!thread_name) {
-                thread_name = fmt::format("Thread {:d}", thread->GetThreadID());
+                thread_name = fmt::format("Thread {:d}", thread->GetThreadId());
             }
 
             buffer += fmt::format(R"(<thread id="{:x}" core="{:d}" name="{}">{}</thread>)",
-                                  thread->GetThreadID(), thread->GetActiveCore(),
+                                  thread->GetThreadId(), thread->GetActiveCore(),
                                   EscapeXML(*thread_name), GetThreadState(thread));
         }
 
@@ -819,7 +819,7 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
 Kernel::KThread* GDBStub::GetThreadByID(u64 thread_id) {
     const auto& threads{system.ApplicationProcess()->GetThreadList()};
     for (auto* thread : threads) {
-        if (thread->GetThreadID() == thread_id) {
+        if (thread->GetThreadId() == thread_id) {
             return thread;
         }
     }
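The renames above land in reply builders for the GDB remote serial protocol. For context, a qfThreadInfo reply is just "m" followed by comma-separated hexadecimal thread ids; a self-contained sketch of the same assembly step (assuming only the fmt library, with a hypothetical ThreadLike stand-in for Kernel::KThread):

```cpp
#include <cstdint>
#include <string>
#include <vector>

#include <fmt/format.h>
#include <fmt/ranges.h> // fmt::join

// Hypothetical stand-in for Kernel::KThread; only GetThreadId() matters here.
struct ThreadLike {
    std::uint64_t m_thread_id{};
    std::uint64_t GetThreadId() const {
        return m_thread_id;
    }
};

// Builds a GDB "qfThreadInfo"-style reply, e.g. "m1,2,a" for ids 1, 2, 10.
std::string BuildThreadListReply(const std::vector<ThreadLike>& threads) {
    std::vector<std::string> thread_ids;
    for (const auto& thread : threads) {
        thread_ids.push_back(fmt::format("{:x}", thread.GetThreadId()));
    }
    return fmt::format("m{}", fmt::join(thread_ids, ","));
}
```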
diff --git a/src/core/debugger/gdbstub_arch.cpp b/src/core/debugger/gdbstub_arch.cpp
index 831c48513..75c94a91a 100644
--- a/src/core/debugger/gdbstub_arch.cpp
+++ b/src/core/debugger/gdbstub_arch.cpp
@@ -259,7 +259,7 @@ void GDBStubA64::WriteRegisters(Kernel::KThread* thread, std::string_view regist
 std::string GDBStubA64::ThreadStatus(const Kernel::KThread* thread, u8 signal) const {
     return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER,
                        RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER),
-                       LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadID());
+                       LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadId());
 }
 
 u32 GDBStubA64::BreakpointInstruction() const {
@@ -469,7 +469,7 @@ void GDBStubA32::WriteRegisters(Kernel::KThread* thread, std::string_view regist
 std::string GDBStubA32::ThreadStatus(const Kernel::KThread* thread, u8 signal) const {
     return fmt::format("T{:02x}{:02x}:{};{:02x}:{};{:02x}:{};thread:{:x};", signal, PC_REGISTER,
                        RegRead(thread, PC_REGISTER), SP_REGISTER, RegRead(thread, SP_REGISTER),
-                       LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadID());
+                       LR_REGISTER, RegRead(thread, LR_REGISTER), thread->GetThreadId());
 }
 
 u32 GDBStubA32::BreakpointInstruction() const {

diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp
index 62a8fab45..d998b2be2 100644
--- a/src/core/hle/kernel/k_client_session.cpp
+++ b/src/core/hle/kernel/k_client_session.cpp
@@ -29,7 +29,7 @@ Result KClientSession::SendSyncRequest() {
     SCOPE_EXIT({ request->Close(); });
 
     // Initialize the request.
-    request->Initialize(nullptr, GetCurrentThread(m_kernel).GetTLSAddress(), MessageBufferSize);
+    request->Initialize(nullptr, GetCurrentThread(m_kernel).GetTlsAddress(), MessageBufferSize);
 
     // Send the request.
     R_RETURN(m_parent->GetServerSession().OnRequest(request));

diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 067f26fba..58b8609d8 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -177,7 +177,6 @@ Result KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
         // Begin waiting.
         cur_thread->BeginWait(std::addressof(wait_queue));
         cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
-        cur_thread->SetMutexWaitAddressForDebugging(addr);
     }
 
     // Close our reference to the owner thread, now that the wait is over.
@@ -324,7 +323,6 @@ Result KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
         wait_queue.SetHardwareTimer(timer);
         cur_thread->BeginWait(std::addressof(wait_queue));
         cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
-        cur_thread->SetMutexWaitAddressForDebugging(addr);
    }
 
     // Get the wait result.
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index 41635a894..fbd2c1fc0 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -41,16 +41,16 @@ private:
     ThreadTree m_tree{};
 };
 
-inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
+inline void BeforeUpdatePriority(KernelCore& kernel, KConditionVariable::ThreadTree* tree,
                                  KThread* thread) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 
     tree->erase(tree->iterator_to(*thread));
 }
 
-inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
+inline void AfterUpdatePriority(KernelCore& kernel, KConditionVariable::ThreadTree* tree,
                                 KThread* thread) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 
     tree->insert(*thread);
 }

diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index b740fb1c3..fa3fc8c1c 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -52,7 +52,6 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     Handle thread_handle{};
     owner_process.GetHandleTable().Add(std::addressof(thread_handle), thread);
 
-    thread->SetName("main");
     thread->GetContext32().cpu_registers[0] = 0;
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
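The assert swap in k_condition_variable.h is more than cosmetic: IsLocked() only asserts that *some* thread holds the scheduler lock, while IsSchedulerLockedByCurrentThread() asserts that the *calling* thread does — which is the precondition the tree mutation actually needs. A simplified sketch of the distinction; SimpleSchedulerLock is a hypothetical stand-in, and yuzu's real lock additionally counts recursion and triggers rescheduling on release:

```cpp
#include <atomic>
#include <mutex>
#include <thread>

class SimpleSchedulerLock {
public:
    void Lock() {
        m_mutex.lock();
        m_owner.store(std::this_thread::get_id(), std::memory_order_relaxed);
    }
    void Unlock() {
        m_owner.store(std::thread::id{}, std::memory_order_relaxed);
        m_mutex.unlock();
    }

    // Weak check: passes whenever *any* thread holds the lock.
    bool IsLocked() const {
        return m_owner.load(std::memory_order_relaxed) != std::thread::id{};
    }

    // Strong check, as in IsSchedulerLockedByCurrentThread: passes only
    // when the calling thread is the one holding the lock.
    bool IsLockedByCurrentThread() const {
        return m_owner.load(std::memory_order_relaxed) == std::this_thread::get_id();
    }

private:
    std::mutex m_mutex;
    std::atomic<std::thread::id> m_owner{};
};
```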
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index fe371726c..ecadf2916 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -411,7 +411,7 @@ void KScheduler::ScheduleImpl() {
     m_switch_cur_thread = cur_thread;
     m_switch_highest_priority_thread = highest_priority_thread;
     m_switch_from_schedule = true;
-    Common::Fiber::YieldTo(cur_thread->host_context, *m_switch_fiber);
+    Common::Fiber::YieldTo(cur_thread->m_host_context, *m_switch_fiber);
 
     // Returning from ScheduleImpl occurs after this thread has been scheduled again.
 }
@@ -450,7 +450,7 @@ void KScheduler::ScheduleImplFiber() {
 
     // We want to try to lock the highest priority thread's context.
     // Try to take it.
-    while (!highest_priority_thread->context_guard.try_lock()) {
+    while (!highest_priority_thread->m_context_guard.try_lock()) {
         // The highest priority thread's context is already locked.
         // Check if we need scheduling. If we don't, we can retry directly.
         if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
@@ -468,7 +468,7 @@ void KScheduler::ScheduleImplFiber() {
         if (m_state.needs_scheduling.load(std::memory_order_seq_cst)) {
             // Our switch failed.
             // We should unlock the thread context, and then retry.
-            highest_priority_thread->context_guard.unlock();
+            highest_priority_thread->m_context_guard.unlock();
             goto retry;
         } else {
             break;
@@ -489,7 +489,7 @@ void KScheduler::ScheduleImplFiber() {
     Reload(highest_priority_thread);
 
     // Reload the host thread.
-    Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->host_context);
+    Common::Fiber::YieldTo(m_switch_fiber, *highest_priority_thread->m_host_context);
 }
 
 void KScheduler::Unload(KThread* thread) {
@@ -497,13 +497,13 @@ void KScheduler::Unload(KThread* thread) {
     cpu_core.SaveContext(thread->GetContext32());
     cpu_core.SaveContext(thread->GetContext64());
     // Save the TPIDR_EL0 system register in case it was modified.
-    thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+    thread->SetTpidrEl0(cpu_core.GetTPIDR_EL0());
     cpu_core.ClearExclusiveState();
 
     // Check if the thread is terminated by checking the DPC flags.
     if ((thread->GetStackParameters().dpc_flags & static_cast<u32>(DpcFlag::Terminated)) == 0) {
         // The thread isn't terminated, so we want to unlock it.
-        thread->context_guard.unlock();
+        thread->m_context_guard.unlock();
     }
 }
 
@@ -511,8 +511,8 @@ void KScheduler::Reload(KThread* thread) {
     auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
     cpu_core.LoadContext(thread->GetContext32());
     cpu_core.LoadContext(thread->GetContext64());
-    cpu_core.SetTlsAddress(thread->GetTLSAddress());
-    cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+    cpu_core.SetTlsAddress(thread->GetTlsAddress());
+    cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0());
     cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
     cpu_core.ClearExclusiveState();
 }

diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index 8376c5d76..2288ee435 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -226,7 +226,7 @@ Result KServerSession::SendReply(bool is_hle) {
             KThread* server_thread{GetCurrentThreadPointer(m_kernel)};
             UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
 
-            auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
+            auto* src_msg_buffer = memory.GetPointer(server_thread->GetTlsAddress());
             auto* dst_msg_buffer = memory.GetPointer(client_message);
             std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
         }
@@ -334,7 +334,7 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext
             UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());
 
             auto* src_msg_buffer = memory.GetPointer(client_message);
-            auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
+            auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTlsAddress());
             std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size);
         }
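Both GetTlsAddress() call sites in k_server_session.cpp are two halves of the same operation: an IPC message moves by copying between the client's and the server's thread-local message buffers. A hedged sketch of the two directions — the raw-pointer signatures are illustrative only; yuzu resolves guest addresses through Core::Memory::Memory::GetPointer() and bounds each copy with the client's buffer size:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// ReceiveRequest direction: client TLS buffer -> server TLS buffer.
void CopyRequest(std::uint8_t* server_tls, const std::uint8_t* client_tls,
                 std::size_t buffer_size) {
    std::memcpy(server_tls, client_tls, buffer_size);
}

// SendReply direction: server TLS buffer -> client TLS buffer.
void CopyReply(std::uint8_t* client_tls, const std::uint8_t* server_tls,
               std::size_t buffer_size) {
    std::memcpy(client_tls, server_tls, buffer_size);
}
```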
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 27616440c..2eee85258 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -35,15 +35,11 @@
 #include "core/hle/result.h"
 #include "core/memory.h"
 
-#ifdef ARCHITECTURE_x86_64
-#include "core/arm/dynarmic/arm_dynarmic_32.h"
-#endif
-
 namespace {
 
 constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1;
 
-static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
+static void ResetThreadContext32(Kernel::KThread::ThreadContext32& context, u32 stack_top,
                                  u32 entry_point, u32 arg) {
     context = {};
     context.cpu_registers[0] = arg;
@@ -52,7 +48,7 @@ static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context,
     context.fpscr = 0;
 }
 
-static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, VAddr stack_top,
+static void ResetThreadContext64(Kernel::KThread::ThreadContext64& context, VAddr stack_top,
                                  VAddr entry_point, u64 arg) {
     context = {};
     context.cpu_registers[0] = arg;
@@ -95,13 +91,13 @@ public:
     }
 
 private:
-    KThread::WaiterList* m_wait_list;
+    KThread::WaiterList* m_wait_list{};
 };
 
 } // namespace
 
 KThread::KThread(KernelCore& kernel)
-    : KAutoObjectWithSlabHeapAndContainer{kernel}, activity_pause_lock{kernel} {}
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_activity_pause_lock{kernel} {}
 KThread::~KThread() = default;
 
 Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
@@ -117,7 +113,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
     ASSERT(0 <= phys_core && phys_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
 
     // First, clear the TLS address.
-    tls_address = {};
+    m_tls_address = {};
 
     // Next, assert things based on the type.
     switch (type) {
@@ -141,73 +137,73 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
         ASSERT_MSG(false, "KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
         break;
     }
-    thread_type = type;
+    m_thread_type = type;
 
     // Set the ideal core ID and affinity mask.
-    virtual_ideal_core_id = virt_core;
-    physical_ideal_core_id = phys_core;
-    virtual_affinity_mask = 1ULL << virt_core;
-    physical_affinity_mask.SetAffinity(phys_core, true);
+    m_virtual_ideal_core_id = virt_core;
+    m_physical_ideal_core_id = phys_core;
+    m_virtual_affinity_mask = 1ULL << virt_core;
+    m_physical_affinity_mask.SetAffinity(phys_core, true);
 
     // Set the thread state.
-    thread_state = (type == ThreadType::Main || type == ThreadType::Dummy)
-                       ? ThreadState::Runnable
-                       : ThreadState::Initialized;
+    m_thread_state = (type == ThreadType::Main || type == ThreadType::Dummy)
+                         ? ThreadState::Runnable
+                         : ThreadState::Initialized;
 
     // Set TLS address.
-    tls_address = 0;
+    m_tls_address = 0;
 
     // Set parent and condvar tree.
-    parent = nullptr;
-    condvar_tree = nullptr;
+    m_parent = nullptr;
+    m_condvar_tree = nullptr;
 
     // Set sync booleans.
-    signaled = false;
-    termination_requested = false;
-    wait_cancelled = false;
-    cancellable = false;
+    m_signaled = false;
+    m_termination_requested = false;
+    m_wait_cancelled = false;
+    m_cancellable = false;
 
     // Set core ID and wait result.
-    core_id = phys_core;
-    wait_result = ResultNoSynchronizationObject;
+    m_core_id = phys_core;
+    m_wait_result = ResultNoSynchronizationObject;
 
     // Set priorities.
-    priority = prio;
-    base_priority = prio;
+    m_priority = prio;
+    m_base_priority = prio;
 
     // Initialize sleeping queue.
-    wait_queue = nullptr;
+    m_wait_queue = nullptr;
 
     // Set suspend flags.
-    suspend_request_flags = 0;
-    suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask);
+    m_suspend_request_flags = 0;
+    m_suspend_allowed_flags = static_cast<u32>(ThreadState::SuspendFlagMask);
 
     // We're neither debug attached, nor are we nesting our priority inheritance.
-    debug_attached = false;
-    priority_inheritance_count = 0;
+    m_debug_attached = false;
+    m_priority_inheritance_count = 0;
 
     // We haven't been scheduled, and we have done no light IPC.
-    schedule_count = -1;
-    last_scheduled_tick = 0;
-    light_ipc_data = nullptr;
+    m_schedule_count = -1;
+    m_last_scheduled_tick = 0;
+    m_light_ipc_data = nullptr;
 
     // We're not waiting for a lock, and we haven't disabled migration.
-    waiting_lock_info = nullptr;
-    num_core_migration_disables = 0;
+    m_waiting_lock_info = nullptr;
+    m_num_core_migration_disables = 0;
 
     // We have no waiters, but we do have an entrypoint.
-    num_kernel_waiters = 0;
+    m_num_kernel_waiters = 0;
 
     // Set our current core id.
-    current_core_id = phys_core;
+    m_current_core_id = phys_core;
 
     // We haven't released our resource limit hint, and we've spent no time on the cpu.
-    resource_limit_release_hint = false;
-    cpu_time = 0;
+    m_resource_limit_release_hint = false;
+    m_cpu_time = 0;
 
     // Set debug context.
-    stack_top = user_stack_top;
-    argument = arg;
+    m_stack_top = user_stack_top;
+    m_argument = arg;
 
     // Clear our stack parameters.
     std::memset(static_cast<void*>(std::addressof(GetStackParameters())), 0,
@@ -217,34 +213,34 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
     if (owner != nullptr) {
         // Setup the TLS, if needed.
         if (type == ThreadType::User) {
-            R_TRY(owner->CreateThreadLocalRegion(std::addressof(tls_address)));
+            R_TRY(owner->CreateThreadLocalRegion(std::addressof(m_tls_address)));
         }
 
-        parent = owner;
-        parent->Open();
+        m_parent = owner;
+        m_parent->Open();
     }
 
     // Initialize thread context.
-    ResetThreadContext64(thread_context_64, user_stack_top, func, arg);
-    ResetThreadContext32(thread_context_32, static_cast<u32>(user_stack_top),
+    ResetThreadContext64(m_thread_context_64, user_stack_top, func, arg);
+    ResetThreadContext32(m_thread_context_32, static_cast<u32>(user_stack_top),
                          static_cast<u32>(func), static_cast<u32>(arg));
 
     // Setup the stack parameters.
-    StackParameters& sp = GetStackParameters();
+    StackParameters& sp = this->GetStackParameters();
     sp.cur_thread = this;
     sp.disable_count = 1;
-    SetInExceptionHandler();
+    this->SetInExceptionHandler();
 
     // Set thread ID.
-    thread_id = m_kernel.CreateNewThreadID();
+    m_thread_id = m_kernel.CreateNewThreadID();
 
     // We initialized!
-    initialized = true;
+    m_initialized = true;
 
     // Register ourselves with our parent process.
-    if (parent != nullptr) {
-        parent->RegisterThread(this);
-        if (parent->IsSuspended()) {
+    if (m_parent != nullptr) {
+        m_parent->RegisterThread(this);
+        if (m_parent->IsSuspended()) {
             RequestSuspend(SuspendType::Process);
         }
     }
@@ -259,8 +255,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
     R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
 
     // Initialize emulation parameters.
-    thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
-    thread->is_single_core = !Settings::values.use_multi_core.GetValue();
+    thread->m_host_context = std::make_shared<Common::Fiber>(std::move(init_func));
 
     R_SUCCEED();
 }
@@ -270,7 +265,7 @@ Result KThread::InitializeDummyThread(KThread* thread, KProcess* owner) {
     R_TRY(thread->Initialize({}, {}, {}, DummyThreadPriority, 3, owner, ThreadType::Dummy));
 
     // Initialize emulation parameters.
-    thread->stack_parameters.disable_count = 0;
+    thread->m_stack_parameters.disable_count = 0;
 
     R_SUCCEED();
 }
@@ -331,25 +326,25 @@ void KThread::PostDestroy(uintptr_t arg) {
 
 void KThread::Finalize() {
     // If the thread has an owner process, unregister it.
-    if (parent != nullptr) {
-        parent->UnregisterThread(this);
+    if (m_parent != nullptr) {
+        m_parent->UnregisterThread(this);
     }
 
     // If the thread has a local region, delete it.
-    if (tls_address != 0) {
-        ASSERT(parent->DeleteThreadLocalRegion(tls_address).IsSuccess());
+    if (m_tls_address != 0) {
+        ASSERT(m_parent->DeleteThreadLocalRegion(m_tls_address).IsSuccess());
     }
 
     // Release any waiters.
     {
-        ASSERT(waiting_lock_info == nullptr);
+        ASSERT(m_waiting_lock_info == nullptr);
         KScopedSchedulerLock sl{m_kernel};
 
         // Check that we have no kernel waiters.
-        ASSERT(num_kernel_waiters == 0);
+        ASSERT(m_num_kernel_waiters == 0);
 
-        auto it = held_lock_info_list.begin();
-        while (it != held_lock_info_list.end()) {
+        auto it = m_held_lock_info_list.begin();
+        while (it != m_held_lock_info_list.end()) {
             // Get the lock info.
             auto* const lock_info = std::addressof(*it);
@@ -371,7 +366,7 @@ void KThread::Finalize() {
             }
 
             // Remove the held lock from our list.
-            it = held_lock_info_list.erase(it);
+            it = m_held_lock_info_list.erase(it);
 
             // Free the lock info.
             LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
@@ -379,58 +374,58 @@ void KThread::Finalize() {
     }
 
     // Release host emulation members.
-    host_context.reset();
+    m_host_context.reset();
 
     // Perform inherited finalization.
     KSynchronizationObject::Finalize();
 }
 
 bool KThread::IsSignaled() const {
-    return signaled;
+    return m_signaled;
 }
 
 void KThread::OnTimer() {
-    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // If we're waiting, cancel the wait.
-    if (GetState() == ThreadState::Waiting) {
-        wait_queue->CancelWait(this, ResultTimedOut, false);
+    if (this->GetState() == ThreadState::Waiting) {
+        m_wait_queue->CancelWait(this, ResultTimedOut, false);
     }
 }
 
 void KThread::StartTermination() {
-    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Release user exception and unpin, if relevant.
-    if (parent != nullptr) {
-        parent->ReleaseUserException(this);
-        if (parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) {
-            parent->UnpinCurrentThread(core_id);
+    if (m_parent != nullptr) {
+        m_parent->ReleaseUserException(this);
+        if (m_parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) {
+            m_parent->UnpinCurrentThread(m_core_id);
         }
     }
 
     // Set state to terminated.
-    SetState(ThreadState::Terminated);
+    this->SetState(ThreadState::Terminated);
 
     // Clear the thread's status as running in parent.
-    if (parent != nullptr) {
-        parent->ClearRunningThread(this);
+    if (m_parent != nullptr) {
+        m_parent->ClearRunningThread(this);
     }
 
     // Signal.
-    signaled = true;
+    m_signaled = true;
     KSynchronizationObject::NotifyAvailable();
 
     // Clear previous thread in KScheduler.
     KScheduler::ClearPreviousThread(m_kernel, this);
 
     // Register terminated dpc flag.
-    RegisterDpc(DpcFlag::Terminated);
+    this->RegisterDpc(DpcFlag::Terminated);
 }
 
 void KThread::FinishTermination() {
     // Ensure that the thread is not executing on any core.
-    if (parent != nullptr) {
+    if (m_parent != nullptr) {
         for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
             KThread* core_thread{};
             do {
@@ -449,75 +444,76 @@ void KThread::DoWorkerTaskImpl() {
 }
 
 void KThread::Pin(s32 current_core) {
-    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Set ourselves as pinned.
     GetStackParameters().is_pinned = true;
 
     // Disable core migration.
-    ASSERT(num_core_migration_disables == 0);
+    ASSERT(m_num_core_migration_disables == 0);
     {
-        ++num_core_migration_disables;
+        ++m_num_core_migration_disables;
 
         // Save our ideal state to restore when we're unpinned.
-        original_physical_ideal_core_id = physical_ideal_core_id;
-        original_physical_affinity_mask = physical_affinity_mask;
+        m_original_physical_ideal_core_id = m_physical_ideal_core_id;
+        m_original_physical_affinity_mask = m_physical_affinity_mask;
 
         // Bind ourselves to this core.
-        const s32 active_core = GetActiveCore();
+        const s32 active_core = this->GetActiveCore();
 
-        SetActiveCore(current_core);
-        physical_ideal_core_id = current_core;
-        physical_affinity_mask.SetAffinityMask(1ULL << current_core);
+        this->SetActiveCore(current_core);
+        m_physical_ideal_core_id = current_core;
+        m_physical_affinity_mask.SetAffinityMask(1ULL << current_core);
 
-        if (active_core != current_core || physical_affinity_mask.GetAffinityMask() !=
-                                               original_physical_affinity_mask.GetAffinityMask()) {
-            KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, original_physical_affinity_mask,
-                                                    active_core);
+        if (active_core != current_core ||
+            m_physical_affinity_mask.GetAffinityMask() !=
+                m_original_physical_affinity_mask.GetAffinityMask()) {
+            KScheduler::OnThreadAffinityMaskChanged(m_kernel, this,
+                                                    m_original_physical_affinity_mask, active_core);
         }
     }
 
     // Disallow performing thread suspension.
     {
         // Update our allow flags.
-        suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) +
-                                         static_cast<u32>(ThreadState::SuspendShift)));
+        m_suspend_allowed_flags &= ~(1 << (static_cast<u32>(SuspendType::Thread) +
+                                           static_cast<u32>(ThreadState::SuspendShift)));
 
         // Update our state.
-        UpdateState();
+        this->UpdateState();
     }
 
     // TODO(bunnei): Update our SVC access permissions.
-    ASSERT(parent != nullptr);
+    ASSERT(m_parent != nullptr);
 }
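Unpin() below falls back to the highest-numbered core left in the affinity mask via Common::BitSize<u64>() - 1 - std::countl_zero(mask). A self-contained check of that identity (it assumes a nonzero mask, which the kernel guarantees because an empty affinity mask is rejected):

```cpp
#include <bit>
#include <cstdint>

// Index of the highest set bit: 63 minus the count of leading zeros.
// Mirrors the fallback-core selection in KThread::Unpin()/SetCoreMask().
constexpr int HighestSetBit(std::uint64_t mask) {
    return 63 - std::countl_zero(mask); // precondition: mask != 0
}

static_assert(HighestSetBit(0b0001) == 0); // only core 0 allowed
static_assert(HighestSetBit(0b1010) == 3); // cores 1 and 3 -> pick 3
static_assert(HighestSetBit(1ULL << 63) == 63);
```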
 void KThread::Unpin() {
-    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Set ourselves as unpinned.
-    GetStackParameters().is_pinned = false;
+    this->GetStackParameters().is_pinned = false;
 
     // Enable core migration.
-    ASSERT(num_core_migration_disables == 1);
+    ASSERT(m_num_core_migration_disables == 1);
     {
-        num_core_migration_disables--;
+        m_num_core_migration_disables--;
 
         // Restore our original state.
-        const KAffinityMask old_mask = physical_affinity_mask;
+        const KAffinityMask old_mask = m_physical_affinity_mask;
 
-        physical_ideal_core_id = original_physical_ideal_core_id;
-        physical_affinity_mask = original_physical_affinity_mask;
+        m_physical_ideal_core_id = m_original_physical_ideal_core_id;
+        m_physical_affinity_mask = m_original_physical_affinity_mask;
 
-        if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
-            const s32 active_core = GetActiveCore();
+        if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+            const s32 active_core = this->GetActiveCore();
 
-            if (!physical_affinity_mask.GetAffinity(active_core)) {
-                if (physical_ideal_core_id >= 0) {
-                    SetActiveCore(physical_ideal_core_id);
+            if (!m_physical_affinity_mask.GetAffinity(active_core)) {
+                if (m_physical_ideal_core_id >= 0) {
+                    this->SetActiveCore(m_physical_ideal_core_id);
                 } else {
-                    SetActiveCore(static_cast<s32>(
+                    this->SetActiveCore(static_cast<s32>(
                         Common::BitSize<u64>() - 1 -
-                        std::countl_zero(physical_affinity_mask.GetAffinityMask())));
+                        std::countl_zero(m_physical_affinity_mask.GetAffinityMask())));
                 }
             }
             KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core);
@@ -525,106 +521,106 @@ void KThread::Unpin() {
     }
 
     // Allow performing thread suspension (if termination hasn't been requested).
-    if (!IsTerminationRequested()) {
+    if (!this->IsTerminationRequested()) {
         // Update our allow flags.
-        suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) +
-                                        static_cast<u32>(ThreadState::SuspendShift)));
+        m_suspend_allowed_flags |= (1 << (static_cast<u32>(SuspendType::Thread) +
+                                          static_cast<u32>(ThreadState::SuspendShift)));
 
         // Update our state.
-        UpdateState();
+        this->UpdateState();
     }
 
     // TODO(bunnei): Update our SVC access permissions.
-    ASSERT(parent != nullptr);
+    ASSERT(m_parent != nullptr);
 
     // Resume any threads that began waiting on us while we were pinned.
-    for (auto it = pinned_waiter_list.begin(); it != pinned_waiter_list.end(); ++it) {
+    for (auto it = m_pinned_waiter_list.begin(); it != m_pinned_waiter_list.end(); ++it) {
         it->EndWait(ResultSuccess);
     }
 }
 
 u16 KThread::GetUserDisableCount() const {
-    if (!IsUserThread()) {
+    if (!this->IsUserThread()) {
         // We only emulate TLS for user threads
         return {};
     }
 
     auto& memory = m_kernel.System().Memory();
-    return memory.Read16(tls_address + offsetof(ThreadLocalRegion, disable_count));
+    return memory.Read16(m_tls_address + offsetof(ThreadLocalRegion, disable_count));
 }
 
 void KThread::SetInterruptFlag() {
-    if (!IsUserThread()) {
+    if (!this->IsUserThread()) {
         // We only emulate TLS for user threads
         return;
     }
 
     auto& memory = m_kernel.System().Memory();
-    memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
+    memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
 }
 
 void KThread::ClearInterruptFlag() {
-    if (!IsUserThread()) {
+    if (!this->IsUserThread()) {
         // We only emulate TLS for user threads
         return;
     }
 
     auto& memory = m_kernel.System().Memory();
-    memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
+    memory.Write16(m_tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
 }
 
 Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
     KScopedSchedulerLock sl{m_kernel};
 
     // Get the virtual mask.
-    *out_ideal_core = virtual_ideal_core_id;
-    *out_affinity_mask = virtual_affinity_mask;
+    *out_ideal_core = m_virtual_ideal_core_id;
+    *out_affinity_mask = m_virtual_affinity_mask;
 
     R_SUCCEED();
 }
 
 Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
     KScopedSchedulerLock sl{m_kernel};
-    ASSERT(num_core_migration_disables >= 0);
+    ASSERT(m_num_core_migration_disables >= 0);
 
     // Select between core mask and original core mask.
-    if (num_core_migration_disables == 0) {
-        *out_ideal_core = physical_ideal_core_id;
-        *out_affinity_mask = physical_affinity_mask.GetAffinityMask();
+    if (m_num_core_migration_disables == 0) {
+        *out_ideal_core = m_physical_ideal_core_id;
+        *out_affinity_mask = m_physical_affinity_mask.GetAffinityMask();
     } else {
-        *out_ideal_core = original_physical_ideal_core_id;
-        *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
+        *out_ideal_core = m_original_physical_ideal_core_id;
+        *out_affinity_mask = m_original_physical_affinity_mask.GetAffinityMask();
     }
 
     R_SUCCEED();
 }
 
-Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
-    ASSERT(parent != nullptr);
+Result KThread::SetCoreMask(s32 core_id, u64 v_affinity_mask) {
+    ASSERT(m_parent != nullptr);
     ASSERT(v_affinity_mask != 0);
-    KScopedLightLock lk(activity_pause_lock);
+    KScopedLightLock lk(m_activity_pause_lock);
 
     // Set the core mask.
     u64 p_affinity_mask = 0;
     {
         KScopedSchedulerLock sl(m_kernel);
-        ASSERT(num_core_migration_disables >= 0);
+        ASSERT(m_num_core_migration_disables >= 0);
 
         // If we're updating, set our ideal virtual core.
-        if (core_id_ != Svc::IdealCoreNoUpdate) {
-            virtual_ideal_core_id = core_id_;
+        if (core_id != Svc::IdealCoreNoUpdate) {
+            m_virtual_ideal_core_id = core_id;
         } else {
             // Preserve our ideal core id.
-            core_id_ = virtual_ideal_core_id;
-            R_UNLESS(((1ULL << core_id_) & v_affinity_mask) != 0, ResultInvalidCombination);
+            core_id = m_virtual_ideal_core_id;
+            R_UNLESS(((1ULL << core_id) & v_affinity_mask) != 0, ResultInvalidCombination);
         }
 
         // Set our affinity mask.
-        virtual_affinity_mask = v_affinity_mask;
+        m_virtual_affinity_mask = v_affinity_mask;
 
         // Translate the virtual core to a physical core.
-        if (core_id_ >= 0) {
-            core_id_ = Core::Hardware::VirtualToPhysicalCoreMap[core_id_];
+        if (core_id >= 0) {
+            core_id = Core::Hardware::VirtualToPhysicalCoreMap[core_id];
        }
 
         // Translate the virtual affinity mask to a physical one.
@@ -635,35 +631,35 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
         }
 
         // If we haven't disabled migration, perform an affinity change.
-        if (num_core_migration_disables == 0) {
-            const KAffinityMask old_mask = physical_affinity_mask;
+        if (m_num_core_migration_disables == 0) {
+            const KAffinityMask old_mask = m_physical_affinity_mask;
 
             // Set our new ideals.
-            physical_ideal_core_id = core_id_;
-            physical_affinity_mask.SetAffinityMask(p_affinity_mask);
+            m_physical_ideal_core_id = core_id;
+            m_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
 
-            if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
+            if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
                 const s32 active_core = GetActiveCore();
 
-                if (active_core >= 0 && !physical_affinity_mask.GetAffinity(active_core)) {
+                if (active_core >= 0 && !m_physical_affinity_mask.GetAffinity(active_core)) {
                     const s32 new_core = static_cast<s32>(
-                        physical_ideal_core_id >= 0
-                            ? physical_ideal_core_id
+                        m_physical_ideal_core_id >= 0
+                            ? m_physical_ideal_core_id
                             : Common::BitSize<u64>() - 1 -
-                                  std::countl_zero(physical_affinity_mask.GetAffinityMask()));
+                                  std::countl_zero(m_physical_affinity_mask.GetAffinityMask()));
                     SetActiveCore(new_core);
                 }
                 KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core);
             }
         } else {
             // Otherwise, we edit the original affinity for restoration later.
-            original_physical_ideal_core_id = core_id_;
-            original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
+            m_original_physical_ideal_core_id = core_id;
+            m_original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
         }
     }
 
     // Update the pinned waiter list.
-    ThreadQueueImplForKThreadSetProperty wait_queue_(m_kernel, std::addressof(pinned_waiter_list));
+    ThreadQueueImplForKThreadSetProperty wait_queue(m_kernel, std::addressof(m_pinned_waiter_list));
     {
         bool retry_update{};
         do {
@@ -671,7 +667,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
             KScopedSchedulerLock sl(m_kernel);
 
             // Don't do any further management if our termination has been requested.
-            R_SUCCEED_IF(IsTerminationRequested());
+            R_SUCCEED_IF(this->IsTerminationRequested());
 
             // By default, we won't need to retry.
             retry_update = false;
@@ -691,14 +687,14 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
             // new mask.
             if (thread_is_current && ((1ULL << thread_core) & p_affinity_mask) == 0) {
                 // If the thread is pinned, we want to wait until it's not pinned.
-                if (GetStackParameters().is_pinned) {
+                if (this->GetStackParameters().is_pinned) {
                     // Verify that the current thread isn't terminating.
                     R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(),
                              ResultTerminationRequested);
 
                     // Wait until the thread isn't pinned any more.
-                    pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
-                    GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue_));
+                    m_pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
+                    GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue));
                 } else {
                     // If the thread isn't pinned, release the scheduler lock and retry until it's
                     // not current.
@@ -717,24 +713,24 @@
 void KThread::SetBasePriority(s32 value) {
     KScopedSchedulerLock sl{m_kernel};
 
     // Change our base priority.
-    base_priority = value;
+    m_base_priority = value;
 
     // Perform a priority restoration.
     RestorePriority(m_kernel, this);
 }
 
 KThread* KThread::GetLockOwner() const {
-    return waiting_lock_info != nullptr ? waiting_lock_info->GetOwner() : nullptr;
+    return m_waiting_lock_info != nullptr ? m_waiting_lock_info->GetOwner() : nullptr;
 }
 
-void KThread::IncreaseBasePriority(s32 priority_) {
-    ASSERT(Svc::HighestThreadPriority <= priority_ && priority_ <= Svc::LowestThreadPriority);
+void KThread::IncreaseBasePriority(s32 priority) {
+    ASSERT(Svc::HighestThreadPriority <= priority && priority <= Svc::LowestThreadPriority);
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
     ASSERT(!this->GetStackParameters().is_pinned);
 
     // Set our base priority.
-    if (base_priority > priority_) {
-        base_priority = priority_;
+    if (m_base_priority > priority) {
+        m_base_priority = priority;
 
         // Perform a priority restoration.
         RestorePriority(m_kernel, this);
@@ -745,19 +741,19 @@
 void KThread::RequestSuspend(SuspendType type) {
     KScopedSchedulerLock sl{m_kernel};
 
     // Note the request in our flags.
-    suspend_request_flags |=
-        (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
+    m_suspend_request_flags |=
+        (1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
 
     // Try to perform the suspend.
-    TrySuspend();
+    this->TrySuspend();
 }
 
 void KThread::Resume(SuspendType type) {
     KScopedSchedulerLock sl{m_kernel};
 
     // Clear the request in our flags.
-    suspend_request_flags &=
-        ~(1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
+    m_suspend_request_flags &=
+        ~(1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)));
 
     // Update our state.
     this->UpdateState();
@@ -767,17 +763,17 @@
 void KThread::WaitCancel() {
     KScopedSchedulerLock sl{m_kernel};
 
     // Check if we're waiting and cancellable.
-    if (this->GetState() == ThreadState::Waiting && cancellable) {
-        wait_cancelled = false;
-        wait_queue->CancelWait(this, ResultCancelled, true);
+    if (this->GetState() == ThreadState::Waiting && m_cancellable) {
+        m_wait_cancelled = false;
+        m_wait_queue->CancelWait(this, ResultCancelled, true);
     } else {
         // Otherwise, note that we cancelled a wait.
-        wait_cancelled = true;
+        m_wait_cancelled = true;
     }
 }
 
 void KThread::TrySuspend() {
-    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
     ASSERT(IsSuspendRequested());
 
     // Ensure that we have no waiters.
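UpdateState() below ORs the effective suspend bits into the state word. The layout is implicit in the masks: one bit per SuspendType starting at ThreadState::SuspendShift, and a thread is actually suspended only when a bit is set in both the request and allow masks. A sketch of that arithmetic with assumed constants (the real values live in ThreadState and SuspendType):

```cpp
#include <cstdint>

// Assumed for illustration; the real constants are defined by ThreadState.
constexpr std::uint32_t kSuspendShift = 4;
enum class SuspendType : std::uint32_t { Process = 0, Thread = 1, Debug = 2 };

constexpr std::uint32_t SuspendBit(SuspendType type) {
    return 1U << (kSuspendShift + static_cast<std::uint32_t>(type));
}

// Mirrors KThread::GetSuspendFlags(): only bits that are both requested
// and currently allowed contribute to the thread's suspended state.
constexpr std::uint32_t EffectiveSuspendFlags(std::uint32_t allowed,
                                              std::uint32_t requested) {
    return allowed & requested;
}

// Pinning clears the allow bit, so a pending Thread-suspend request stops
// taking effect without being forgotten.
static_assert(EffectiveSuspendFlags(0, SuspendBit(SuspendType::Thread)) == 0);
static_assert(EffectiveSuspendFlags(SuspendBit(SuspendType::Thread),
                                    SuspendBit(SuspendType::Thread)) != 0);
```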
@@ -791,13 +787,13 @@
 }
 
 void KThread::UpdateState() {
-    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Set our suspend flags in state.
-    const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
+    const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed);
     const auto new_state =
         static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask);
-    thread_state.store(new_state, std::memory_order_relaxed);
+    m_thread_state.store(new_state, std::memory_order_relaxed);
 
     // Note the state change in scheduler.
     if (new_state != old_state) {
@@ -806,11 +802,11 @@
 }
 
 void KThread::Continue() {
-    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Clear our suspend flags in state.
-    const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
-    thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed);
+    const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed);
+    m_thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed);
 
     // Note the state change in scheduler.
     KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
@@ -839,7 +835,7 @@ void KThread::CloneFpuStatus() {
 
 Result KThread::SetActivity(Svc::ThreadActivity activity) {
     // Lock ourselves.
-    KScopedLightLock lk(activity_pause_lock);
+    KScopedLightLock lk(m_activity_pause_lock);
 
     // Set the activity.
     {
@@ -871,10 +867,10 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
 
     // If the thread is now paused, update the pinned waiter list.
     if (activity == Svc::ThreadActivity::Paused) {
-        ThreadQueueImplForKThreadSetProperty wait_queue_(m_kernel,
-                                                         std::addressof(pinned_waiter_list));
+        ThreadQueueImplForKThreadSetProperty wait_queue(m_kernel,
+                                                        std::addressof(m_pinned_waiter_list));
 
-        bool thread_is_current;
+        bool thread_is_current{};
         do {
             // Lock the scheduler.
             KScopedSchedulerLock sl(m_kernel);
@@ -892,8 +888,8 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
                              ResultTerminationRequested);
 
                     // Wait until the thread isn't pinned any more.
-                    pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
-                    GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue_));
+                    m_pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
+                    GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue));
                 } else {
                     // Check if the thread is currently running.
                     // If it is, we'll need to retry.
@@ -912,7 +908,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
 
 Result KThread::GetThreadContext3(std::vector<u8>& out) {
     // Lock ourselves.
-    KScopedLightLock lk{activity_pause_lock};
+    KScopedLightLock lk{m_activity_pause_lock};
 
     // Get the context.
     {
@@ -923,8 +919,8 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
         R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
 
         // If we're not terminating, get the thread's user context.
-        if (!IsTerminationRequested()) {
-            if (parent->Is64BitProcess()) {
+        if (!this->IsTerminationRequested()) {
+            if (m_parent->Is64BitProcess()) {
                 // Mask away mode bits, interrupt bits, IL bit, and other reserved bits.
                 auto context = GetContext64();
                 context.pstate &= 0xFF0FFE20;
@@ -952,7 +948,7 @@ void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
     lock_info->SetOwner(this);
 
     // Add the lock to our held list.
-    held_lock_info_list.push_front(*lock_info);
+    m_held_lock_info_list.push_front(*lock_info);
 }
 
 KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_,
@@ -960,7 +956,7 @@ KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_ke
     ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 
     // Try to find an existing held lock.
-    for (auto& held_lock : held_lock_info_list) {
+    for (auto& held_lock : m_held_lock_info_list) {
         if (held_lock.GetAddressKey() == address_key_ &&
             held_lock.GetIsKernelAddressKey() == is_kernel_address_key_) {
             return std::addressof(held_lock);
@@ -975,21 +971,21 @@ void KThread::AddWaiterImpl(KThread* thread) {
     ASSERT(thread->GetConditionVariableTree() == nullptr);
 
     // Get the thread's address key.
-    const auto address_key_ = thread->GetAddressKey();
-    const auto is_kernel_address_key_ = thread->GetIsKernelAddressKey();
+    const auto address_key = thread->GetAddressKey();
+    const auto is_kernel_address_key = thread->GetIsKernelAddressKey();
 
     // Keep track of how many kernel waiters we have.
-    if (is_kernel_address_key_) {
-        ASSERT((num_kernel_waiters++) >= 0);
+    if (is_kernel_address_key) {
+        ASSERT((m_num_kernel_waiters++) >= 0);
         KScheduler::SetSchedulerUpdateNeeded(m_kernel);
     }
 
     // Get the relevant lock info.
-    auto* lock_info = this->FindHeldLock(address_key_, is_kernel_address_key_);
+    auto* lock_info = this->FindHeldLock(address_key, is_kernel_address_key);
     if (lock_info == nullptr) {
         // Create a new lock for the address key.
         lock_info =
-            LockWithPriorityInheritanceInfo::Create(m_kernel, address_key_, is_kernel_address_key_);
+            LockWithPriorityInheritanceInfo::Create(m_kernel, address_key, is_kernel_address_key);
 
         // Add the new lock to our list.
         this->AddHeldLock(lock_info);
@@ -1004,7 +1000,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
 
     // Keep track of how many kernel waiters we have.
     if (thread->GetIsKernelAddressKey()) {
-        ASSERT((num_kernel_waiters--) > 0);
+        ASSERT((m_num_kernel_waiters--) > 0);
         KScheduler::SetSchedulerUpdateNeeded(m_kernel);
     }
 
@@ -1014,7 +1010,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
 
     // Remove the waiter.
     if (lock_info->RemoveWaiter(thread)) {
-        held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info));
+        m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info));
         LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
     }
 }
@@ -1025,7 +1021,7 @@ void KThread::RestorePriority(KernelCore& kernel, KThread* thread) {
     while (thread != nullptr) {
         // We want to inherit priority where possible.
         s32 new_priority = thread->GetBasePriority();
-        for (const auto& held_lock : thread->held_lock_info_list) {
+        for (const auto& held_lock : thread->m_held_lock_info_list) {
             new_priority =
                 std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority());
         }
@@ -1102,12 +1098,12 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
     }
 
     // Remove the lock info from our held list.
-    held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info));
+    m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info));
 
     // Keep track of how many kernel waiters we have.
     if (lock_info->GetIsKernelAddressKey()) {
-        num_kernel_waiters -= lock_info->GetWaiterCount();
-        ASSERT(num_kernel_waiters >= 0);
+        m_num_kernel_waiters -= lock_info->GetWaiterCount();
+        ASSERT(m_num_kernel_waiters >= 0);
         KScheduler::SetSchedulerUpdateNeeded(m_kernel);
     }
 
@@ -1130,8 +1126,8 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
 
     // Keep track of any kernel waiters for the new owner.
     if (lock_info->GetIsKernelAddressKey()) {
-        next_lock_owner->num_kernel_waiters += lock_info->GetWaiterCount();
-        ASSERT(next_lock_owner->num_kernel_waiters > 0);
+        next_lock_owner->m_num_kernel_waiters += lock_info->GetWaiterCount();
+        ASSERT(next_lock_owner->m_num_kernel_waiters > 0);
 
         // NOTE: No need to set scheduler update needed, because we will have already done so
         //       when removing earlier.
@@ -1156,11 +1152,11 @@ Result KThread::Run() {
         KScopedSchedulerLock lk{m_kernel};
 
         // If either this thread or the current thread are requesting termination, note it.
-        R_UNLESS(!IsTerminationRequested(), ResultTerminationRequested);
+        R_UNLESS(!this->IsTerminationRequested(), ResultTerminationRequested);
         R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested);
 
         // Ensure our thread state is correct.
-        R_UNLESS(GetState() == ThreadState::Initialized, ResultInvalidState);
+        R_UNLESS(this->GetState() == ThreadState::Initialized, ResultInvalidState);
 
         // If the current thread has been asked to suspend, suspend it and retry.
         if (GetCurrentThread(m_kernel).IsSuspended()) {
@@ -1177,7 +1173,7 @@ Result KThread::Run() {
         }
 
         // Set our state and finish.
-        SetState(ThreadState::Runnable);
+        this->SetState(ThreadState::Runnable);
 
         R_SUCCEED();
     }
@@ -1187,10 +1183,10 @@ void KThread::Exit() {
     ASSERT(this == GetCurrentThreadPointer(m_kernel));
 
     // Release the thread resource hint, running thread count from parent.
-    if (parent != nullptr) {
-        parent->GetResourceLimit()->Release(Kernel::LimitableResource::ThreadCountMax, 0, 1);
-        resource_limit_release_hint = true;
-        parent->DecrementRunningThreadCount();
+    if (m_parent != nullptr) {
+        m_parent->GetResourceLimit()->Release(Kernel::LimitableResource::ThreadCountMax, 0, 1);
+        m_resource_limit_release_hint = true;
+        m_parent->DecrementRunningThreadCount();
     }
 
     // Perform termination.
@@ -1198,11 +1194,11 @@ void KThread::Exit() {
         KScopedSchedulerLock sl{m_kernel};
 
         // Disallow all suspension.
-        suspend_allowed_flags = 0;
+        m_suspend_allowed_flags = 0;
         this->UpdateState();
 
         // Disallow all suspension.
-        suspend_allowed_flags = 0;
+        m_suspend_allowed_flags = 0;
 
         // Start termination.
         StartTermination();
@@ -1238,14 +1234,14 @@ ThreadState KThread::RequestTerminate() {
     const bool first_request = [&]() -> bool {
         // Perform an atomic compare-and-swap from false to true.
         bool expected = false;
-        return termination_requested.compare_exchange_strong(expected, true);
+        return m_termination_requested.compare_exchange_strong(expected, true);
     }();
 
     // If this is the first request, start termination procedure.
     if (first_request) {
         // If the thread is in initialized state, just change state to terminated.
         if (this->GetState() == ThreadState::Initialized) {
-            thread_state = ThreadState::Terminated;
+            m_thread_state = ThreadState::Terminated;
             return ThreadState::Terminated;
         }
 
@@ -1259,7 +1255,7 @@ ThreadState KThread::RequestTerminate() {
 
         // If the thread is suspended, continue it.
         if (this->IsSuspended()) {
-            suspend_allowed_flags = 0;
+            m_suspend_allowed_flags = 0;
             this->UpdateState();
         }
 
@@ -1268,7 +1264,7 @@ ThreadState KThread::RequestTerminate() {
 
         // If the thread is runnable, send a termination interrupt to other cores.
         if (this->GetState() == ThreadState::Runnable) {
-            if (const u64 core_mask = physical_affinity_mask.GetAffinityMask() &
+            if (const u64 core_mask = m_physical_affinity_mask.GetAffinityMask() &
                                       ~(1ULL << GetCurrentCoreId(m_kernel));
                 core_mask != 0) {
                 Kernel::KInterruptManager::SendInterProcessorInterrupt(m_kernel, core_mask);
@@ -1277,7 +1273,7 @@ ThreadState KThread::RequestTerminate() {
 
         // Wake up the thread.
         if (this->GetState() == ThreadState::Waiting) {
-            wait_queue->CancelWait(this, ResultTerminationRequested, true);
+            m_wait_queue->CancelWait(this, ResultTerminationRequested, true);
         }
     }
 
@@ -1285,7 +1281,7 @@ ThreadState KThread::RequestTerminate() {
 }
 
 Result KThread::Sleep(s64 timeout) {
-    ASSERT(!m_kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(!KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
     ASSERT(this == GetCurrentThreadPointer(m_kernel));
     ASSERT(timeout > 0);
 
@@ -1315,7 +1311,7 @@ void KThread::RequestDummyThreadWait() {
     ASSERT(this->IsDummyThread());
 
     // We will block when the scheduler lock is released.
-    dummy_thread_runnable.store(false);
+    m_dummy_thread_runnable.store(false);
 }
 
 void KThread::DummyThreadBeginWait() {
@@ -1325,7 +1321,7 @@ void KThread::DummyThreadBeginWait() {
     }
 
     // Block until runnable is no longer false.
-    dummy_thread_runnable.wait(false);
+    m_dummy_thread_runnable.wait(false);
 }
 
 void KThread::DummyThreadEndWait() {
@@ -1333,8 +1329,8 @@ void KThread::DummyThreadEndWait() {
     ASSERT(this->IsDummyThread());
 
     // Wake up the waiting thread.
-    dummy_thread_runnable.store(true);
-    dummy_thread_runnable.notify_one();
+    m_dummy_thread_runnable.store(true);
+    m_dummy_thread_runnable.notify_one();
 }
 
 void KThread::BeginWait(KThreadQueue* queue) {
@@ -1342,42 +1338,42 @@ void KThread::BeginWait(KThreadQueue* queue) {
     SetState(ThreadState::Waiting);
 
     // Set our wait queue.
-    wait_queue = queue;
+    m_wait_queue = queue;
 }
 
-void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_) {
+void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result) {
     // Lock the scheduler.
     KScopedSchedulerLock sl(m_kernel);
 
     // If we're waiting, notify our queue that we're available.
-    if (GetState() == ThreadState::Waiting) {
-        wait_queue->NotifyAvailable(this, signaled_object, wait_result_);
+    if (this->GetState() == ThreadState::Waiting) {
+        m_wait_queue->NotifyAvailable(this, signaled_object, wait_result);
     }
 }
 
-void KThread::EndWait(Result wait_result_) {
+void KThread::EndWait(Result wait_result) {
     // Lock the scheduler.
     KScopedSchedulerLock sl(m_kernel);
 
     // If we're waiting, notify our queue that we're available.
-    if (GetState() == ThreadState::Waiting) {
-        if (wait_queue == nullptr) {
+    if (this->GetState() == ThreadState::Waiting) {
+        if (m_wait_queue == nullptr) {
             // This should never happen, but avoid a hard crash below to get this logged.
             ASSERT_MSG(false, "wait_queue is nullptr!");
             return;
         }
-        wait_queue->EndWait(this, wait_result_);
+        m_wait_queue->EndWait(this, wait_result);
     }
 }
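The dummy-thread hand-off above is plain C++20 atomic wait/notify: the waiter blocks in wait(false) until the value is no longer false, and the waker stores true before notify_one(). A minimal standalone version of the same pattern:

```cpp
#include <atomic>
#include <thread>

int main() {
    std::atomic<bool> runnable{false};

    std::thread waiter{[&] {
        // Blocks while the value still equals the argument (false),
        // just like m_dummy_thread_runnable.wait(false).
        runnable.wait(false);
    }};

    // Publish the new value first, then wake the waiter.
    runnable.store(true);
    runnable.notify_one();

    waiter.join();
}
```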
- if (GetState() == ThreadState::Waiting) { - wait_queue->CancelWait(this, wait_result_, cancel_timer_task); + if (this->GetState() == ThreadState::Waiting) { + m_wait_queue->CancelWait(this, wait_result, cancel_timer_task); } } @@ -1385,20 +1381,19 @@ void KThread::SetState(ThreadState state) { KScopedSchedulerLock sl{m_kernel}; // Clear debugging state - SetMutexWaitAddressForDebugging({}); SetWaitReasonForDebugging({}); - const ThreadState old_state = thread_state.load(std::memory_order_relaxed); - thread_state.store( + const ThreadState old_state = m_thread_state.load(std::memory_order_relaxed); + m_thread_state.store( static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)), std::memory_order_relaxed); - if (thread_state.load(std::memory_order_relaxed) != old_state) { + if (m_thread_state.load(std::memory_order_relaxed) != old_state) { KScheduler::OnThreadStateChanged(m_kernel, this, old_state); } } std::shared_ptr<Common::Fiber>& KThread::GetHostContext() { - return host_context; + return m_host_context; } void SetCurrentThread(KernelCore& kernel, KThread* thread) { diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index e541ea079..53fa64369 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h @@ -108,11 +108,11 @@ enum class StepState : u32 { }; void SetCurrentThread(KernelCore& kernel, KThread* thread); -[[nodiscard]] KThread* GetCurrentThreadPointer(KernelCore& kernel); -[[nodiscard]] KThread& GetCurrentThread(KernelCore& kernel); -[[nodiscard]] KProcess* GetCurrentProcessPointer(KernelCore& kernel); -[[nodiscard]] KProcess& GetCurrentProcess(KernelCore& kernel); -[[nodiscard]] s32 GetCurrentCoreId(KernelCore& kernel); +KThread* GetCurrentThreadPointer(KernelCore& kernel); +KThread& GetCurrentThread(KernelCore& kernel); +KProcess* GetCurrentProcessPointer(KernelCore& kernel); +KProcess& GetCurrentProcess(KernelCore& kernel); +s32 GetCurrentCoreId(KernelCore& kernel); class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KWorkerTask>, public boost::intrusive::list_base_hook<>, @@ -136,16 +136,12 @@ public: using ThreadContext64 = Core::ARM_Interface::ThreadContext64; using WaiterList = boost::intrusive::list<KThread>; - void SetName(std::string new_name) { - name = std::move(new_name); - } - /** * Gets the thread's current priority * @return The current thread's priority */ - [[nodiscard]] s32 GetPriority() const { - return priority; + s32 GetPriority() const { + return m_priority; } /** @@ -153,23 +149,23 @@ public: * @param priority The new priority. */ void SetPriority(s32 value) { - priority = value; + m_priority = value; } /** * Gets the thread's nominal priority. * @return The current thread's nominal priority. 
*/ - [[nodiscard]] s32 GetBasePriority() const { - return base_priority; + s32 GetBasePriority() const { + return m_base_priority; } /** * Gets the thread's thread ID * @return The thread's ID */ - [[nodiscard]] u64 GetThreadID() const { - return thread_id; + u64 GetThreadId() const { + return m_thread_id; } void ContinueIfHasKernelWaiters() { @@ -180,7 +176,7 @@ public: void SetBasePriority(s32 value); - [[nodiscard]] Result Run(); + Result Run(); void Exit(); @@ -188,22 +184,22 @@ public: ThreadState RequestTerminate(); - [[nodiscard]] u32 GetSuspendFlags() const { - return suspend_allowed_flags & suspend_request_flags; + u32 GetSuspendFlags() const { + return m_suspend_allowed_flags & m_suspend_request_flags; } - [[nodiscard]] bool IsSuspended() const { + bool IsSuspended() const { return GetSuspendFlags() != 0; } - [[nodiscard]] bool IsSuspendRequested(SuspendType type) const { - return (suspend_request_flags & - (1u << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) != + bool IsSuspendRequested(SuspendType type) const { + return (m_suspend_request_flags & + (1U << (static_cast<u32>(ThreadState::SuspendShift) + static_cast<u32>(type)))) != 0; } - [[nodiscard]] bool IsSuspendRequested() const { - return suspend_request_flags != 0; + bool IsSuspendRequested() const { + return m_suspend_request_flags != 0; } void RequestSuspend(SuspendType type); @@ -217,124 +213,124 @@ public: void Continue(); constexpr void SetSyncedIndex(s32 index) { - synced_index = index; + m_synced_index = index; } - [[nodiscard]] constexpr s32 GetSyncedIndex() const { - return synced_index; + constexpr s32 GetSyncedIndex() const { + return m_synced_index; } constexpr void SetWaitResult(Result wait_res) { - wait_result = wait_res; + m_wait_result = wait_res; } - [[nodiscard]] constexpr Result GetWaitResult() const { - return wait_result; + constexpr Result GetWaitResult() const { + return m_wait_result; } /* * Returns the Thread Local Storage address of the current thread * @returns VAddr of the thread's TLS */ - [[nodiscard]] VAddr GetTLSAddress() const { - return tls_address; + VAddr GetTlsAddress() const { + return m_tls_address; } /* * Returns the value of the TPIDR_EL0 Read/Write system register for this thread. * @returns The value of the TPIDR_EL0 register. */ - [[nodiscard]] u64 GetTPIDR_EL0() const { - return thread_context_64.tpidr; + u64 GetTpidrEl0() const { + return m_thread_context_64.tpidr; } /// Sets the value of the TPIDR_EL0 Read/Write system register for this thread. 
- void SetTPIDR_EL0(u64 value) { - thread_context_64.tpidr = value; - thread_context_32.tpidr = static_cast<u32>(value); + void SetTpidrEl0(u64 value) { + m_thread_context_64.tpidr = value; + m_thread_context_32.tpidr = static_cast<u32>(value); } void CloneFpuStatus(); - [[nodiscard]] ThreadContext32& GetContext32() { - return thread_context_32; + ThreadContext32& GetContext32() { + return m_thread_context_32; } - [[nodiscard]] const ThreadContext32& GetContext32() const { - return thread_context_32; + const ThreadContext32& GetContext32() const { + return m_thread_context_32; } - [[nodiscard]] ThreadContext64& GetContext64() { - return thread_context_64; + ThreadContext64& GetContext64() { + return m_thread_context_64; } - [[nodiscard]] const ThreadContext64& GetContext64() const { - return thread_context_64; + const ThreadContext64& GetContext64() const { + return m_thread_context_64; } - [[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext(); + std::shared_ptr<Common::Fiber>& GetHostContext(); - [[nodiscard]] ThreadState GetState() const { - return thread_state.load(std::memory_order_relaxed) & ThreadState::Mask; + ThreadState GetState() const { + return m_thread_state.load(std::memory_order_relaxed) & ThreadState::Mask; } - [[nodiscard]] ThreadState GetRawState() const { - return thread_state.load(std::memory_order_relaxed); + ThreadState GetRawState() const { + return m_thread_state.load(std::memory_order_relaxed); } void SetState(ThreadState state); - [[nodiscard]] StepState GetStepState() const { - return step_state; + StepState GetStepState() const { + return m_step_state; } void SetStepState(StepState state) { - step_state = state; + m_step_state = state; } - [[nodiscard]] s64 GetLastScheduledTick() const { - return last_scheduled_tick; + s64 GetLastScheduledTick() const { + return m_last_scheduled_tick; } void SetLastScheduledTick(s64 tick) { - last_scheduled_tick = tick; + m_last_scheduled_tick = tick; } - void AddCpuTime([[maybe_unused]] s32 core_id_, s64 amount) { - cpu_time += amount; + void AddCpuTime(s32 core_id, s64 amount) { + m_cpu_time += amount; // TODO(bunnei): Debug kernels track per-core tick counts. Should we? 
     }
 
-    [[nodiscard]] s64 GetCpuTime() const {
-        return cpu_time;
+    s64 GetCpuTime() const {
+        return m_cpu_time;
     }
 
-    [[nodiscard]] s32 GetActiveCore() const {
-        return core_id;
+    s32 GetActiveCore() const {
+        return m_core_id;
     }
 
     void SetActiveCore(s32 core) {
-        core_id = core;
+        m_core_id = core;
     }
 
-    [[nodiscard]] s32 GetCurrentCore() const {
-        return current_core_id;
+    s32 GetCurrentCore() const {
+        return m_current_core_id;
     }
 
     void SetCurrentCore(s32 core) {
-        current_core_id = core;
+        m_current_core_id = core;
     }
 
-    [[nodiscard]] KProcess* GetOwnerProcess() {
-        return parent;
+    KProcess* GetOwnerProcess() {
+        return m_parent;
     }
 
-    [[nodiscard]] const KProcess* GetOwnerProcess() const {
-        return parent;
+    const KProcess* GetOwnerProcess() const {
+        return m_parent;
     }
 
-    [[nodiscard]] bool IsUserThread() const {
-        return parent != nullptr;
+    bool IsUserThread() const {
+        return m_parent != nullptr;
     }
 
     u16 GetUserDisableCount() const;
@@ -343,69 +339,69 @@ public:
 
     KThread* GetLockOwner() const;
 
-    [[nodiscard]] const KAffinityMask& GetAffinityMask() const {
-        return physical_affinity_mask;
+    const KAffinityMask& GetAffinityMask() const {
+        return m_physical_affinity_mask;
     }
 
-    [[nodiscard]] Result GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+    Result GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
 
-    [[nodiscard]] Result GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
+    Result GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask);
 
-    [[nodiscard]] Result SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask);
+    Result SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask);
 
-    [[nodiscard]] Result SetActivity(Svc::ThreadActivity activity);
+    Result SetActivity(Svc::ThreadActivity activity);
 
-    [[nodiscard]] Result Sleep(s64 timeout);
+    Result Sleep(s64 timeout);
 
-    [[nodiscard]] s64 GetYieldScheduleCount() const {
-        return schedule_count;
+    s64 GetYieldScheduleCount() const {
+        return m_schedule_count;
     }
 
     void SetYieldScheduleCount(s64 count) {
-        schedule_count = count;
+        m_schedule_count = count;
     }
 
     void WaitCancel();
 
-    [[nodiscard]] bool IsWaitCancelled() const {
-        return wait_cancelled;
+    bool IsWaitCancelled() const {
+        return m_wait_cancelled;
     }
 
     void ClearWaitCancelled() {
-        wait_cancelled = false;
+        m_wait_cancelled = false;
    }
 
-    [[nodiscard]] bool IsCancellable() const {
-        return cancellable;
+    bool IsCancellable() const {
+        return m_cancellable;
     }
 
     void SetCancellable() {
-        cancellable = true;
+        m_cancellable = true;
     }
 
     void ClearCancellable() {
-        cancellable = false;
+        m_cancellable = false;
     }
 
-    [[nodiscard]] bool IsTerminationRequested() const {
-        return termination_requested || GetRawState() == ThreadState::Terminated;
+    bool IsTerminationRequested() const {
+        return m_termination_requested || GetRawState() == ThreadState::Terminated;
     }
 
-    [[nodiscard]] u64 GetId() const override {
-        return this->GetThreadID();
+    u64 GetId() const override {
+        return this->GetThreadId();
     }
 
-    [[nodiscard]] bool IsInitialized() const override {
-        return initialized;
+    bool IsInitialized() const override {
+        return m_initialized;
     }
 
-    [[nodiscard]] uintptr_t GetPostDestroyArgument() const override {
-        return reinterpret_cast<uintptr_t>(parent) | (resource_limit_release_hint ? 1 : 0);
+    uintptr_t GetPostDestroyArgument() const override {
+        return reinterpret_cast<uintptr_t>(m_parent) | (m_resource_limit_release_hint ? 1 : 0);
     }
 
     void Finalize() override;
 
-    [[nodiscard]] bool IsSignaled() const override;
+    bool IsSignaled() const override;
 
     void OnTimer();
 
@@ -413,26 +409,22 @@ public:
 
     static void PostDestroy(uintptr_t arg);
 
-    [[nodiscard]] static Result InitializeDummyThread(KThread* thread, KProcess* owner);
+    static Result InitializeDummyThread(KThread* thread, KProcess* owner);
 
-    [[nodiscard]] static Result InitializeMainThread(Core::System& system, KThread* thread,
-                                                     s32 virt_core);
+    static Result InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core);
 
-    [[nodiscard]] static Result InitializeIdleThread(Core::System& system, KThread* thread,
-                                                     s32 virt_core);
+    static Result InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core);
 
-    [[nodiscard]] static Result InitializeHighPriorityThread(Core::System& system, KThread* thread,
-                                                             KThreadFunction func, uintptr_t arg,
-                                                             s32 virt_core);
+    static Result InitializeHighPriorityThread(Core::System& system, KThread* thread,
+                                               KThreadFunction func, uintptr_t arg, s32 virt_core);
 
-    [[nodiscard]] static Result InitializeUserThread(Core::System& system, KThread* thread,
-                                                     KThreadFunction func, uintptr_t arg,
-                                                     VAddr user_stack_top, s32 prio, s32 virt_core,
-                                                     KProcess* owner);
+    static Result InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
+                                       uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
+                                       KProcess* owner);
 
-    [[nodiscard]] static Result InitializeServiceThread(Core::System& system, KThread* thread,
-                                                        std::function<void()>&& thread_func,
-                                                        s32 prio, s32 virt_core, KProcess* owner);
+    static Result InitializeServiceThread(Core::System& system, KThread* thread,
+                                          std::function<void()>&& thread_func, s32 prio,
+                                          s32 virt_core, KProcess* owner);
 
 public:
     struct StackParameters {
@@ -446,12 +438,12 @@ public:
         KThread* cur_thread;
     };
 
-    [[nodiscard]] StackParameters& GetStackParameters() {
-        return stack_parameters;
+    StackParameters& GetStackParameters() {
+        return m_stack_parameters;
     }
 
-    [[nodiscard]] const StackParameters& GetStackParameters() const {
-        return stack_parameters;
+    const StackParameters& GetStackParameters() const {
+        return m_stack_parameters;
     }
 
     class QueueEntry {
@@ -459,37 +451,37 @@ public:
         constexpr QueueEntry() = default;
 
         constexpr void Initialize() {
-            prev = nullptr;
-            next = nullptr;
+            m_prev = nullptr;
+            m_next = nullptr;
         }
 
         constexpr KThread* GetPrev() const {
-            return prev;
+            return m_prev;
         }
 
         constexpr KThread* GetNext() const {
-            return next;
+            return m_next;
         }
 
         constexpr void SetPrev(KThread* thread) {
-            prev = thread;
+            m_prev = thread;
         }
 
         constexpr void SetNext(KThread* thread) {
-            next = thread;
+            m_next = thread;
         }
 
     private:
-        KThread* prev{};
-        KThread* next{};
+        KThread* m_prev{};
+        KThread* m_next{};
     };
 
-    [[nodiscard]] QueueEntry& GetPriorityQueueEntry(s32 core) {
-        return per_core_priority_queue_entry[core];
+    QueueEntry& GetPriorityQueueEntry(s32 core) {
+        return m_per_core_priority_queue_entry[core];
     }
 
-    [[nodiscard]] const QueueEntry& GetPriorityQueueEntry(s32 core) const {
-        return per_core_priority_queue_entry[core];
+    const QueueEntry& GetPriorityQueueEntry(s32 core) const {
+        return m_per_core_priority_queue_entry[core];
    }
 
-    [[nodiscard]] s32 GetDisableDispatchCount() const {
+    s32 GetDisableDispatchCount() const {
         return this->GetStackParameters().disable_count;
     }
 
@@ -515,7 +507,7 @@ public:
         this->GetStackParameters().is_in_exception_handler = false;
     }
 
-    [[nodiscard]] bool IsInExceptionHandler() const {
+    bool IsInExceptionHandler() const {
         return this->GetStackParameters().is_in_exception_handler;
     }
 
@@ -527,11 +519,11 @@ public:
         this->GetStackParameters().is_calling_svc = false;
     }
 
-    [[nodiscard]] bool IsCallingSvc() const {
+    bool IsCallingSvc() const {
         return this->GetStackParameters().is_calling_svc;
     }
 
-    [[nodiscard]] u8 GetSvcId() const {
+    u8 GetSvcId() const {
         return this->GetStackParameters().current_svc_id;
     }
 
@@ -543,78 +535,54 @@ public:
         this->GetStackParameters().dpc_flags &= ~static_cast<u8>(flag);
     }
 
-    [[nodiscard]] u8 GetDpc() const {
+    u8 GetDpc() const {
         return this->GetStackParameters().dpc_flags;
     }
 
-    [[nodiscard]] bool HasDpc() const {
+    bool HasDpc() const {
         return this->GetDpc() != 0;
     }
 
     void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
-        wait_reason_for_debugging = reason;
-    }
-
-    [[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
-        return wait_reason_for_debugging;
-    }
-
-    [[nodiscard]] ThreadType GetThreadType() const {
-        return thread_type;
-    }
-
-    [[nodiscard]] bool IsDummyThread() const {
-        return GetThreadType() == ThreadType::Dummy;
-    }
-
-    void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
-        wait_objects_for_debugging.clear();
-        wait_objects_for_debugging.reserve(objects.size());
-        for (const auto& object : objects) {
-            wait_objects_for_debugging.emplace_back(object);
-        }
+        m_wait_reason_for_debugging = reason;
     }
 
-    [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
-        return wait_objects_for_debugging;
+    ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
+        return m_wait_reason_for_debugging;
    }
 
-    void SetMutexWaitAddressForDebugging(VAddr address) {
-        mutex_wait_address_for_debugging = address;
+    ThreadType GetThreadType() const {
+        return m_thread_type;
    }
 
-    [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const {
-        return mutex_wait_address_for_debugging;
-    }
-
-    [[nodiscard]] s32 GetIdealCoreForDebugging() const {
-        return virtual_ideal_core_id;
+    bool IsDummyThread() const {
+        return this->GetThreadType() == ThreadType::Dummy;
     }
 
     void AddWaiter(KThread* thread);
 
     void RemoveWaiter(KThread* thread);
 
-    [[nodiscard]] Result GetThreadContext3(std::vector<u8>& out);
+    Result GetThreadContext3(std::vector<u8>& out);
 
-    [[nodiscard]] KThread* RemoveUserWaiterByKey(bool* out_has_waiters, VAddr key) {
+    KThread* RemoveUserWaiterByKey(bool* out_has_waiters, VAddr key) {
         return this->RemoveWaiterByKey(out_has_waiters, key, false);
     }
 
-    [[nodiscard]] KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, VAddr key) {
+    KThread* RemoveKernelWaiterByKey(bool* out_has_waiters, VAddr key) {
         return this->RemoveWaiterByKey(out_has_waiters, key, true);
     }
 
-    [[nodiscard]] VAddr GetAddressKey() const {
-        return address_key;
+    VAddr GetAddressKey() const {
+        return m_address_key;
     }
 
-    [[nodiscard]] u32 GetAddressKeyValue() const {
-        return address_key_value;
+    u32 GetAddressKeyValue() const {
+        return m_address_key_value;
    }
 
-    [[nodiscard]] bool GetIsKernelAddressKey() const {
-        return is_kernel_address_key;
+    bool GetIsKernelAddressKey() const {
+        return m_is_kernel_address_key;
    }
 
     //! NB: intentional deviation from official kernel.
@@ -624,37 +592,37 @@ public:
     // into things.
     void SetUserAddressKey(VAddr key, u32 val) {
-        ASSERT(waiting_lock_info == nullptr);
-        address_key = key;
-        address_key_value = val;
-        is_kernel_address_key = false;
+        ASSERT(m_waiting_lock_info == nullptr);
+        m_address_key = key;
+        m_address_key_value = val;
+        m_is_kernel_address_key = false;
     }
 
     void SetKernelAddressKey(VAddr key) {
-        ASSERT(waiting_lock_info == nullptr);
-        address_key = key;
-        is_kernel_address_key = true;
+        ASSERT(m_waiting_lock_info == nullptr);
+        m_address_key = key;
+        m_is_kernel_address_key = true;
     }
 
     void ClearWaitQueue() {
-        wait_queue = nullptr;
+        m_wait_queue = nullptr;
     }
 
     void BeginWait(KThreadQueue* queue);
-    void NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_);
-    void EndWait(Result wait_result_);
-    void CancelWait(Result wait_result_, bool cancel_timer_task);
+    void NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result);
+    void EndWait(Result wait_result);
+    void CancelWait(Result wait_result, bool cancel_timer_task);
 
-    [[nodiscard]] s32 GetNumKernelWaiters() const {
-        return num_kernel_waiters;
+    s32 GetNumKernelWaiters() const {
+        return m_num_kernel_waiters;
     }
 
-    [[nodiscard]] u64 GetConditionVariableKey() const {
-        return condvar_key;
+    u64 GetConditionVariableKey() const {
+        return m_condvar_key;
    }
 
-    [[nodiscard]] u64 GetAddressArbiterKey() const {
-        return condvar_key;
+    u64 GetAddressArbiterKey() const {
+        return m_condvar_key;
     }
 
     // Dummy threads (used for HLE host threads) cannot wait based on the guest scheduler, and
@@ -665,17 +633,16 @@ public:
     void DummyThreadBeginWait();
 
     void DummyThreadEndWait();
 
-    [[nodiscard]] uintptr_t GetArgument() const {
-        return argument;
+    uintptr_t GetArgument() const {
+        return m_argument;
     }
 
-    [[nodiscard]] VAddr GetUserStackTop() const {
-        return stack_top;
+    VAddr GetUserStackTop() const {
+        return m_stack_top;
     }
 
 private:
-    [[nodiscard]] KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key,
-                                             bool is_kernel_address_key);
+    KThread* RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key);
 
     static constexpr size_t PriorityInheritanceCountMax = 10;
 
     union SyncObjectBuffer {
@@ -692,11 +659,11 @@ private:
            u64 cv_key{};
            s32 priority{};
 
-            [[nodiscard]] constexpr u64 GetConditionVariableKey() const {
+            constexpr u64 GetConditionVariableKey() const {
                return cv_key;
            }
 
-            [[nodiscard]] constexpr s32 GetPriority() const {
+            constexpr s32 GetPriority() const {
                return priority;
            }
        };
@@ -728,22 +695,21 @@ private:
 
     void IncreaseBasePriority(s32 priority);
 
-    [[nodiscard]] Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top,
-                                    s32 prio, s32 virt_core, KProcess* owner, ThreadType type);
+    Result Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
+                      s32 virt_core, KProcess* owner, ThreadType type);
 
-    [[nodiscard]] static Result InitializeThread(KThread* thread, KThreadFunction func,
-                                                 uintptr_t arg, VAddr user_stack_top, s32 prio,
-                                                 s32 core, KProcess* owner, ThreadType type,
-                                                 std::function<void()>&& init_func);
+    static Result InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
+                                   VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
+                                   ThreadType type, std::function<void()>&& init_func);
 
     // For core KThread implementation
-    ThreadContext32 thread_context_32{};
-    ThreadContext64 thread_context_64{};
-    Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
-    s32 priority{};
+    ThreadContext32 m_thread_context_32{};
+    ThreadContext64 m_thread_context_64{};
+    Common::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node{};
+    s32 m_priority{};
 
     using ConditionVariableThreadTreeTraits =
         Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
-            &KThread::condvar_arbiter_tree_node>;
+            &KThread::m_condvar_arbiter_tree_node>;
     using ConditionVariableThreadTree =
         ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
 
@@ -773,7 +739,7 @@ private:
 
     using LockWithPriorityInheritanceThreadTreeTraits =
         Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<
-            &KThread::condvar_arbiter_tree_node>;
+            &KThread::m_condvar_arbiter_tree_node>;
     using LockWithPriorityInheritanceThreadTree =
         ConditionVariableThreadTreeTraits::TreeType<LockWithPriorityInheritanceComparator>;
 
@@ -809,7 +775,7 @@ public:
             waiter->SetWaitingLockInfo(this);
         }
 
-        [[nodiscard]] bool RemoveWaiter(KThread* waiter) {
+        bool RemoveWaiter(KThread* waiter) {
             m_tree.erase(m_tree.iterator_to(*waiter));
 
             waiter->SetWaitingLockInfo(nullptr);
@@ -853,11 +819,11 @@ public:
     };
 
     void SetWaitingLockInfo(LockWithPriorityInheritanceInfo* lock) {
-        waiting_lock_info = lock;
+        m_waiting_lock_info = lock;
    }
 
     LockWithPriorityInheritanceInfo* GetWaitingLockInfo() {
-        return waiting_lock_info;
+        return m_waiting_lock_info;
    }
 
     void AddHeldLock(LockWithPriorityInheritanceInfo* lock_info);
@@ -867,110 +833,108 @@ private:
     using LockWithPriorityInheritanceInfoList =
         boost::intrusive::list<LockWithPriorityInheritanceInfo>;
 
-    ConditionVariableThreadTree* condvar_tree{};
-    u64 condvar_key{};
-    u64 virtual_affinity_mask{};
-    KAffinityMask physical_affinity_mask{};
-    u64 thread_id{};
-    std::atomic<s64> cpu_time{};
-    VAddr address_key{};
-    KProcess* parent{};
-    VAddr kernel_stack_top{};
-    u32* light_ipc_data{};
-    VAddr tls_address{};
-    KLightLock activity_pause_lock;
-    s64 schedule_count{};
-    s64 last_scheduled_tick{};
-    std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
-    KThreadQueue* wait_queue{};
-    LockWithPriorityInheritanceInfoList held_lock_info_list{};
-    LockWithPriorityInheritanceInfo* waiting_lock_info{};
-    WaiterList pinned_waiter_list{};
-    u32 address_key_value{};
-    u32 suspend_request_flags{};
-    u32 suspend_allowed_flags{};
-    s32 synced_index{};
-    Result wait_result{ResultSuccess};
-    s32 base_priority{};
-    s32 physical_ideal_core_id{};
-    s32 virtual_ideal_core_id{};
-    s32 num_kernel_waiters{};
-    s32 current_core_id{};
-    s32 core_id{};
-    KAffinityMask original_physical_affinity_mask{};
-    s32 original_physical_ideal_core_id{};
-    s32 num_core_migration_disables{};
-    std::atomic<ThreadState> thread_state{};
-    std::atomic<bool> termination_requested{};
-    bool wait_cancelled{};
-    bool cancellable{};
-    bool signaled{};
-    bool initialized{};
-    bool debug_attached{};
-    s8 priority_inheritance_count{};
-    bool resource_limit_release_hint{};
-    bool is_kernel_address_key{};
-    StackParameters stack_parameters{};
-    Common::SpinLock context_guard{};
+    ConditionVariableThreadTree* m_condvar_tree{};
+    u64 m_condvar_key{};
+    u64 m_virtual_affinity_mask{};
+    KAffinityMask m_physical_affinity_mask{};
+    u64 m_thread_id{};
+    std::atomic<s64> m_cpu_time{};
+    VAddr m_address_key{};
+    KProcess* m_parent{};
+    VAddr m_kernel_stack_top{};
+    u32* m_light_ipc_data{};
+    VAddr m_tls_address{};
+    KLightLock m_activity_pause_lock;
+    s64 m_schedule_count{};
+    s64 m_last_scheduled_tick{};
+    std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> m_per_core_priority_queue_entry{};
+    KThreadQueue* m_wait_queue{};
+    LockWithPriorityInheritanceInfoList m_held_lock_info_list{};
+    LockWithPriorityInheritanceInfo* m_waiting_lock_info{};
+    WaiterList m_pinned_waiter_list{};
+    u32 m_address_key_value{};
+    u32 m_suspend_request_flags{};
+    u32 m_suspend_allowed_flags{};
+    s32 m_synced_index{};
+    Result m_wait_result{ResultSuccess};
+    s32 m_base_priority{};
+    s32 m_physical_ideal_core_id{};
+    s32 m_virtual_ideal_core_id{};
+    s32 m_num_kernel_waiters{};
+    s32 m_current_core_id{};
+    s32 m_core_id{};
+    KAffinityMask m_original_physical_affinity_mask{};
+    s32 m_original_physical_ideal_core_id{};
+    s32 m_num_core_migration_disables{};
+    std::atomic<ThreadState> m_thread_state{};
+    std::atomic<bool> m_termination_requested{};
+    bool m_wait_cancelled{};
+    bool m_cancellable{};
+    bool m_signaled{};
+    bool m_initialized{};
+    bool m_debug_attached{};
+    s8 m_priority_inheritance_count{};
+    bool m_resource_limit_release_hint{};
+    bool m_is_kernel_address_key{};
+    StackParameters m_stack_parameters{};
+    Common::SpinLock m_context_guard{};
 
     // For emulation
-    std::shared_ptr<Common::Fiber> host_context{};
-    bool is_single_core{};
-    ThreadType thread_type{};
-    StepState step_state{};
-    std::atomic<bool> dummy_thread_runnable{true};
+    std::shared_ptr<Common::Fiber> m_host_context{};
+    ThreadType m_thread_type{};
+    StepState m_step_state{};
+    std::atomic<bool> m_dummy_thread_runnable{true};
 
     // For debugging
-    std::vector<KSynchronizationObject*> wait_objects_for_debugging;
-    VAddr mutex_wait_address_for_debugging{};
-    ThreadWaitReasonForDebugging wait_reason_for_debugging{};
-    uintptr_t argument{};
-    VAddr stack_top{};
-    std::string name{};
+    std::vector<KSynchronizationObject*> m_wait_objects_for_debugging{};
+    VAddr m_mutex_wait_address_for_debugging{};
+    ThreadWaitReasonForDebugging m_wait_reason_for_debugging{};
+    uintptr_t m_argument{};
+    VAddr m_stack_top{};
 
 public:
     using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
 
     void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, u64 cv_key,
                               u32 value) {
-        ASSERT(waiting_lock_info == nullptr);
-        condvar_tree = tree;
-        condvar_key = cv_key;
-        address_key = address;
-        address_key_value = value;
-        is_kernel_address_key = false;
+        ASSERT(m_waiting_lock_info == nullptr);
+        m_condvar_tree = tree;
+        m_condvar_key = cv_key;
+        m_address_key = address;
+        m_address_key_value = value;
+        m_is_kernel_address_key = false;
     }
 
     void ClearConditionVariable() {
-        condvar_tree = nullptr;
+        m_condvar_tree = nullptr;
     }
 
-    [[nodiscard]] bool IsWaitingForConditionVariable() const {
-        return condvar_tree != nullptr;
+    bool IsWaitingForConditionVariable() const {
+        return m_condvar_tree != nullptr;
     }
 
     void SetAddressArbiter(ConditionVariableThreadTree* tree, u64 address) {
-        ASSERT(waiting_lock_info == nullptr);
-        condvar_tree = tree;
-        condvar_key = address;
+        ASSERT(m_waiting_lock_info == nullptr);
+        m_condvar_tree = tree;
+        m_condvar_key = address;
     }
 
     void ClearAddressArbiter() {
-        condvar_tree = nullptr;
+        m_condvar_tree = nullptr;
    }
 
-    [[nodiscard]] bool IsWaitingForAddressArbiter() const {
-        return condvar_tree != nullptr;
+    bool IsWaitingForAddressArbiter() const {
+        return m_condvar_tree != nullptr;
    }
 
-    [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
-        return condvar_tree;
+    ConditionVariableThreadTree* GetConditionVariableTree() const {
+        return m_condvar_tree;
     }
 };
 
 class KScopedDisableDispatch {
 public:
-    [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel) : m_kernel{kernel} {
+    explicit KScopedDisableDispatch(KernelCore& kernel) : m_kernel{kernel} {
         // If we are shutting down the kernel, none of this is relevant anymore.
         if (m_kernel.IsShuttingDown()) {
             return;
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index c236e9976..f35fa95b5 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -214,7 +214,6 @@ struct KernelCore::Impl {
             cores[i] = std::make_unique<Kernel::PhysicalCore>(i, system, *schedulers[i]);
 
             auto* main_thread{Kernel::KThread::Create(system.Kernel())};
-            main_thread->SetName(fmt::format("MainThread:{}", core));
             main_thread->SetCurrentCore(core);
             ASSERT(Kernel::KThread::InitializeMainThread(system, main_thread, core).IsSuccess());
 
@@ -356,7 +355,6 @@ struct KernelCore::Impl {
             ASSERT(KThread::InitializeHighPriorityThread(system, shutdown_threads[core_id], {},
                                                          {}, core_id)
                        .IsSuccess());
-            shutdown_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
         }
     }
 
@@ -390,7 +388,6 @@ struct KernelCore::Impl {
     KThread* GetHostDummyThread(KThread* existing_thread) {
         auto initialize = [this](KThread* thread) {
             ASSERT(KThread::InitializeDummyThread(thread, nullptr).IsSuccess());
-            thread->SetName(fmt::format("DummyThread:{}", next_host_thread_id++));
             return thread;
         };
 
diff --git a/src/core/hle/kernel/svc/svc_thread.cpp b/src/core/hle/kernel/svc/svc_thread.cpp
index a16fc7ae3..50991fb62 100644
--- a/src/core/hle/kernel/svc/svc_thread.cpp
+++ b/src/core/hle/kernel/svc/svc_thread.cpp
@@ -59,9 +59,6 @@ Result CreateThread(Core::System& system, Handle* out_handle, VAddr entry_point,
                                           priority, core_id, std::addressof(process)));
     }
 
-    // Set the thread name for debugging purposes.
-    thread->SetName(fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *out_handle));
-
     // Commit the thread reservation.
     thread_reservation.Commit();
 
@@ -252,7 +249,7 @@ Result GetThreadList(Core::System& system, s32* out_num_threads, VAddr out_threa
 
     auto list_iter = thread_list.cbegin();
     for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
-        memory.Write64(out_thread_ids, (*list_iter)->GetThreadID());
+        memory.Write64(out_thread_ids, (*list_iter)->GetThreadId());
         out_thread_ids += sizeof(u64);
     }
 
diff --git a/src/core/hle/service/hle_ipc.cpp b/src/core/hle/service/hle_ipc.cpp
index c221ffe11..cca697c64 100644
--- a/src/core/hle/service/hle_ipc.cpp
+++ b/src/core/hle/service/hle_ipc.cpp
@@ -303,7 +303,7 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(Kernel::KThread& requesti
     }
 
     // Copy the translated command buffer back into the thread's command buffer area.
-    memory.WriteBlock(owner_process, requesting_thread.GetTLSAddress(), cmd_buf.data(),
+    memory.WriteBlock(owner_process, requesting_thread.GetTlsAddress(), cmd_buf.data(),
                       write_size * sizeof(u32));
 
     return ResultSuccess;
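A note on the suspend-flag encoding renamed in the k_thread.h hunks above: GetSuspendFlags() ANDs the per-type request bits against an allowed mask, and IsSuspendRequested() tests one bit per SuspendType starting at ThreadState::SuspendShift. The sketch below is illustrative only; the SuspendType enumerators and the shift value are assumptions, not the exact k_thread.h definitions.

    // Standalone sketch of the suspend-flag packing; enum values are assumed.
    #include <cstdint>

    enum class SuspendType : std::uint32_t { Process = 0, Thread = 1, Debug = 2 };
    constexpr std::uint32_t SuspendShift = 4; // assumed position of the first suspend bit

    constexpr std::uint32_t SuspendFlagBit(SuspendType type) {
        return 1U << (SuspendShift + static_cast<std::uint32_t>(type));
    }

    struct SuspendState {
        std::uint32_t request_flags{}; // bits set by RequestSuspend(type)
        std::uint32_t allowed_flags{}; // which suspend types may take effect right now

        // Mirrors KThread::GetSuspendFlags(): the thread counts as suspended
        // only when a requested suspend type is also currently allowed.
        std::uint32_t GetSuspendFlags() const { return allowed_flags & request_flags; }
        bool IsSuspendRequested(SuspendType type) const {
            return (request_flags & SuspendFlagBit(type)) != 0;
        }
        bool IsSuspended() const { return GetSuspendFlags() != 0; }
    };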
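GetPostDestroyArgument(), also touched above, folds two values into one uintptr_t: the owning KProcess pointer plus a one-bit resource-limit release hint in the pointer's low bit, which alignment guarantees to be zero. A hedged sketch of the idiom; the helper names and the Process stand-in are invented for illustration, and the real unpacking lives in KThread::PostDestroy() in k_thread.cpp.

    #include <cstdint>

    struct Process; // illustrative stand-in for KProcess

    // Pack: pointer in the high bits, hint in bit 0 (free due to alignment).
    inline std::uintptr_t PackPostDestroyArg(Process* parent, bool release_hint) {
        return reinterpret_cast<std::uintptr_t>(parent) | (release_hint ? 1u : 0u);
    }

    // Unpack: mask the tag bit off before converting back to a pointer.
    inline Process* UnpackParent(std::uintptr_t arg, bool* out_release_hint) {
        *out_release_hint = (arg & 1) != 0;
        return reinterpret_cast<Process*>(arg & ~static_cast<std::uintptr_t>(1));
    }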
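The renamed SetTpidrEl0() keeps the AArch32 and AArch64 views of the thread-pointer register in sync by writing the full value to the 64-bit context and a truncated copy to the 32-bit one. A minimal sketch, assuming simplified context structs (the real ThreadContext32/ThreadContext64 carry full register files):

    #include <cstdint>

    struct Context64 { std::uint64_t tpidr{}; }; // simplified stand-ins for
    struct Context32 { std::uint32_t tpidr{}; }; // ThreadContext64/ThreadContext32

    struct ThreadContexts {
        Context64 ctx64;
        Context32 ctx32;

        void SetTpidrEl0(std::uint64_t value) {
            ctx64.tpidr = value;                             // full 64-bit value
            ctx32.tpidr = static_cast<std::uint32_t>(value); // low 32 bits only
        }
    };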
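Finally, the QueueEntry links renamed above exist so each thread can sit in the scheduler's per-core priority queues without any per-node allocation: the prev/next pointers live inside the KThread itself, one pair per core. A rough single-queue sketch of that intrusive-list idea, with all names invented for illustration:

    // Intrusive doubly-linked queue: nodes are embedded in the elements.
    struct Thread {
        Thread* prev{};
        Thread* next{};
    };

    struct IntrusiveQueue {
        Thread* head{};
        Thread* tail{};

        void PushBack(Thread* t) {
            t->prev = tail;
            t->next = nullptr;
            (tail ? tail->next : head) = t; // append, or start the list
            tail = t;
        }

        void Remove(Thread* t) {
            (t->prev ? t->prev->next : head) = t->next; // unlink from predecessor
            (t->next ? t->next->prev : tail) = t->prev; // unlink from successor
            t->prev = t->next = nullptr;
        }
    };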