author    | bunnei <bunneidev@gmail.com> | 2022-01-27 21:17:14 +0100
committer | bunnei <bunneidev@gmail.com> | 2022-01-27 21:17:14 +0100
commit    | 3a1a3dd0db4256eef6382706c84c7b908b48bccd (patch)
tree      | c042cfa45d8c043371e7e41d16ec619be5ed5803 /src/core/hle
parent    | Merge pull request #7783 from lioncash/abi-cexpr (diff)
Diffstat (limited to 'src/core/hle')
-rw-r--r-- | src/core/hle/kernel/k_priority_queue.h | 2
-rw-r--r-- | src/core/hle/kernel/k_scheduler.cpp | 45
2 files changed, 24 insertions, 23 deletions
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index 0b894c8cf..bd779739d 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -258,7 +258,7 @@ private:
 
 private:
     constexpr void ClearAffinityBit(u64& affinity, s32 core) {
-        affinity &= ~(u64(1) << core);
+        affinity &= ~(UINT64_C(1) << core);
     }
 
     constexpr s32 GetNextCore(u64& affinity) {
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index b32d4f285..c96520828 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -710,23 +710,19 @@ void KScheduler::Unload(KThread* thread) {
 }
 
 void KScheduler::Reload(KThread* thread) {
-    LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");
+    LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread->GetName());
 
-    if (thread) {
-        ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
-
-        Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
-        cpu_core.LoadContext(thread->GetContext32());
-        cpu_core.LoadContext(thread->GetContext64());
-        cpu_core.SetTlsAddress(thread->GetTLSAddress());
-        cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
-        cpu_core.ClearExclusiveState();
-    }
+    Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+    cpu_core.LoadContext(thread->GetContext32());
+    cpu_core.LoadContext(thread->GetContext64());
+    cpu_core.SetTlsAddress(thread->GetTLSAddress());
+    cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+    cpu_core.ClearExclusiveState();
 }
 
 void KScheduler::SwitchContextStep2() {
     // Load context of new thread
-    Reload(current_thread.load());
+    Reload(GetCurrentThread());
 
     RescheduleCurrentCore();
 }
@@ -735,13 +731,17 @@ void KScheduler::ScheduleImpl() {
     KThread* previous_thread = GetCurrentThread();
     KThread* next_thread = state.highest_priority_thread;
 
-    state.needs_scheduling = false;
+    state.needs_scheduling.store(false);
 
     // We never want to schedule a null thread, so use the idle thread if we don't have a next.
     if (next_thread == nullptr) {
         next_thread = idle_thread;
    }
 
+    if (next_thread->GetCurrentCore() != core_id) {
+        next_thread->SetCurrentCore(core_id);
+    }
+
     // We never want to schedule a dummy thread, as these are only used by host threads for locking.
     if (next_thread->GetThreadType() == ThreadType::Dummy) {
         ASSERT_MSG(false, "Dummy threads should never be scheduled!");
@@ -755,14 +755,8 @@ void KScheduler::ScheduleImpl() {
         return;
     }
 
-    if (next_thread->GetCurrentCore() != core_id) {
-        next_thread->SetCurrentCore(core_id);
-    }
-
-    current_thread.store(next_thread);
-
+    // Update the CPU time tracking variables.
     KProcess* const previous_process = system.Kernel().CurrentProcess();
-
     UpdateLastContextSwitchTime(previous_thread, previous_process);
 
     // Save context for previous thread
@@ -770,6 +764,10 @@ void KScheduler::ScheduleImpl() {
 
     std::shared_ptr<Common::Fiber>* old_context;
     old_context = &previous_thread->GetHostContext();
+
+    // Set the new thread.
+    current_thread.store(next_thread);
+
     guard.Unlock();
 
     Common::Fiber::YieldTo(*old_context, *switch_fiber);
@@ -797,8 +795,8 @@ void KScheduler::SwitchToCurrent() {
         do {
             auto next_thread = current_thread.load();
             if (next_thread != nullptr) {
-                next_thread->context_guard.Lock();
-                if (next_thread->GetRawState() != ThreadState::Runnable) {
+                const auto locked = next_thread->context_guard.TryLock();
+                if (state.needs_scheduling.load()) {
                     next_thread->context_guard.Unlock();
                     break;
                 }
@@ -806,6 +804,9 @@ void KScheduler::SwitchToCurrent() {
                     next_thread->context_guard.Unlock();
                     break;
                 }
+                if (!locked) {
+                    continue;
+                }
             }
             auto thread = next_thread ? next_thread : idle_thread;
             Common::Fiber::YieldTo(switch_fiber, *thread->GetHostContext());
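
Two patterns in this diff may be worth a closer look. The `k_priority_queue.h` hunk swaps `u64(1)` for `UINT64_C(1)` when clearing a core's bit in the affinity mask; both forms produce an unsigned 64-bit one, so the shift by `core` stays well-defined for indices up to 63, and the change is essentially stylistic. A minimal standalone sketch of the bit-clearing pattern, using standard types rather than yuzu's aliases (not code from the commit):

```cpp
#include <cstdint>
#include <cstdio>

// Clear the bit for `core` in a 64-bit affinity mask.
// UINT64_C(1) makes the literal an unsigned 64-bit value, so shifting by any
// core index in 0..63 is well-defined (a plain `1` would be a 32-bit int).
constexpr void ClearAffinityBit(std::uint64_t& affinity, int core) {
    affinity &= ~(UINT64_C(1) << core);
}

int main() {
    std::uint64_t affinity = 0b1111;   // cores 0-3 allowed
    ClearAffinityBit(affinity, 2);     // remove core 2
    std::printf("%#llx\n", static_cast<unsigned long long>(affinity));  // prints 0xb
}
```

The `SwitchToCurrent()` hunks replace a blocking `Lock()` with `TryLock()` and re-check `state.needs_scheduling` before yielding to the chosen thread. A rough sketch of that try-lock/recheck/retry shape, using `std::mutex` and plain atomics as stand-ins for yuzu's guard and scheduler state (hypothetical names and simplified control flow, not the kernel's actual classes):

```cpp
#include <atomic>
#include <mutex>

struct Thread {
    std::mutex context_guard;  // stand-in for the per-thread context guard
};

std::atomic<Thread*> current_thread{nullptr};
std::atomic<bool> needs_scheduling{false};

// Try to take the candidate thread's guard without blocking; back off if a
// new scheduling decision arrived, and spin again if the guard was contended.
void SwitchToCurrentSketch() {
    while (true) {
        Thread* next = current_thread.load();
        if (next != nullptr) {
            const bool locked = next->context_guard.try_lock();
            if (needs_scheduling.load()) {
                if (locked) {
                    next->context_guard.unlock();
                }
                return;  // a newer schedule decision is pending; start over
            }
            if (!locked) {
                continue;  // lost the race for the guard; retry
            }
            // ... switch to `next` here ...
            next->context_guard.unlock();
        }
        return;
    }
}
```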