Diffstat (limited to 'src/core/hle/kernel/k_scheduler.cpp')
-rw-r--r-- | src/core/hle/kernel/k_scheduler.cpp | 255 |
1 file changed, 143 insertions, 112 deletions
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 12b5619fb..bb5f43b53 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -17,25 +17,30 @@
 #include "core/cpu_manager.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/physical_core.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/kernel/thread.h"
 #include "core/hle/kernel/time_manager.h"
 
 namespace Kernel {
 
-static void IncrementScheduledCount(Kernel::Thread* thread) {
+static void IncrementScheduledCount(Kernel::KThread* thread) {
     if (auto process = thread->GetOwnerProcess(); process) {
         process->IncrementScheduledCount();
     }
 }
 
-void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
-                                 Core::EmuThreadHandle global_thread) {
-    const u32 current_core = global_thread.host_handle;
-    bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
-                               (current_core < Core::Hardware::NUM_CPU_CORES);
+void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule) {
+    auto scheduler = kernel.CurrentScheduler();
+
+    u32 current_core{0xF};
+    bool must_context_switch{};
+    if (scheduler) {
+        current_core = scheduler->core_id;
+        // TODO(bunnei): Should be set to true when we deprecate single core
+        must_context_switch = !kernel.IsPhantomModeForSingleCore();
+    }
 
     while (cores_pending_reschedule != 0) {
         const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule));
@@ -56,28 +61,27 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
     }
 }
 
-u64 KScheduler::UpdateHighestPriorityThread(Thread* highest_thread) {
+u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
     std::scoped_lock lock{guard};
-    if (Thread* prev_highest_thread = this->state.highest_priority_thread;
+    if (KThread* prev_highest_thread = state.highest_priority_thread;
         prev_highest_thread != highest_thread) {
         if (prev_highest_thread != nullptr) {
             IncrementScheduledCount(prev_highest_thread);
             prev_highest_thread->SetLastScheduledTick(system.CoreTiming().GetCPUTicks());
         }
-        if (this->state.should_count_idle) {
+        if (state.should_count_idle) {
             if (highest_thread != nullptr) {
-                // if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
-                //     process->SetRunningThread(this->core_id, highest_thread,
-                //                               this->state.idle_count);
-                //}
+                if (Process* process = highest_thread->GetOwnerProcess(); process != nullptr) {
+                    process->SetRunningThread(core_id, highest_thread, state.idle_count);
+                }
             } else {
-                this->state.idle_count++;
+                state.idle_count++;
             }
         }
 
-        this->state.highest_priority_thread = highest_thread;
-        this->state.needs_scheduling = true;
-        return (1ULL << this->core_id);
+        state.highest_priority_thread = highest_thread;
+        state.needs_scheduling.store(true);
+        return (1ULL << core_id);
     } else {
         return 0;
     }
@@ -90,16 +94,29 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
     ClearSchedulerUpdateNeeded(kernel);
 
     u64 cores_needing_scheduling = 0, idle_cores = 0;
-    Thread* top_threads[Core::Hardware::NUM_CPU_CORES];
+    KThread* top_threads[Core::Hardware::NUM_CPU_CORES];
     auto& priority_queue = GetPriorityQueue(kernel);
 
     /// We want to go over all cores, finding the highest priority thread and determining if
     /// scheduling is needed for that core.
     for (size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-        Thread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
+        KThread* top_thread = priority_queue.GetScheduledFront(static_cast<s32>(core_id));
         if (top_thread != nullptr) {
             // If the thread has no waiters, we need to check if the process has a thread pinned.
-            // TODO(bunnei): Implement thread pinning
+            if (top_thread->GetNumKernelWaiters() == 0) {
+                if (Process* parent = top_thread->GetOwnerProcess(); parent != nullptr) {
+                    if (KThread* pinned = parent->GetPinnedThread(static_cast<s32>(core_id));
+                        pinned != nullptr && pinned != top_thread) {
+                        // We prefer our parent's pinned thread if possible. However, we also don't
+                        // want to schedule un-runnable threads.
+                        if (pinned->GetRawState() == ThreadState::Runnable) {
+                            top_thread = pinned;
+                        } else {
+                            top_thread = nullptr;
+                        }
+                    }
+                }
+            }
         } else {
             idle_cores |= (1ULL << core_id);
         }
@@ -112,7 +129,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
     // Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
     while (idle_cores != 0) {
         const auto core_id = static_cast<u32>(std::countr_zero(idle_cores));
-        if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
+        if (KThread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
            s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
            size_t num_candidates = 0;
 
@@ -120,7 +137,7 @@
             while (suggested != nullptr) {
                 // Check if the suggested thread is the top thread on its core.
                 const s32 suggested_core = suggested->GetActiveCore();
-                if (Thread* top_thread =
+                if (KThread* top_thread =
                         (suggested_core >= 0) ? top_threads[suggested_core] : nullptr;
                     top_thread != suggested) {
                     // Make sure we're not dealing with threads too high priority for migration.
@@ -152,7 +169,7 @@
                     // Check if there's some other thread that can run on the candidate core.
                     const s32 candidate_core = migration_candidates[i];
                     suggested = top_threads[candidate_core];
-                    if (Thread* next_on_candidate_core =
+                    if (KThread* next_on_candidate_core =
                             priority_queue.GetScheduledNext(candidate_core, suggested);
                         next_on_candidate_core != nullptr) {
                         // The candidate core can run some other thread! We'll migrate its current
@@ -182,7 +199,20 @@
     return cores_needing_scheduling;
 }
 
-void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state) {
+void KScheduler::ClearPreviousThread(KernelCore& kernel, KThread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    for (size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; ++i) {
+        // Get an atomic reference to the core scheduler's previous thread.
+        std::atomic_ref<KThread*> prev_thread(kernel.Scheduler(static_cast<s32>(i)).prev_thread);
+        static_assert(std::atomic_ref<KThread*>::is_always_lock_free);
+
+        // Atomically clear the previous thread if it's our target.
+        KThread* compare = thread;
+        prev_thread.compare_exchange_strong(compare, nullptr);
+    }
+}
+
+void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, ThreadState old_state) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Check if the state has changed, because if it hasn't there's nothing to do.
@@ -205,7 +235,7 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, Thread
     }
 }
 
-void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32 old_priority) {
+void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s32 old_priority) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // If the thread is runnable, we want to change its priority in the queue.
@@ -217,7 +247,7 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32
     }
 }
 
-void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
+void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, KThread* thread,
                                              const KAffinityMask& old_affinity, s32 old_core) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
@@ -237,8 +267,8 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
     auto& priority_queue = GetPriorityQueue(kernel);
 
     // Rotate the front of the queue to the end.
-    Thread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
-    Thread* next_thread = nullptr;
+    KThread* top_thread = priority_queue.GetScheduledFront(core_id, priority);
+    KThread* next_thread = nullptr;
     if (top_thread != nullptr) {
         next_thread = priority_queue.MoveToScheduledBack(top_thread);
         if (next_thread != top_thread) {
@@ -249,11 +279,11 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
 
     // While we have a suggested thread, try to migrate it!
     {
-        Thread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
+        KThread* suggested = priority_queue.GetSuggestedFront(core_id, priority);
         while (suggested != nullptr) {
             // Check if the suggested thread is the top thread on its core.
             const s32 suggested_core = suggested->GetActiveCore();
-            if (Thread* top_on_suggested_core =
+            if (KThread* top_on_suggested_core =
                     (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                           : nullptr;
                 top_on_suggested_core != suggested) {
@@ -285,7 +315,7 @@
 
     // Now that we might have migrated a thread with the same priority, check if we can do better.
     {
-        Thread* best_thread = priority_queue.GetScheduledFront(core_id);
+        KThread* best_thread = priority_queue.GetScheduledFront(core_id);
         if (best_thread == GetCurrentThread()) {
             best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
         }
@@ -293,7 +323,7 @@
         // If the best thread we can choose has a priority the same or worse than ours, try to
         // migrate a higher priority thread.
         if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
-            Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+            KThread* suggested = priority_queue.GetSuggestedFront(core_id);
             while (suggested != nullptr) {
                 // If the suggestion's priority is the same as ours, don't bother.
                 if (suggested->GetPriority() >= best_thread->GetPriority()) {
@@ -302,7 +332,7 @@
 
                 // Check if the suggested thread is the top thread on its core.
                 const s32 suggested_core = suggested->GetActiveCore();
-                if (Thread* top_on_suggested_core =
+                if (KThread* top_on_suggested_core =
                         (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                               : nullptr;
                     top_on_suggested_core != suggested) {
@@ -352,12 +382,14 @@ void KScheduler::DisableScheduling(KernelCore& kernel) {
     }
 }
 
-void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling,
-                                  Core::EmuThreadHandle global_thread) {
+void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
     if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
-        scheduler->GetCurrentThread()->EnableDispatch();
+        ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
+        if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
+            scheduler->GetCurrentThread()->EnableDispatch();
+        }
     }
-    RescheduleCores(kernel, cores_needing_scheduling, global_thread);
+    RescheduleCores(kernel, cores_needing_scheduling);
 }
 
 u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -372,15 +404,13 @@ KSchedulerPriorityQueue& KScheduler::GetPriorityQueue(KernelCore& kernel) {
     return kernel.GlobalSchedulerContext().priority_queue;
 }
 
-void KScheduler::YieldWithoutCoreMigration() {
-    auto& kernel = system.Kernel();
-
+void KScheduler::YieldWithoutCoreMigration(KernelCore& kernel) {
     // Validate preconditions.
     ASSERT(CanSchedule(kernel));
     ASSERT(kernel.CurrentProcess() != nullptr);
 
     // Get the current thread and process.
-    Thread& cur_thread = *GetCurrentThread();
+    KThread& cur_thread = Kernel::GetCurrentThread(kernel);
     Process& cur_process = *kernel.CurrentProcess();
 
     // If the thread's yield count matches, there's nothing for us to do.
@@ -398,7 +428,7 @@ void KScheduler::YieldWithoutCoreMigration() {
     const auto cur_state = cur_thread.GetRawState();
     if (cur_state == ThreadState::Runnable) {
         // Put the current thread at the back of the queue.
-        Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
+        KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
         IncrementScheduledCount(std::addressof(cur_thread));
 
         // If the next thread is different, we have an update to perform.
@@ -413,15 +443,13 @@
     }
 }
 
-void KScheduler::YieldWithCoreMigration() {
-    auto& kernel = system.Kernel();
-
+void KScheduler::YieldWithCoreMigration(KernelCore& kernel) {
     // Validate preconditions.
     ASSERT(CanSchedule(kernel));
     ASSERT(kernel.CurrentProcess() != nullptr);
 
     // Get the current thread and process.
-    Thread& cur_thread = *GetCurrentThread();
+    KThread& cur_thread = Kernel::GetCurrentThread(kernel);
     Process& cur_process = *kernel.CurrentProcess();
 
     // If the thread's yield count matches, there's nothing for us to do.
@@ -442,17 +470,17 @@
         const s32 core_id = cur_thread.GetActiveCore();
 
         // Put the current thread at the back of the queue.
-        Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
+        KThread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
         IncrementScheduledCount(std::addressof(cur_thread));
 
         // While we have a suggested thread, try to migrate it!
         bool recheck = false;
-        Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+        KThread* suggested = priority_queue.GetSuggestedFront(core_id);
         while (suggested != nullptr) {
             // Check if the suggested thread is the thread running on its core.
             const s32 suggested_core = suggested->GetActiveCore();
-            if (Thread* running_on_suggested_core =
+            if (KThread* running_on_suggested_core =
                     (suggested_core >= 0)
                         ? kernel.Scheduler(suggested_core).state.highest_priority_thread
                         : nullptr;
@@ -503,15 +531,13 @@
     }
 }
 
-void KScheduler::YieldToAnyThread() {
-    auto& kernel = system.Kernel();
-
+void KScheduler::YieldToAnyThread(KernelCore& kernel) {
     // Validate preconditions.
     ASSERT(CanSchedule(kernel));
     ASSERT(kernel.CurrentProcess() != nullptr);
 
     // Get the current thread and process.
-    Thread& cur_thread = *GetCurrentThread();
+    KThread& cur_thread = Kernel::GetCurrentThread(kernel);
     Process& cur_process = *kernel.CurrentProcess();
 
     // If the thread's yield count matches, there's nothing for us to do.
@@ -539,11 +565,11 @@
         // If there's nothing scheduled, we can try to perform a migration.
         if (priority_queue.GetScheduledFront(core_id) == nullptr) {
             // While we have a suggested thread, try to migrate it!
-            Thread* suggested = priority_queue.GetSuggestedFront(core_id);
+            KThread* suggested = priority_queue.GetSuggestedFront(core_id);
             while (suggested != nullptr) {
                 // Check if the suggested thread is the top thread on its core.
                 const s32 suggested_core = suggested->GetActiveCore();
-                if (Thread* top_on_suggested_core =
+                if (KThread* top_on_suggested_core =
                         (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core)
                                               : nullptr;
                     top_on_suggested_core != suggested) {
@@ -581,22 +607,21 @@
     }
 }
 
-KScheduler::KScheduler(Core::System& system, std::size_t core_id)
-    : system(system), core_id(core_id) {
+KScheduler::KScheduler(Core::System& system, s32 core_id) : system(system), core_id(core_id) {
     switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
-    this->state.needs_scheduling = true;
-    this->state.interrupt_task_thread_runnable = false;
-    this->state.should_count_idle = false;
-    this->state.idle_count = 0;
-    this->state.idle_thread_stack = nullptr;
-    this->state.highest_priority_thread = nullptr;
+    state.needs_scheduling.store(true);
+    state.interrupt_task_thread_runnable = false;
+    state.should_count_idle = false;
+    state.idle_count = 0;
+    state.idle_thread_stack = nullptr;
+    state.highest_priority_thread = nullptr;
 }
 
 KScheduler::~KScheduler() = default;
 
-Thread* KScheduler::GetCurrentThread() const {
-    if (current_thread) {
-        return current_thread;
+KThread* KScheduler::GetCurrentThread() const {
+    if (auto result = current_thread.load(); result) {
+        return result;
     }
     return idle_thread;
 }
@@ -613,7 +638,7 @@ void KScheduler::RescheduleCurrentCore() {
         phys_core.ClearInterrupt();
     }
     guard.lock();
-    if (this->state.needs_scheduling) {
+    if (state.needs_scheduling.load()) {
         Schedule();
     } else {
         guard.unlock();
@@ -624,66 +649,76 @@ void KScheduler::OnThreadStart() {
     SwitchContextStep2();
 }
 
-void KScheduler::Unload(Thread* thread) {
+void KScheduler::Unload(KThread* thread) {
+    LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
+
     if (thread) {
-        thread->SetIsRunning(false);
-        if (thread->IsContinuousOnSVC() && !thread->IsHLEThread()) {
+        if (thread->IsCallingSvc()) {
             system.ArmInterface(core_id).ExceptionalExit();
-            thread->SetContinuousOnSVC(false);
+            thread->ClearIsCallingSvc();
         }
-        if (!thread->IsHLEThread() && !thread->HasExited()) {
+
+        if (!thread->IsTerminationRequested()) {
+            prev_thread = thread;
+
             Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
             cpu_core.SaveContext(thread->GetContext32());
             cpu_core.SaveContext(thread->GetContext64());
             // Save the TPIDR_EL0 system register in case it was modified.
             thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
             cpu_core.ClearExclusiveState();
+        } else {
+            prev_thread = nullptr;
         }
         thread->context_guard.unlock();
     }
 }
 
-void KScheduler::Reload(Thread* thread) {
+void KScheduler::Reload(KThread* thread) {
+    LOG_TRACE(Kernel, "core {}, reload thread {}", core_id, thread ? thread->GetName() : "nullptr");
+
     if (thread) {
         ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
 
-        // Cancel any outstanding wakeup events for this thread
-        thread->SetIsRunning(true);
-        thread->SetWasRunning(false);
-
         auto* const thread_owner_process = thread->GetOwnerProcess();
         if (thread_owner_process != nullptr) {
             system.Kernel().MakeCurrentProcess(thread_owner_process);
         }
-        if (!thread->IsHLEThread()) {
-            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
-            cpu_core.LoadContext(thread->GetContext32());
-            cpu_core.LoadContext(thread->GetContext64());
-            cpu_core.SetTlsAddress(thread->GetTLSAddress());
-            cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
-            cpu_core.ClearExclusiveState();
-        }
+
+        Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
+        cpu_core.LoadContext(thread->GetContext32());
+        cpu_core.LoadContext(thread->GetContext64());
+        cpu_core.SetTlsAddress(thread->GetTLSAddress());
+        cpu_core.SetTPIDR_EL0(thread->GetTPIDR_EL0());
+        cpu_core.ClearExclusiveState();
     }
 }
 
 void KScheduler::SwitchContextStep2() {
     // Load context of new thread
-    Reload(current_thread);
+    Reload(current_thread.load());
 
     RescheduleCurrentCore();
 }
 
 void KScheduler::ScheduleImpl() {
-    Thread* previous_thread = current_thread;
-    current_thread = state.highest_priority_thread;
+    KThread* previous_thread = current_thread.load();
+    KThread* next_thread = state.highest_priority_thread;
 
-    this->state.needs_scheduling = false;
+    state.needs_scheduling = false;
+
+    // We never want to schedule a null thread, so use the idle thread if we don't have a next.
+    if (next_thread == nullptr) {
+        next_thread = idle_thread;
+    }
 
-    if (current_thread == previous_thread) {
+    // If we're not actually switching thread, there's nothing to do.
+    if (next_thread == current_thread.load()) {
         guard.unlock();
         return;
     }
 
+    current_thread.store(next_thread);
+
     Process* const previous_process = system.Kernel().CurrentProcess();
 
     UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -714,28 +749,29 @@ void KScheduler::SwitchToCurrent() {
     while (true) {
         {
             std::scoped_lock lock{guard};
-            current_thread = state.highest_priority_thread;
-            this->state.needs_scheduling = false;
+            current_thread.store(state.highest_priority_thread);
+            state.needs_scheduling.store(false);
         }
         const auto is_switch_pending = [this] {
             std::scoped_lock lock{guard};
-            return state.needs_scheduling.load(std::memory_order_relaxed);
+            return state.needs_scheduling.load();
         };
         do {
-            if (current_thread != nullptr && !current_thread->IsHLEThread()) {
-                current_thread->context_guard.lock();
-                if (current_thread->GetRawState() != ThreadState::Runnable) {
-                    current_thread->context_guard.unlock();
+            auto next_thread = current_thread.load();
+            if (next_thread != nullptr) {
+                next_thread->context_guard.lock();
+                if (next_thread->GetRawState() != ThreadState::Runnable) {
+                    next_thread->context_guard.unlock();
                     break;
                 }
-                if (static_cast<u32>(current_thread->GetProcessorID()) != core_id) {
-                    current_thread->context_guard.unlock();
+                if (next_thread->GetActiveCore() != core_id) {
+                    next_thread->context_guard.unlock();
                     break;
                 }
             }
             std::shared_ptr<Common::Fiber>* next_context;
-            if (current_thread != nullptr) {
-                next_context = &current_thread->GetHostContext();
+            if (next_thread != nullptr) {
+                next_context = &next_thread->GetHostContext();
             } else {
                 next_context = &idle_thread->GetHostContext();
             }
@@ -744,13 +780,13 @@
     }
 }
 
-void KScheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
+void KScheduler::UpdateLastContextSwitchTime(KThread* thread, Process* process) {
     const u64 prev_switch_ticks = last_context_switch_time;
     const u64 most_recent_switch_ticks = system.CoreTiming().GetCPUTicks();
     const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
 
     if (thread != nullptr) {
-        thread->UpdateCPUTimeTicks(update_ticks);
+        thread->AddCpuTime(core_id, update_ticks);
     }
 
     if (process != nullptr) {
@@ -764,15 +800,10 @@ void KScheduler::Initialize() {
     std::string name = "Idle Thread Id:" + std::to_string(core_id);
     std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
     void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
-    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
-    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
-                                     nullptr, std::move(init_func), init_func_parameter);
+    auto thread_res = KThread::Create(system, ThreadType::Main, name, 0,
+                                      KThread::IdleThreadPriority, 0, static_cast<u32>(core_id), 0,
+                                      nullptr, std::move(init_func), init_func_parameter);
     idle_thread = thread_res.Unwrap().get();
-
-    {
-        KScopedSchedulerLock lock{system.Kernel()};
-        idle_thread->SetState(ThreadState::Runnable);
-    }
 }
 
 KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
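Note: the scheduler above walks per-core bitmasks (cores_pending_reschedule, idle_cores) by repeatedly taking the lowest set bit with std::countr_zero and clearing it. Below is a minimal, self-contained C++20 sketch of that iteration pattern; the ForEachPendingCore name and the printf stand in for the real per-core work and are illustrative only, not yuzu code:

#include <bit>
#include <cstdint>
#include <cstdio>

// Visit each set bit of `mask` (one bit per core), lowest core first,
// clearing each bit once it has been serviced. This mirrors the loop
// shape of RescheduleCores() and the idle-core walk in
// UpdateHighestPriorityThreadsImpl().
void ForEachPendingCore(std::uint64_t mask) {
    while (mask != 0) {
        const auto core = static_cast<std::uint32_t>(std::countr_zero(mask));
        std::printf("servicing core %u\n", core); // stand-in for the real work
        mask &= ~(std::uint64_t{1} << core);      // clear the handled bit
    }
}

int main() {
    ForEachPendingCore(0b1011); // visits cores 0, 1, then 3
}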
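Note: the new ClearPreviousThread() uses std::atomic_ref (C++20) to mutate a plain pointer field atomically in place, clearing it only if it still holds the expected thread. A minimal, self-contained sketch of that compare-and-swap technique follows; the Scheduler struct and KThread stub here are hypothetical stand-ins for illustration, not yuzu's actual types:

#include <atomic>
#include <cassert>

struct KThread {}; // stand-in for Kernel::KThread

struct Scheduler {
    KThread* prev_thread{nullptr}; // ordinary field, wrapped atomically on demand
};

// Clear `thread` from the scheduler's prev_thread slot, but only if the slot
// still holds it (another core may have already replaced the value).
void ClearPreviousThread(Scheduler& sched, KThread* thread) {
    std::atomic_ref<KThread*> prev(sched.prev_thread);
    static_assert(std::atomic_ref<KThread*>::is_always_lock_free);

    KThread* expected = thread;
    // On success the slot becomes nullptr; on failure it is left untouched
    // and `expected` receives the value actually stored.
    prev.compare_exchange_strong(expected, nullptr);
}

int main() {
    Scheduler sched;
    KThread a, b;

    sched.prev_thread = &a;
    ClearPreviousThread(sched, &b); // no match: slot still holds &a
    assert(sched.prev_thread == &a);

    ClearPreviousThread(sched, &a); // match: slot is cleared
    assert(sched.prev_thread == nullptr);
}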