author     Fernando Sahmkow <fsahmkow27@gmail.com>   2019-03-29 22:01:46 +0100
committer  FernandoS27 <fsahmkow27@gmail.com>        2019-10-15 17:55:06 +0200
commit     a1ac0c6cb47e10863b0bfbb1a6aadc71ccc513ab (patch)
tree       d4476f115b69c74f543f7992006f8e5548cd7f54 /src/core/hle
parent     Implement a new Core Scheduler (diff)
Diffstat (limited to 'src/core/hle')
-rw-r--r--  src/core/hle/kernel/thread.cpp  242
-rw-r--r--  src/core/hle/kernel/thread.h     55
2 files changed, 237 insertions(+), 60 deletions(-)
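
Most of the new logic lands in Thread::SetCoreAndAffinityMask in the thread.cpp hunk below, which re-derives processor_id whenever the new affinity mask no longer contains the thread's current core. The following self-contained sketch reproduces that selection rule under the commit's four-core assumption; HighestSetCore mirrors the lambda in the commit, while PickCore is a hypothetical helper written only for illustration and models only the processor_id >= 0 path.

#include <cassert>
#include <cstdint>

constexpr std::uint32_t NUM_CPU_CORES = 4;

// Mirrors the HighestSetCore lambda inside Thread::SetCoreAndAffinityMask:
// returns the highest core whose bit is set in the mask, or -1 if none is.
int HighestSetCore(std::uint64_t mask, std::uint32_t max_cores) {
    for (int core = static_cast<int>(max_cores) - 1; core >= 0; core--) {
        if (((mask >> core) & 1) != 0) {
            return core;
        }
    }
    return -1;
}

// Hypothetical helper reproducing the core-reselection rule: keep the current
// core while it is still inside the new affinity mask; otherwise fall back to
// the ideal core, or to the highest allowed core when no ideal core is set.
int PickCore(int current_core, int ideal_core, std::uint64_t affinity_mask) {
    if (current_core >= 0 && ((affinity_mask >> current_core) & 1) != 0) {
        return current_core;
    }
    return ideal_core < 0 ? HighestSetCore(affinity_mask, NUM_CPU_CORES) : ideal_core;
}

int main() {
    assert(PickCore(0, -1, 0b1100) == 3); // current core 0 no longer allowed, no ideal core
    assert(PickCore(0, 2, 0b1100) == 2);  // current core 0 no longer allowed, ideal core wins
    assert(PickCore(2, -1, 0b0100) == 2); // current core still inside the mask, keep it
    return 0;
}
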
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index ec529e7f2..d0fa7b370 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -45,15 +45,7 @@ void Thread::Stop() {
callback_handle);
kernel.ThreadWakeupCallbackHandleTable().Close(callback_handle);
callback_handle = 0;
-
- // Clean up thread from ready queue
- // This is only needed when the thread is terminated forcefully (SVC TerminateProcess)
- if (status == ThreadStatus::Ready || status == ThreadStatus::Paused) {
- scheduler->UnscheduleThread(this, current_priority);
- }
-
- status = ThreadStatus::Dead;
-
+ SetStatus(ThreadStatus::Dead);
WakeupAllWaitingThreads();
// Clean up any dangling references in objects that this thread was waiting for
@@ -132,13 +124,11 @@ void Thread::ResumeFromWait() {
wakeup_callback = nullptr;
if (activity == ThreadActivity::Paused) {
- status = ThreadStatus::Paused;
+ SetStatus(ThreadStatus::Paused);
return;
}
- status = ThreadStatus::Ready;
-
- ChangeScheduler();
+ SetStatus(ThreadStatus::Ready);
}
void Thread::CancelWait() {
@@ -205,9 +195,9 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
thread->name = std::move(name);
thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
thread->owner_process = &owner_process;
+ auto& scheduler = kernel.GlobalScheduler();
+ scheduler.AddThread(thread);
thread->tls_address = thread->owner_process->CreateTLSRegion();
- thread->scheduler = &system.Scheduler(processor_id);
- thread->scheduler->AddThread(thread);
thread->owner_process->RegisterThread(thread.get());
@@ -250,6 +240,22 @@ void Thread::SetStatus(ThreadStatus new_status) {
return;
}
+ switch (new_status) {
+ case ThreadStatus::Ready:
+ case ThreadStatus::Running:
+ SetSchedulingStatus(ThreadSchedStatus::Runnable);
+ break;
+ case ThreadStatus::Dormant:
+ SetSchedulingStatus(ThreadSchedStatus::None);
+ break;
+ case ThreadStatus::Dead:
+ SetSchedulingStatus(ThreadSchedStatus::Exited);
+ break;
+ default:
+ SetSchedulingStatus(ThreadSchedStatus::Paused);
+ break;
+ }
+
if (status == ThreadStatus::Running) {
last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks();
}
@@ -311,8 +317,7 @@ void Thread::UpdatePriority() {
return;
}
- scheduler->SetThreadPriority(this, new_priority);
- current_priority = new_priority;
+ SetCurrentPriority(new_priority);
if (!lock_owner) {
return;
@@ -328,47 +333,7 @@ void Thread::UpdatePriority() {
}
void Thread::ChangeCore(u32 core, u64 mask) {
- ideal_core = core;
- affinity_mask = mask;
- ChangeScheduler();
-}
-
-void Thread::ChangeScheduler() {
- if (status != ThreadStatus::Ready) {
- return;
- }
-
- auto& system = Core::System::GetInstance();
- std::optional<s32> new_processor_id{GetNextProcessorId(affinity_mask)};
-
- if (!new_processor_id) {
- new_processor_id = processor_id;
- }
- if (ideal_core != -1 && system.Scheduler(ideal_core).GetCurrentThread() == nullptr) {
- new_processor_id = ideal_core;
- }
-
- ASSERT(*new_processor_id < 4);
-
- // Add thread to new core's scheduler
- auto& next_scheduler = system.Scheduler(*new_processor_id);
-
- if (*new_processor_id != processor_id) {
- // Remove thread from previous core's scheduler
- scheduler->RemoveThread(this);
- next_scheduler.AddThread(this);
- }
-
- processor_id = *new_processor_id;
-
- // If the thread was ready, unschedule from the previous core and schedule on the new core
- scheduler->UnscheduleThread(this, current_priority);
- next_scheduler.ScheduleThread(this, current_priority);
-
- // Change thread's scheduler
- scheduler = &next_scheduler;
-
- system.CpuCore(processor_id).PrepareReschedule();
+ SetCoreAndAffinityMask(core, mask);
}
bool Thread::AllWaitObjectsReady() const {
@@ -391,7 +356,7 @@ void Thread::SetActivity(ThreadActivity value) {
if (status == ThreadStatus::Ready) {
status = ThreadStatus::Paused;
} else if (status == ThreadStatus::Running) {
- status = ThreadStatus::Paused;
+ SetStatus(ThreadStatus::Paused);
Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule();
}
} else if (status == ThreadStatus::Paused) {
@@ -408,6 +373,165 @@ void Thread::Sleep(s64 nanoseconds) {
WakeAfterDelay(nanoseconds);
}
+void Thread::YieldType0() {
+ auto& scheduler = kernel.GlobalScheduler();
+ scheduler.YieldThread(this);
+}
+
+void Thread::YieldType1() {
+ auto& scheduler = kernel.GlobalScheduler();
+ scheduler.YieldThreadAndBalanceLoad(this);
+}
+
+void Thread::YieldType2() {
+ auto& scheduler = kernel.GlobalScheduler();
+ scheduler.YieldThreadAndWaitForLoadBalancing(this);
+}
+
+void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
+ u32 old_flags = scheduling_state;
+ scheduling_state =
+ (scheduling_state & ThreadSchedMasks::HighMask) | static_cast<u32>(new_status);
+ AdjustSchedulingOnStatus(old_flags);
+}
+
+void Thread::SetCurrentPriority(u32 new_priority) {
+ u32 old_priority = current_priority;
+ current_priority = new_priority;
+ AdjustSchedulingOnPriority(old_priority);
+}
+
+ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
+ auto HighestSetCore = [](u64 mask, u32 max_cores) {
+ for (s32 core = max_cores - 1; core >= 0; core--) {
+ if (((mask >> core) & 1) != 0)
+ return core;
+ }
+ return -1;
+ };
+ bool use_override = affinity_override_count != 0;
+ // The value -3 is "do not change the ideal core".
+ if (new_core == -3) {
+ new_core = use_override ? ideal_core_override : ideal_core;
+ if ((new_affinity_mask & (1 << new_core)) == 0) {
+ return ERR_INVALID_COMBINATION;
+ }
+ }
+ if (use_override) {
+ ideal_core_override = new_core;
+ affinity_mask_override = new_affinity_mask;
+ } else {
+ u64 old_affinity_mask = affinity_mask;
+ ideal_core = new_core;
+ affinity_mask = new_affinity_mask;
+ if (old_affinity_mask != new_affinity_mask) {
+ s32 old_core = processor_id;
+ if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) {
+ if (ideal_core < 0) {
+ processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES);
+ } else {
+ processor_id = ideal_core;
+ }
+ }
+ AdjustSchedulingOnAffinity(old_affinity_mask, old_core);
+ }
+ }
+ return RESULT_SUCCESS;
+}
+
+void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
+ if (old_flags == scheduling_state)
+ return;
+
+ auto& scheduler = kernel.GlobalScheduler();
+ if (static_cast<ThreadSchedStatus>(old_flags & ThreadSchedMasks::LowMask) ==
+ ThreadSchedStatus::Runnable) {
+ // In this case the thread was running, now it's pausing/exiting
+ if (processor_id >= 0)
+ scheduler.Unschedule(current_priority, processor_id, this);
+
+ for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+ if (core != processor_id && ((affinity_mask >> core) & 1) != 0)
+ scheduler.Unsuggest(current_priority, core, this);
+ }
+ } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) {
+ // The thread is now set to running from being stopped
+ if (processor_id >= 0)
+ scheduler.Schedule(current_priority, processor_id, this);
+
+ for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+ if (core != processor_id && ((affinity_mask >> core) & 1) != 0)
+ scheduler.Suggest(current_priority, core, this);
+ }
+ }
+
+ scheduler.SetReselectionPending();
+}
+
+void Thread::AdjustSchedulingOnPriority(u32 old_priority) {
+ if (GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
+ return;
+ }
+ auto& scheduler = Core::System::GetInstance().GlobalScheduler();
+ if (processor_id >= 0) {
+ scheduler.Unschedule(old_priority, processor_id, this);
+ }
+
+ for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+ if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
+ scheduler.Unsuggest(old_priority, core, this);
+ }
+ }
+
+ // Add thread to the new priority queues.
+ Thread* current_thread = GetCurrentThread();
+
+ if (processor_id >= 0) {
+ if (current_thread == this) {
+ scheduler.SchedulePrepend(current_priority, processor_id, this);
+ } else {
+ scheduler.Schedule(current_priority, processor_id, this);
+ }
+ }
+
+ for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+ if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
+ scheduler.Suggest(current_priority, core, this);
+ }
+ }
+
+ scheduler.SetReselectionPending();
+}
+
+void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) {
+ auto& scheduler = Core::System::GetInstance().GlobalScheduler();
+ if (GetSchedulingStatus() != ThreadSchedStatus::Runnable ||
+ current_priority >= THREADPRIO_COUNT)
+ return;
+
+ for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+ if (((old_affinity_mask >> core) & 1) != 0) {
+ if (core == old_core) {
+ scheduler.Unschedule(current_priority, core, this);
+ } else {
+ scheduler.Unsuggest(current_priority, core, this);
+ }
+ }
+ }
+
+ for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+ if (((affinity_mask >> core) & 1) != 0) {
+ if (core == processor_id) {
+ scheduler.Schedule(current_priority, core, this);
+ } else {
+ scheduler.Suggest(current_priority, core, this);
+ }
+ }
+ }
+
+ scheduler.SetReselectionPending();
+}
+
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
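
The thread.cpp hunks above fold the scheduler-facing state into a single packed field: the low nibble of scheduling_state carries the ThreadSchedStatus, while the high bits carry the pause flags, split by the LowMask/HighMask constants declared in thread.h below. A self-contained sketch of that packing follows; WithStatus and StatusOf are hypothetical stand-ins for Thread::SetSchedulingStatus and Thread::GetSchedulingStatus.

#include <cassert>
#include <cstdint>

// Mirrors the masks declared in thread.h: the low nibble holds the scheduling
// status, the high bits hold pause flags (process, thread, debug, kernel-init).
enum class SchedStatus : std::uint32_t { None = 0, Paused = 1, Runnable = 2, Exited = 3 };
constexpr std::uint32_t LowMask = 0x000f;
constexpr std::uint32_t HighMask = 0xfff0;
constexpr std::uint32_t ThreadPauseFlag = 1 << 5;

// Hypothetical free functions reproducing the packing used by the commit.
std::uint32_t WithStatus(std::uint32_t state, SchedStatus status) {
    return (state & HighMask) | static_cast<std::uint32_t>(status);
}
SchedStatus StatusOf(std::uint32_t state) {
    return static_cast<SchedStatus>(state & LowMask);
}

int main() {
    std::uint32_t state = 0;
    state = WithStatus(state, SchedStatus::Runnable); // thread becomes runnable
    state |= ThreadPauseFlag;                         // a pause flag is raised...
    state = WithStatus(state, SchedStatus::Paused);   // ...and the low nibble follows
    assert(StatusOf(state) == SchedStatus::Paused);   // status readable via LowMask
    assert((state & ThreadPauseFlag) != 0);           // the flag survives status updates
    return 0;
}
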
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 07e989637..c426a7209 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -75,6 +75,21 @@ enum class ThreadActivity : u32 {
Paused = 1,
};
+enum class ThreadSchedStatus : u32 { None = 0, Paused = 1, Runnable = 2, Exited = 3 };
+
+enum ThreadSchedFlags : u32 {
+ ProcessPauseFlag = 1 << 4,
+ ThreadPauseFlag = 1 << 5,
+ ProcessDebugPauseFlag = 1 << 6,
+ KernelInitPauseFlag = 1 << 8,
+};
+
+enum ThreadSchedMasks : u32 {
+ LowMask = 0x000f,
+ HighMask = 0xfff0,
+ ForcePauseMask = 0x0070,
+};
+
class Thread final : public WaitObject {
public:
using MutexWaitingThreads = std::vector<SharedPtr<Thread>>;
@@ -278,6 +293,10 @@ public:
return processor_id;
}
+ void SetProcessorID(s32 new_core) {
+ processor_id = new_core;
+ }
+
Process* GetOwnerProcess() {
return owner_process;
}
@@ -383,11 +402,38 @@ public:
/// Sleeps this thread for the given amount of nanoseconds.
void Sleep(s64 nanoseconds);
+ /// Yields this thread without rebalancing loads.
+ void YieldType0();
+
+ /// Yields this thread and performs load rebalancing.
+ void YieldType1();
+
+ /// Yields this thread; if the core is left idle, loads are rebalanced.
+ void YieldType2();
+
+ ThreadSchedStatus GetSchedulingStatus() {
+ return static_cast<ThreadSchedStatus>(scheduling_state & ThreadSchedMasks::LowMask);
+ }
+
+ bool IsRunning() const {
+ return is_running;
+ }
+
+ void SetIsRunning(bool value) {
+ is_running = value;
+ }
+
private:
explicit Thread(KernelCore& kernel);
~Thread() override;
- void ChangeScheduler();
+ void SetSchedulingStatus(ThreadSchedStatus new_status);
+ void SetCurrentPriority(u32 new_priority);
+ ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
+
+ void AdjustSchedulingOnStatus(u32 old_flags);
+ void AdjustSchedulingOnPriority(u32 old_priority);
+ void AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core);
Core::ARM_Interface::ThreadContext context{};
@@ -453,6 +499,13 @@ private:
ThreadActivity activity = ThreadActivity::Normal;
+ s32 ideal_core_override = -1;
+ u64 affinity_mask_override = 0x1;
+ u32 affinity_override_count = 0;
+
+ u32 scheduling_state = 0;
+ bool is_running = false;
+
std::string name;
};
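
The three YieldType* wrappers declared above correspond to the three yield flavours the Horizon kernel exposes through svcSleepThread, which reserves the timeout values 0, -1 and -2 for them. The commit only adds the Thread-side entry points; the sketch below shows how a caller might dispatch on those values, with a stub Thread standing in for the real class. The SleepOrYield name and the dispatch itself are illustrative assumptions, not part of this diff.

#include <cstdint>
#include <iostream>

using s64 = std::int64_t; // assumption: matches yuzu's common integer alias

// Minimal stand-in for the Thread interface above; only the members the
// sketch exercises are stubbed.
struct Thread {
    void YieldType0() { std::cout << "yield, no load rebalancing\n"; }
    void YieldType1() { std::cout << "yield and rebalance loads\n"; }
    void YieldType2() { std::cout << "yield; rebalance if the core goes idle\n"; }
    void Sleep(s64 ns) { std::cout << "sleep for " << ns << " ns\n"; }
};

// Illustrative dispatch on the special sleep values reserved for yielding.
void SleepOrYield(Thread& current, s64 nanoseconds) {
    switch (nanoseconds) {
    case 0:
        current.YieldType0();
        break;
    case -1:
        current.YieldType1();
        break;
    case -2:
        current.YieldType2();
        break;
    default:
        current.Sleep(nanoseconds);
        break;
    }
}

int main() {
    Thread thread;
    SleepOrYield(thread, 0);      // plain yield
    SleepOrYield(thread, -2);     // yield and wait for load balancing
    SleepOrYield(thread, 50'000); // ordinary timed sleep
    return 0;
}
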