author     Fernando Sahmkow <fsahmkow27@gmail.com>    2020-02-28 14:42:06 +0100
committer  Fernando Sahmkow <fsahmkow27@gmail.com>    2020-06-27 17:35:21 +0200
commit     2a8837ff51a9cf5a0123489dba5f7ab48373c2d3 (patch)
tree       119ae561120f78d70efd6e12297248a48ea28901 /src/core
parent     General: Add better safety for JIT use. (diff)
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/hardware_properties.h     |  4
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  | 18
-rw-r--r--  src/core/hle/kernel/scheduler.h    |  1
-rw-r--r--  src/core/hle/kernel/svc.cpp        |  1
4 files changed, 24 insertions, 0 deletions
diff --git a/src/core/hardware_properties.h b/src/core/hardware_properties.h
index b04e046ed..456b41e1b 100644
--- a/src/core/hardware_properties.h
+++ b/src/core/hardware_properties.h
@@ -42,6 +42,10 @@ struct EmuThreadHandle {
         constexpr u32 invalid_handle = 0xFFFFFFFF;
         return {invalid_handle, invalid_handle};
     }
+
+    bool IsInvalid() const {
+        return (*this) == InvalidHandle();
+    }
 };
 
 } // namespace Core
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 727d2e6cc..d67d3c5cd 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -44,6 +44,7 @@ void GlobalScheduler::RemoveThread(std::shared_ptr<Thread> thread) {
 }
 
 u32 GlobalScheduler::SelectThreads() {
+    ASSERT(is_locked);
     const auto update_thread = [](Thread* thread, Scheduler& sched) {
         sched.guard.lock();
         if (thread != sched.selected_thread.get()) {
@@ -136,6 +137,7 @@ u32 GlobalScheduler::SelectThreads() {
 }
 
 bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
+    ASSERT(is_locked);
     // Note: caller should use critical section, etc.
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
     const u32 priority = yielding_thread->GetPriority();
@@ -149,6 +151,7 @@ bool GlobalScheduler::YieldThread(Thread* yielding_thread) {
 }
 
 bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
+    ASSERT(is_locked);
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
     const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
@@ -197,6 +200,7 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
 }
 
 bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
+    ASSERT(is_locked);
     // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section,
     // etc.
     Thread* winner = nullptr;
@@ -237,6 +241,7 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
 }
 
 void GlobalScheduler::PreemptThreads() {
+    ASSERT(is_locked);
     for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
 
@@ -339,33 +344,40 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
 }
 
 void GlobalScheduler::Suggest(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     suggested_queue[core].add(thread, priority);
 }
 
 void GlobalScheduler::Unsuggest(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     suggested_queue[core].remove(thread, priority);
 }
 
 void GlobalScheduler::Schedule(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority);
 }
 
 void GlobalScheduler::SchedulePrepend(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     ASSERT_MSG(thread->GetProcessorID() == s32(core), "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority, false);
 }
 
 void GlobalScheduler::Reschedule(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     scheduled_queue[core].remove(thread, priority);
     scheduled_queue[core].add(thread, priority);
 }
 
 void GlobalScheduler::Unschedule(u32 priority, std::size_t core, Thread* thread) {
+    ASSERT(is_locked);
     scheduled_queue[core].remove(thread, priority);
 }
 
 void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
+    ASSERT(is_locked);
     const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
     const s32 source_core = thread->GetProcessorID();
     if (source_core == destination_core || !schedulable) {
@@ -399,6 +411,7 @@ void GlobalScheduler::AdjustSchedulingOnStatus(Thread* thread, u32 old_flags) {
     if (old_flags == thread->scheduling_state) {
         return;
     }
+    ASSERT(is_locked);
 
     if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) ==
         ThreadSchedStatus::Runnable) {
@@ -434,6 +447,7 @@ void GlobalScheduler::AdjustSchedulingOnPriority(Thread* thread, u32 old_priorit
     if (thread->GetSchedulingStatus() != ThreadSchedStatus::Runnable) {
         return;
     }
+    ASSERT(is_locked);
     if (thread->processor_id >= 0) {
         Unschedule(old_priority, static_cast<u32>(thread->processor_id), thread);
     }
@@ -472,6 +486,7 @@ void GlobalScheduler::AdjustSchedulingOnAffinity(Thread* thread, u64 old_affinit
         thread->current_priority >= THREADPRIO_COUNT) {
         return;
     }
+    ASSERT(is_locked);
 
     for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         if (((old_affinity_mask >> core) & 1) != 0) {
@@ -507,10 +522,12 @@ void GlobalScheduler::Shutdown() {
 
 void GlobalScheduler::Lock() {
     Core::EmuThreadHandle current_thread = kernel.GetCurrentEmuThreadID();
+    ASSERT(!current_thread.IsInvalid());
     if (current_thread == current_owner) {
         ++scope_lock;
     } else {
         inner_lock.lock();
+        is_locked = true;
         current_owner = current_thread;
         ASSERT(current_owner != Core::EmuThreadHandle::InvalidHandle());
         scope_lock = 1;
@@ -526,6 +543,7 @@ void GlobalScheduler::Unlock() {
     Core::EmuThreadHandle leaving_thread = current_owner;
     current_owner = Core::EmuThreadHandle::InvalidHandle();
     scope_lock = 1;
+    is_locked = false;
     inner_lock.unlock();
     EnableInterruptAndSchedule(cores_pending_reschedule, leaving_thread);
 }
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index f5f64338f..f26a554f5 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -182,6 +182,7 @@ private:
     std::array<u32, Core::Hardware::NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};
 
     /// Scheduler lock mechanisms.
+    bool is_locked{};
     std::mutex inner_lock{}; // TODO(Blinkhawk): Replace for a SpinLock
     std::atomic<s64> scope_lock{};
     Core::EmuThreadHandle current_owner{Core::EmuThreadHandle::InvalidHandle()};
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index b535593c7..4c1040a3b 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -1657,6 +1657,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
                 update_val = thread->GetWaitHandle();
             }
         } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
+        monitor.ClearExclusive();
         if (mutex_val == 0) {
            // We were able to acquire the mutex, resume this thread.
            ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
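
Note: the scheduler.cpp and scheduler.h hunks together implement a simple lock-assertion pattern: the re-entrant global scheduler lock records whether it is currently held (is_locked), and every queue operation asserts that flag on entry. The following is a minimal standalone C++ sketch of that pattern only, not yuzu's actual GlobalScheduler or KernelCore API; GlobalLock, AssertHeld and DoScheduledWork are illustrative names. In a debug build the assertion catches any caller that mutates scheduler state without first taking the lock.

// Sketch of a re-entrant lock that tracks an is_locked flag for assertions.
#include <cassert>
#include <mutex>
#include <thread>

class GlobalLock {
public:
    void Lock() {
        const auto this_id = std::this_thread::get_id();
        if (owner.load() == this_id) {
            ++depth; // re-entrant acquire by the current owner
            return;
        }
        inner.lock();
        is_locked = true; // mirrors the flag added to GlobalScheduler::Lock()
        owner.store(this_id);
        depth = 1;
    }

    void Unlock() {
        if (--depth > 0) {
            return; // still held by an outer scope
        }
        owner.store(std::thread::id{});
        is_locked = false; // cleared before releasing, as in GlobalScheduler::Unlock()
        inner.unlock();
    }

    // Internal operations call this instead of trusting their caller.
    void AssertHeld() const {
        assert(is_locked && "operation requires the global lock");
    }

private:
    std::mutex inner;
    std::atomic<std::thread::id> owner{};
    int depth{};
    bool is_locked{};
};

GlobalLock g_lock;

void DoScheduledWork() {
    g_lock.AssertHeld(); // counterpart of ASSERT(is_locked) in Suggest/Schedule/...
    // ... mutate scheduling queues safely here ...
}

int main() {
    g_lock.Lock();
    DoScheduledWork(); // fine: lock held
    g_lock.Unlock();
    // Calling DoScheduledWork() here would trip the assertion in a debug build.
}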
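The svc.cpp hunk adds a monitor.ClearExclusive() call after the exclusive-store retry loop, so the host exclusive monitor does not carry a stale reservation once the mutex word has been updated. Below is a toy model of such a load-linked/store-conditional style loop built on std::atomic; ToyMonitor and its methods are illustrative assumptions and not the emulator's ExclusiveMonitor interface.

// Sketch of an exclusive-monitor retry loop that drops its reservation when done.
#include <atomic>
#include <cstdint>
#include <cstdio>

class ToyMonitor {
public:
    // "Exclusive read": remember the value observed for this word.
    std::uint32_t ExclusiveRead32(std::atomic<std::uint32_t>& word) {
        reserved = word.load(std::memory_order_acquire);
        has_reservation = true;
        return reserved;
    }

    // "Exclusive write": succeeds only if the word still holds the reserved value.
    bool ExclusiveWrite32(std::atomic<std::uint32_t>& word, std::uint32_t value) {
        if (!has_reservation) {
            return false;
        }
        return word.compare_exchange_strong(reserved, value, std::memory_order_release);
    }

    // Drop the reservation once the update loop is finished, so it cannot leak
    // into later exclusive accesses -- the purpose of the added ClearExclusive() call.
    void ClearExclusive() {
        has_reservation = false;
    }

private:
    std::uint32_t reserved{};
    bool has_reservation{};
};

int main() {
    std::atomic<std::uint32_t> mutex_word{0};
    ToyMonitor monitor;

    std::uint32_t observed;
    std::uint32_t update;
    do {
        observed = monitor.ExclusiveRead32(mutex_word);
        update = (observed == 0) ? 42u : observed; // pick the new owner handle
    } while (!monitor.ExclusiveWrite32(mutex_word, update));
    monitor.ClearExclusive(); // mirrors the line added in SignalProcessWideKey

    std::printf("mutex word is now %u\n", mutex_word.load());
}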