path: root/src/core/hle
author     Fernando S <fsahmkow27@gmail.com>   2022-11-09 14:50:49 +0100
committer  GitHub <noreply@github.com>         2022-11-09 14:50:49 +0100
commit     3161b34ff6e4773cce35f0e4efe94ffb670eb2af (patch)
tree       524010a3d63edd86fa80f0af873a411359aa6b9f /src/core/hle
parent     Merge pull request #9199 from liamwhite/service-oops (diff)
parent     Ensure correctness of atomic store ordering (diff)
Diffstat (limited to 'src/core/hle')
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp      9
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h   3
2 files changed, 9 insertions, 3 deletions
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index b1cabbca0..d6676904b 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -384,7 +384,8 @@ void KScheduler::SwitchThread(KThread* next_thread) {
 
 void KScheduler::ScheduleImpl() {
     // First, clear the needs scheduling bool.
-    m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
+    m_state.needs_scheduling.store(false, std::memory_order_relaxed);
+    std::atomic_thread_fence(std::memory_order_seq_cst);
 
     // Load the appropriate thread pointers for scheduling.
     KThread* const cur_thread{GetCurrentThreadPointer(kernel)};
@@ -400,7 +401,8 @@ void KScheduler::ScheduleImpl() {
     // If there aren't, we want to check if the highest priority thread is the same as the current
     // thread.
     if (highest_priority_thread == cur_thread) {
-        // If they're the same, then we can just return.
+        // If they're the same, then we can just issue a memory barrier and return.
+        std::atomic_thread_fence(std::memory_order_seq_cst);
         return;
     }
 
@@ -476,7 +478,8 @@ void KScheduler::ScheduleImplFiber() {
 
         // We failed to successfully do the context switch, and need to retry.
        // Clear needs_scheduling.
-        m_state.needs_scheduling.store(false, std::memory_order_seq_cst);
+        m_state.needs_scheduling.store(false, std::memory_order_relaxed);
+        std::atomic_thread_fence(std::memory_order_seq_cst);
 
        // Refresh the highest priority thread.
        highest_priority_thread = m_state.highest_priority_thread;
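
Aside: the pattern introduced in the hunks above can be illustrated in isolation. A lone seq_cst store only orders against other seq_cst operations, so a later non-seq_cst read (such as the plain read of m_state.highest_priority_thread) could still be reordered before it on weakly ordered hardware; a relaxed store followed by std::atomic_thread_fence(std::memory_order_seq_cst) acts as a full StoreLoad barrier. The following is a minimal sketch of that idea only; SketchState and ClearAndReadHighestPriority are hypothetical names, not yuzu code.

    #include <atomic>

    struct SketchState {
        std::atomic<bool> needs_scheduling{true};
        std::atomic<int*> highest_priority_thread{nullptr};
    };

    // Clear the "needs scheduling" flag, then read the highest-priority entry.
    // The explicit fence keeps the store from being reordered past the load;
    // a store alone (relaxed or even seq_cst) does not give that guarantee
    // against a non-seq_cst load on weakly ordered hardware.
    int* ClearAndReadHighestPriority(SketchState& state) {
        state.needs_scheduling.store(false, std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_seq_cst);
        return state.highest_priority_thread.load(std::memory_order_relaxed);
    }
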
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 73314b45e..129d60472 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -60,6 +60,9 @@ public:
 
         // Release an instance of the lock.
         if ((--lock_count) == 0) {
+            // Perform a memory barrier here.
+            std::atomic_thread_fence(std::memory_order_seq_cst);
+
             // We're no longer going to hold the lock. Take note of what cores need scheduling.
             const u64 cores_needing_scheduling =
                 SchedulerType::UpdateHighestPriorityThreads(kernel);
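
Aside: in the unlock path, the fence is placed where the final holder is about to release the lock and recompute which cores need scheduling, so every write made while the lock was held is ordered before that decision. Below is a hedged sketch of this shape only; SketchLock, RecomputeCoresNeedingScheduling, and RequestScheduling are hypothetical stand-ins, not the real KAbstractSchedulerLock API.

    #include <atomic>
    #include <cstdint>

    class SketchLock {
    public:
        void Unlock() {
            // Release one instance of the counted lock.
            if ((--lock_count) == 0) {
                // Full barrier: order every write made while the lock was held
                // before the reads used to pick the cores that need scheduling.
                std::atomic_thread_fence(std::memory_order_seq_cst);

                const std::uint64_t cores = RecomputeCoresNeedingScheduling();
                RequestScheduling(cores);
            }
        }

    private:
        // Stand-ins for the real scheduler hooks; 'cores' is a bitmask of core ids.
        std::uint64_t RecomputeCoresNeedingScheduling() { return 0; }
        void RequestScheduling(std::uint64_t cores) { (void)cores; }

        int lock_count = 1;
    };
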