65 files changed, 2670 insertions, 1434 deletions
diff --git a/.ci/scripts/common/post-upload.sh b/.ci/scripts/common/post-upload.sh index b80868635..bb4e9d328 100644 --- a/.ci/scripts/common/post-upload.sh +++ b/.ci/scripts/common/post-upload.sh @@ -1,12 +1,12 @@ #!/bin/bash -ex # Copy documentation -cp license.txt "$DIR_NAME" -cp README.md "$DIR_NAME" +cp license.txt "$REV_NAME" +cp README.md "$REV_NAME" -tar $COMPRESSION_FLAGS "$ARCHIVE_NAME" "$DIR_NAME" +tar $COMPRESSION_FLAGS "$ARCHIVE_NAME" "$REV_NAME" -mv "$DIR_NAME" $RELEASE_NAME +mv "$REV_NAME" $RELEASE_NAME 7z a "$REV_NAME.7z" $RELEASE_NAME diff --git a/.ci/scripts/linux/upload.sh b/.ci/scripts/linux/upload.sh index 3b20d7801..0d131d1dd 100644 --- a/.ci/scripts/linux/upload.sh +++ b/.ci/scripts/linux/upload.sh @@ -5,11 +5,10 @@ REV_NAME="yuzu-linux-${GITDATE}-${GITREV}" ARCHIVE_NAME="${REV_NAME}.tar.xz" COMPRESSION_FLAGS="-cJvf" -DIR_NAME="${REV_NAME}_${RELEASE_NAME}" -mkdir "$DIR_NAME" +mkdir "$REV_NAME" -cp build/bin/yuzu-cmd "$DIR_NAME" -cp build/bin/yuzu "$DIR_NAME" +cp build/bin/yuzu-cmd "$REV_NAME" +cp build/bin/yuzu "$REV_NAME" . .ci/scripts/common/post-upload.sh diff --git a/.ci/scripts/windows/upload.ps1 b/.ci/scripts/windows/upload.ps1 index 2371a7d4a..3cb709924 100644 --- a/.ci/scripts/windows/upload.ps1 +++ b/.ci/scripts/windows/upload.ps1 @@ -1,8 +1,6 @@ -param($BUILD_NAME) - $GITDATE = $(git show -s --date=short --format='%ad') -replace "-","" $GITREV = $(git show -s --format='%h') -$RELEASE_DIST = "yuzu-windows-msvc-$BUILD_NAME" +$RELEASE_DIST = "yuzu-windows-msvc" $MSVC_BUILD_ZIP = "yuzu-windows-msvc-$GITDATE-$GITREV.zip" -replace " ", "" $MSVC_BUILD_PDB = "yuzu-windows-msvc-$GITDATE-$GITREV-debugsymbols.zip" -replace " ", "" diff --git a/.ci/scripts/windows/upload.sh b/.ci/scripts/windows/upload.sh index 3f5794ae6..de73d3541 100644 --- a/.ci/scripts/windows/upload.sh +++ b/.ci/scripts/windows/upload.sh @@ -5,10 +5,9 @@ REV_NAME="yuzu-windows-mingw-${GITDATE}-${GITREV}" ARCHIVE_NAME="${REV_NAME}.tar.gz" COMPRESSION_FLAGS="-czvf" -DIR_NAME="${REV_NAME}_${RELEASE_NAME}" -mkdir "$DIR_NAME" +mkdir "$REV_NAME" # get around the permission issues -cp -r package/* "$DIR_NAME" +cp -r package/* "$REV_NAME" . 
.ci/scripts/common/post-upload.sh diff --git a/.ci/templates/build-msvc.yml b/.ci/templates/build-msvc.yml index 52cebaee0..b44a08247 100644 --- a/.ci/templates/build-msvc.yml +++ b/.ci/templates/build-msvc.yml @@ -17,7 +17,6 @@ steps: inputs: targetType: 'filePath' filePath: './.ci/scripts/windows/upload.ps1' - arguments: '$(BuildName)' - publish: artifacts artifact: 'yuzu-$(BuildName)-windows-msvc' displayName: 'Upload Artifacts' diff --git a/CMakeModules/GenerateSCMRev.cmake b/CMakeModules/GenerateSCMRev.cmake index 09eabe2c7..21e03ae98 100644 --- a/CMakeModules/GenerateSCMRev.cmake +++ b/CMakeModules/GenerateSCMRev.cmake @@ -85,10 +85,12 @@ set(HASH_FILES "${VIDEO_CORE}/shader/decode/xmad.cpp" "${VIDEO_CORE}/shader/ast.cpp" "${VIDEO_CORE}/shader/ast.h" - "${VIDEO_CORE}/shader/control_flow.cpp" - "${VIDEO_CORE}/shader/control_flow.h" "${VIDEO_CORE}/shader/compiler_settings.cpp" "${VIDEO_CORE}/shader/compiler_settings.h" + "${VIDEO_CORE}/shader/const_buffer_locker.cpp" + "${VIDEO_CORE}/shader/const_buffer_locker.h" + "${VIDEO_CORE}/shader/control_flow.cpp" + "${VIDEO_CORE}/shader/control_flow.h" "${VIDEO_CORE}/shader/decode.cpp" "${VIDEO_CORE}/shader/expr.cpp" "${VIDEO_CORE}/shader/expr.h" diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 5b51fcafa..9c6f1c07c 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -74,10 +74,12 @@ add_custom_command(OUTPUT scm_rev.cpp "${VIDEO_CORE}/shader/decode/xmad.cpp" "${VIDEO_CORE}/shader/ast.cpp" "${VIDEO_CORE}/shader/ast.h" - "${VIDEO_CORE}/shader/control_flow.cpp" - "${VIDEO_CORE}/shader/control_flow.h" "${VIDEO_CORE}/shader/compiler_settings.cpp" "${VIDEO_CORE}/shader/compiler_settings.h" + "${VIDEO_CORE}/shader/const_buffer_locker.cpp" + "${VIDEO_CORE}/shader/const_buffer_locker.h" + "${VIDEO_CORE}/shader/control_flow.cpp" + "${VIDEO_CORE}/shader/control_flow.h" "${VIDEO_CORE}/shader/decode.cpp" "${VIDEO_CORE}/shader/expr.cpp" "${VIDEO_CORE}/shader/expr.h" diff --git a/src/common/hash.h b/src/common/hash.h index 40194d1ee..ebd4125e2 100644 --- a/src/common/hash.h +++ b/src/common/hash.h @@ -6,6 +6,8 @@ #include <cstddef> #include <cstring> +#include <utility> +#include <boost/functional/hash.hpp> #include "common/cityhash.h" #include "common/common_types.h" @@ -68,4 +70,13 @@ struct HashableStruct { } }; +struct PairHash { + template <class T1, class T2> + std::size_t operator()(const std::pair<T1, T2>& pair) const noexcept { + std::size_t seed = std::hash<T1>()(pair.first); + boost::hash_combine(seed, std::hash<T2>()(pair.second)); + return seed; + } +}; + } // namespace Common diff --git a/src/common/multi_level_queue.h b/src/common/multi_level_queue.h index 9cb448f56..50acfdbf2 100644 --- a/src/common/multi_level_queue.h +++ b/src/common/multi_level_queue.h @@ -304,6 +304,13 @@ public: return levels[priority == Depth ? 
63 : priority].back(); } + void clear() { + used_priorities = 0; + for (std::size_t i = 0; i < Depth; i++) { + levels[i].clear(); + } + } + private: using const_list_iterator = typename std::list<T>::const_iterator; diff --git a/src/core/core.cpp b/src/core/core.cpp index b7b9259ec..eba17218a 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -409,6 +409,12 @@ void System::PrepareReschedule() { CurrentCpuCore().PrepareReschedule(); } +void System::PrepareReschedule(const u32 core_index) { + if (core_index < GlobalScheduler().CpuCoresCount()) { + CpuCore(core_index).PrepareReschedule(); + } +} + PerfStatsResults System::GetAndResetPerfStats() { return impl->GetAndResetPerfStats(); } @@ -449,6 +455,16 @@ const Kernel::Scheduler& System::Scheduler(std::size_t core_index) const { return CpuCore(core_index).Scheduler(); } +/// Gets the global scheduler +Kernel::GlobalScheduler& System::GlobalScheduler() { + return impl->kernel.GlobalScheduler(); +} + +/// Gets the global scheduler +const Kernel::GlobalScheduler& System::GlobalScheduler() const { + return impl->kernel.GlobalScheduler(); +} + Kernel::Process* System::CurrentProcess() { return impl->kernel.CurrentProcess(); } diff --git a/src/core/core.h b/src/core/core.h index 90e7ac607..984074ce3 100644 --- a/src/core/core.h +++ b/src/core/core.h @@ -24,6 +24,7 @@ class VfsFilesystem; } // namespace FileSys namespace Kernel { +class GlobalScheduler; class KernelCore; class Process; class Scheduler; @@ -184,6 +185,9 @@ public: /// Prepare the core emulation for a reschedule void PrepareReschedule(); + /// Prepare the core emulation for a reschedule + void PrepareReschedule(u32 core_index); + /// Gets and resets core performance statistics PerfStatsResults GetAndResetPerfStats(); @@ -238,6 +242,12 @@ public: /// Gets the scheduler for the CPU core with the specified index const Kernel::Scheduler& Scheduler(std::size_t core_index) const; + /// Gets the global scheduler + Kernel::GlobalScheduler& GlobalScheduler(); + + /// Gets the global scheduler + const Kernel::GlobalScheduler& GlobalScheduler() const; + /// Provides a pointer to the current process Kernel::Process* CurrentProcess(); diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp index 6bd9639c6..233ea572c 100644 --- a/src/core/core_cpu.cpp +++ b/src/core/core_cpu.cpp @@ -52,7 +52,8 @@ bool CpuBarrier::Rendezvous() { Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_barrier, std::size_t core_index) - : cpu_barrier{cpu_barrier}, core_timing{system.CoreTiming()}, core_index{core_index} { + : cpu_barrier{cpu_barrier}, global_scheduler{system.GlobalScheduler()}, + core_timing{system.CoreTiming()}, core_index{core_index} { #ifdef ARCHITECTURE_x86_64 arm_interface = std::make_unique<ARM_Dynarmic>(system, exclusive_monitor, core_index); #else @@ -60,7 +61,7 @@ Cpu::Cpu(System& system, ExclusiveMonitor& exclusive_monitor, CpuBarrier& cpu_ba LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available"); #endif - scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface); + scheduler = std::make_unique<Kernel::Scheduler>(system, *arm_interface, core_index); } Cpu::~Cpu() = default; @@ -81,21 +82,21 @@ void Cpu::RunLoop(bool tight_loop) { return; } + Reschedule(); + // If we don't have a currently active thread then don't execute instructions, // instead advance to the next event and try to yield to the next thread if (Kernel::GetCurrentThread() == nullptr) { LOG_TRACE(Core, "Core-{} idling", core_index); core_timing.Idle(); - 
core_timing.Advance(); - PrepareReschedule(); } else { if (tight_loop) { arm_interface->Run(); } else { arm_interface->Step(); } - core_timing.Advance(); } + core_timing.Advance(); Reschedule(); } @@ -106,18 +107,18 @@ void Cpu::SingleStep() { void Cpu::PrepareReschedule() { arm_interface->PrepareReschedule(); - reschedule_pending = true; } void Cpu::Reschedule() { - if (!reschedule_pending) { - return; - } - - reschedule_pending = false; // Lock the global kernel mutex when we manipulate the HLE state - std::lock_guard lock{HLE::g_hle_lock}; - scheduler->Reschedule(); + std::lock_guard lock(HLE::g_hle_lock); + + global_scheduler.SelectThread(core_index); + scheduler->TryDoContextSwitch(); +} + +void Cpu::Shutdown() { + scheduler->Shutdown(); } } // namespace Core diff --git a/src/core/core_cpu.h b/src/core/core_cpu.h index 7589beb8c..cafca8df7 100644 --- a/src/core/core_cpu.h +++ b/src/core/core_cpu.h @@ -12,8 +12,9 @@ #include "common/common_types.h" namespace Kernel { +class GlobalScheduler; class Scheduler; -} +} // namespace Kernel namespace Core { class System; @@ -83,6 +84,8 @@ public: return core_index; } + void Shutdown(); + static std::unique_ptr<ExclusiveMonitor> MakeExclusiveMonitor(std::size_t num_cores); private: @@ -90,6 +93,7 @@ private: std::unique_ptr<ARM_Interface> arm_interface; CpuBarrier& cpu_barrier; + Kernel::GlobalScheduler& global_scheduler; std::unique_ptr<Kernel::Scheduler> scheduler; Timing::CoreTiming& core_timing; diff --git a/src/core/cpu_core_manager.cpp b/src/core/cpu_core_manager.cpp index 16b384076..8efd410bb 100644 --- a/src/core/cpu_core_manager.cpp +++ b/src/core/cpu_core_manager.cpp @@ -58,6 +58,7 @@ void CpuCoreManager::Shutdown() { thread_to_cpu.clear(); for (auto& cpu_core : cores) { + cpu_core->Shutdown(); cpu_core.reset(); } diff --git a/src/core/gdbstub/gdbstub.cpp b/src/core/gdbstub/gdbstub.cpp index db51d722f..20bb50868 100644 --- a/src/core/gdbstub/gdbstub.cpp +++ b/src/core/gdbstub/gdbstub.cpp @@ -202,13 +202,11 @@ void RegisterModule(std::string name, VAddr beg, VAddr end, bool add_elf_ext) { } static Kernel::Thread* FindThreadById(s64 id) { - for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) { - const auto& threads = Core::System::GetInstance().Scheduler(core).GetThreadList(); - for (auto& thread : threads) { - if (thread->GetThreadID() == static_cast<u64>(id)) { - current_core = core; - return thread.get(); - } + const auto& threads = Core::System::GetInstance().GlobalScheduler().GetThreadList(); + for (auto& thread : threads) { + if (thread->GetThreadID() == static_cast<u64>(id)) { + current_core = thread->GetProcessorID(); + return thread.get(); } } return nullptr; @@ -647,11 +645,9 @@ static void HandleQuery() { SendReply(buffer.c_str()); } else if (strncmp(query, "fThreadInfo", strlen("fThreadInfo")) == 0) { std::string val = "m"; - for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) { - const auto& threads = Core::System::GetInstance().Scheduler(core).GetThreadList(); - for (const auto& thread : threads) { - val += fmt::format("{:x},", thread->GetThreadID()); - } + const auto& threads = Core::System::GetInstance().GlobalScheduler().GetThreadList(); + for (const auto& thread : threads) { + val += fmt::format("{:x},", thread->GetThreadID()); } val.pop_back(); SendReply(val.c_str()); @@ -661,13 +657,11 @@ static void HandleQuery() { std::string buffer; buffer += "l<?xml version=\"1.0\"?>"; buffer += "<threads>"; - for (u32 core = 0; core < Core::NUM_CPU_CORES; core++) { - const auto& threads = 
Core::System::GetInstance().Scheduler(core).GetThreadList(); - for (const auto& thread : threads) { - buffer += - fmt::format(R"*(<thread id="{:x}" core="{:d}" name="Thread {:x}"></thread>)*", - thread->GetThreadID(), core, thread->GetThreadID()); - } + const auto& threads = Core::System::GetInstance().GlobalScheduler().GetThreadList(); + for (const auto& thread : threads) { + buffer += + fmt::format(R"*(<thread id="{:x}" core="{:d}" name="Thread {:x}"></thread>)*", + thread->GetThreadID(), thread->GetProcessorID(), thread->GetThreadID()); } buffer += "</threads>"; SendReply(buffer.c_str()); diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index c8842410b..de0a9064e 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp @@ -22,6 +22,7 @@ namespace Kernel { namespace { // Wake up num_to_wake (or all) threads in a vector. void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) { + auto& system = Core::System::GetInstance(); // Only process up to 'target' threads, unless 'target' is <= 0, in which case process // them all. std::size_t last = waiting_threads.size(); @@ -35,6 +36,7 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_ waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS); waiting_threads[i]->SetArbiterWaitAddress(0); waiting_threads[i]->ResumeFromWait(); + system.PrepareReschedule(waiting_threads[i]->GetProcessorID()); } } } // Anonymous namespace @@ -89,12 +91,20 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a // Determine the modified value depending on the waiting count. s32 updated_value; - if (waiting_threads.empty()) { - updated_value = value + 1; - } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) { - updated_value = value - 1; + if (num_to_wake <= 0) { + if (waiting_threads.empty()) { + updated_value = value + 1; + } else { + updated_value = value - 1; + } } else { - updated_value = value; + if (waiting_threads.empty()) { + updated_value = value + 1; + } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) { + updated_value = value - 1; + } else { + updated_value = value; + } } if (static_cast<s32>(Memory::Read32(address)) != value) { @@ -169,30 +179,22 @@ ResultCode AddressArbiter::WaitForAddressImpl(VAddr address, s64 timeout) { current_thread->WakeAfterDelay(timeout); - system.CpuCore(current_thread->GetProcessorID()).PrepareReschedule(); + system.PrepareReschedule(current_thread->GetProcessorID()); return RESULT_TIMEOUT; } std::vector<SharedPtr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(VAddr address) const { - const auto RetrieveWaitingThreads = [this](std::size_t core_index, - std::vector<SharedPtr<Thread>>& waiting_threads, - VAddr arb_addr) { - const auto& scheduler = system.Scheduler(core_index); - const auto& thread_list = scheduler.GetThreadList(); - - for (const auto& thread : thread_list) { - if (thread->GetArbiterWaitAddress() == arb_addr) { - waiting_threads.push_back(thread); - } - } - }; // Retrieve all threads that are waiting for this address. 
std::vector<SharedPtr<Thread>> threads; - RetrieveWaitingThreads(0, threads, address); - RetrieveWaitingThreads(1, threads, address); - RetrieveWaitingThreads(2, threads, address); - RetrieveWaitingThreads(3, threads, address); + const auto& scheduler = system.GlobalScheduler(); + const auto& thread_list = scheduler.GetThreadList(); + + for (const auto& thread : thread_list) { + if (thread->GetArbiterWaitAddress() == address) { + threads.push_back(thread); + } + } // Sort them by priority, such that the highest priority ones come first. std::sort(threads.begin(), threads.end(), diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 799e5e0d8..f94ac150d 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -12,12 +12,15 @@ #include "core/core.h" #include "core/core_timing.h" +#include "core/core_timing_util.h" #include "core/hle/kernel/address_arbiter.h" #include "core/hle/kernel/client_port.h" +#include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/resource_limit.h" +#include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" #include "core/hle/lock.h" #include "core/hle/result.h" @@ -58,12 +61,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_ if (thread->HasWakeupCallback()) { resume = thread->InvokeWakeupCallback(ThreadWakeupReason::Timeout, thread, nullptr, 0); } - } - - if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 || - thread->GetWaitHandle() != 0) { - ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex || - thread->GetStatus() == ThreadStatus::WaitCondVar); + } else if (thread->GetStatus() == ThreadStatus::WaitMutex || + thread->GetStatus() == ThreadStatus::WaitCondVar) { thread->SetMutexWaitAddress(0); thread->SetCondVarWaitAddress(0); thread->SetWaitHandle(0); @@ -83,18 +82,23 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_ } if (resume) { + if (thread->GetStatus() == ThreadStatus::WaitCondVar || + thread->GetStatus() == ThreadStatus::WaitArb) { + thread->SetWaitSynchronizationResult(RESULT_TIMEOUT); + } thread->ResumeFromWait(); } } struct KernelCore::Impl { - explicit Impl(Core::System& system) : system{system} {} + explicit Impl(Core::System& system) : system{system}, global_scheduler{system} {} void Initialize(KernelCore& kernel) { Shutdown(); InitializeSystemResourceLimit(kernel); InitializeThreads(); + InitializePreemption(); } void Shutdown() { @@ -110,6 +114,9 @@ struct KernelCore::Impl { thread_wakeup_callback_handle_table.Clear(); thread_wakeup_event_type = nullptr; + preemption_event = nullptr; + + global_scheduler.Shutdown(); named_ports.clear(); } @@ -132,6 +139,18 @@ struct KernelCore::Impl { system.CoreTiming().RegisterEvent("ThreadWakeupCallback", ThreadWakeupCallback); } + void InitializePreemption() { + preemption_event = system.CoreTiming().RegisterEvent( + "PreemptionCallback", [this](u64 userdata, s64 cycles_late) { + global_scheduler.PreemptThreads(); + s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10)); + system.CoreTiming().ScheduleEvent(time_interval, preemption_event); + }); + + s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10)); + system.CoreTiming().ScheduleEvent(time_interval, preemption_event); + } + std::atomic<u32> next_object_id{0}; std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin}; 
std::atomic<u64> next_user_process_id{Process::ProcessIDMin}; @@ -140,10 +159,12 @@ struct KernelCore::Impl { // Lists all processes that exist in the current session. std::vector<SharedPtr<Process>> process_list; Process* current_process = nullptr; + Kernel::GlobalScheduler global_scheduler; SharedPtr<ResourceLimit> system_resource_limit; Core::Timing::EventType* thread_wakeup_event_type = nullptr; + Core::Timing::EventType* preemption_event = nullptr; // TODO(yuriks): This can be removed if Thread objects are explicitly pooled in the future, // allowing us to simply use a pool index or similar. Kernel::HandleTable thread_wakeup_callback_handle_table; @@ -203,6 +224,14 @@ const std::vector<SharedPtr<Process>>& KernelCore::GetProcessList() const { return impl->process_list; } +Kernel::GlobalScheduler& KernelCore::GlobalScheduler() { + return impl->global_scheduler; +} + +const Kernel::GlobalScheduler& KernelCore::GlobalScheduler() const { + return impl->global_scheduler; +} + void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) { impl->named_ports.emplace(std::move(name), std::move(port)); } diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index 0cc44ee76..c4397fc77 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -21,6 +21,7 @@ namespace Kernel { class AddressArbiter; class ClientPort; +class GlobalScheduler; class HandleTable; class Process; class ResourceLimit; @@ -75,6 +76,12 @@ public: /// Retrieves the list of processes. const std::vector<SharedPtr<Process>>& GetProcessList() const; + /// Gets the sole instance of the global scheduler + Kernel::GlobalScheduler& GlobalScheduler(); + + /// Gets the sole instance of the global scheduler + const Kernel::GlobalScheduler& GlobalScheduler() const; + /// Adds a port to the named port table void AddNamedPort(std::string name, SharedPtr<ClientPort> port); diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp index 98e87313b..663d0f4b6 100644 --- a/src/core/hle/kernel/mutex.cpp +++ b/src/core/hle/kernel/mutex.cpp @@ -139,6 +139,9 @@ ResultCode Mutex::Release(VAddr address) { thread->SetCondVarWaitAddress(0); thread->SetMutexWaitAddress(0); thread->SetWaitHandle(0); + thread->SetWaitSynchronizationResult(RESULT_SUCCESS); + + system.PrepareReschedule(); return RESULT_SUCCESS; } diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index e80a12ac3..12a900bcc 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -213,10 +213,7 @@ void Process::PrepareForTermination() { } }; - stop_threads(system.Scheduler(0).GetThreadList()); - stop_threads(system.Scheduler(1).GetThreadList()); - stop_threads(system.Scheduler(2).GetThreadList()); - stop_threads(system.Scheduler(3).GetThreadList()); + stop_threads(system.GlobalScheduler().GetThreadList()); FreeTLSRegion(tls_region_address); tls_region_address = 0; diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index e8447b69a..e6dcb9639 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp @@ -1,8 +1,13 @@ // Copyright 2018 yuzu emulator team // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +// +// SelectThreads, Yield functions originally by TuxSH. +// licensed under GPLv2 or later under exception provided by the author. 
#include <algorithm> +#include <set> +#include <unordered_set> #include <utility> #include "common/assert.h" @@ -17,56 +22,434 @@ namespace Kernel { -std::mutex Scheduler::scheduler_mutex; +GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} { + is_reselection_pending = false; +} + +void GlobalScheduler::AddThread(SharedPtr<Thread> thread) { + thread_list.push_back(std::move(thread)); +} + +void GlobalScheduler::RemoveThread(const Thread* thread) { + thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), + thread_list.end()); +} + +/* + * UnloadThread selects a core and forces it to unload its current thread's context + */ +void GlobalScheduler::UnloadThread(s32 core) { + Scheduler& sched = system.Scheduler(core); + sched.UnloadThread(); +} + +/* + * SelectThread takes care of selecting the new scheduled thread. + * It does it in 3 steps: + * - First a thread is selected from the top of the priority queue. If no thread + * is obtained then we move to step two, else we are done. + * - Second we try to get a suggested thread that's not assigned to any core or + * that is not the top thread in that core. + * - Third is no suggested thread is found, we do a second pass and pick a running + * thread in another core and swap it with its current thread. + */ +void GlobalScheduler::SelectThread(u32 core) { + const auto update_thread = [](Thread* thread, Scheduler& sched) { + if (thread != sched.selected_thread) { + if (thread == nullptr) { + ++sched.idle_selection_count; + } + sched.selected_thread = thread; + } + sched.is_context_switch_pending = sched.selected_thread != sched.current_thread; + std::atomic_thread_fence(std::memory_order_seq_cst); + }; + Scheduler& sched = system.Scheduler(core); + Thread* current_thread = nullptr; + // Step 1: Get top thread in schedule queue. + current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front(); + if (current_thread) { + update_thread(current_thread, sched); + return; + } + // Step 2: Try selecting a suggested thread. + Thread* winner = nullptr; + std::set<s32> sug_cores; + for (auto thread : suggested_queue[core]) { + s32 this_core = thread->GetProcessorID(); + Thread* thread_on_core = nullptr; + if (this_core >= 0) { + thread_on_core = scheduled_queue[this_core].front(); + } + if (this_core < 0 || thread != thread_on_core) { + winner = thread; + break; + } + sug_cores.insert(this_core); + } + // if we got a suggested thread, select it, else do a second pass. + if (winner && winner->GetPriority() > 2) { + if (winner->IsRunning()) { + UnloadThread(winner->GetProcessorID()); + } + TransferToCore(winner->GetPriority(), core, winner); + update_thread(winner, sched); + return; + } + // Step 3: Select a suggested thread from another core + for (auto& src_core : sug_cores) { + auto it = scheduled_queue[src_core].begin(); + it++; + if (it != scheduled_queue[src_core].end()) { + Thread* thread_on_core = scheduled_queue[src_core].front(); + Thread* to_change = *it; + if (thread_on_core->IsRunning() || to_change->IsRunning()) { + UnloadThread(src_core); + } + TransferToCore(thread_on_core->GetPriority(), core, thread_on_core); + current_thread = thread_on_core; + break; + } + } + update_thread(current_thread, sched); +} + +/* + * YieldThread takes a thread and moves it to the back of the it's priority list + * This operation can be redundant and no scheduling is changed if marked as so. 
+ */ +bool GlobalScheduler::YieldThread(Thread* yielding_thread) { + // Note: caller should use critical section, etc. + const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); + const u32 priority = yielding_thread->GetPriority(); + + // Yield the thread + ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority), + "Thread yielding without being in front"); + scheduled_queue[core_id].yield(priority); + + Thread* winner = scheduled_queue[core_id].front(priority); + return AskForReselectionOrMarkRedundant(yielding_thread, winner); +} + +/* + * YieldThreadAndBalanceLoad takes a thread and moves it to the back of the it's priority list. + * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or + * a better priority than the next thread in the core. + * This operation can be redundant and no scheduling is changed if marked as so. + */ +bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) { + // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, + // etc. + const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); + const u32 priority = yielding_thread->GetPriority(); + + // Yield the thread + ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority), + "Thread yielding without being in front"); + scheduled_queue[core_id].yield(priority); + + std::array<Thread*, NUM_CPU_CORES> current_threads; + for (u32 i = 0; i < NUM_CPU_CORES; i++) { + current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front(); + } + + Thread* next_thread = scheduled_queue[core_id].front(priority); + Thread* winner = nullptr; + for (auto& thread : suggested_queue[core_id]) { + const s32 source_core = thread->GetProcessorID(); + if (source_core >= 0) { + if (current_threads[source_core] != nullptr) { + if (thread == current_threads[source_core] || + current_threads[source_core]->GetPriority() < min_regular_priority) { + continue; + } + } + } + if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() || + next_thread->GetPriority() < thread->GetPriority()) { + if (thread->GetPriority() <= priority) { + winner = thread; + break; + } + } + } + + if (winner != nullptr) { + if (winner != yielding_thread) { + if (winner->IsRunning()) { + UnloadThread(winner->GetProcessorID()); + } + TransferToCore(winner->GetPriority(), core_id, winner); + } + } else { + winner = next_thread; + } + + return AskForReselectionOrMarkRedundant(yielding_thread, winner); +} + +/* + * YieldThreadAndWaitForLoadBalancing takes a thread and moves it out of the scheduling queue + * and into the suggested queue. If no thread can be squeduled afterwards in that core, + * a suggested thread is obtained instead. + * This operation can be redundant and no scheduling is changed if marked as so. + */ +bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) { + // Note: caller should check if !thread.IsSchedulerOperationRedundant and use critical section, + // etc. + Thread* winner = nullptr; + const u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID()); + + // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead + TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread); + + // If the core is idle, perform load balancing, excluding the threads that have just used this + // function... 
+ if (scheduled_queue[core_id].empty()) { + // Here, "current_threads" is calculated after the ""yield"", unlike yield -1 + std::array<Thread*, NUM_CPU_CORES> current_threads; + for (u32 i = 0; i < NUM_CPU_CORES; i++) { + current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front(); + } + for (auto& thread : suggested_queue[core_id]) { + const s32 source_core = thread->GetProcessorID(); + if (source_core < 0 || thread == current_threads[source_core]) { + continue; + } + if (current_threads[source_core] == nullptr || + current_threads[source_core]->GetPriority() >= min_regular_priority) { + winner = thread; + } + break; + } + if (winner != nullptr) { + if (winner != yielding_thread) { + if (winner->IsRunning()) { + UnloadThread(winner->GetProcessorID()); + } + TransferToCore(winner->GetPriority(), core_id, winner); + } + } else { + winner = yielding_thread; + } + } + + return AskForReselectionOrMarkRedundant(yielding_thread, winner); +} + +void GlobalScheduler::PreemptThreads() { + for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) { + const u32 priority = preemption_priorities[core_id]; + + if (scheduled_queue[core_id].size(priority) > 0) { + scheduled_queue[core_id].front(priority)->IncrementYieldCount(); + scheduled_queue[core_id].yield(priority); + if (scheduled_queue[core_id].size(priority) > 1) { + scheduled_queue[core_id].front(priority)->IncrementYieldCount(); + } + } + + Thread* current_thread = + scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front(); + Thread* winner = nullptr; + for (auto& thread : suggested_queue[core_id]) { + const s32 source_core = thread->GetProcessorID(); + if (thread->GetPriority() != priority) { + continue; + } + if (source_core >= 0) { + Thread* next_thread = scheduled_queue[source_core].empty() + ? nullptr + : scheduled_queue[source_core].front(); + if (next_thread != nullptr && next_thread->GetPriority() < 2) { + break; + } + if (next_thread == thread) { + continue; + } + } + if (current_thread != nullptr && + current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) { + winner = thread; + break; + } + } + + if (winner != nullptr) { + if (winner->IsRunning()) { + UnloadThread(winner->GetProcessorID()); + } + TransferToCore(winner->GetPriority(), core_id, winner); + current_thread = + winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread; + } + + if (current_thread != nullptr && current_thread->GetPriority() > priority) { + for (auto& thread : suggested_queue[core_id]) { + const s32 source_core = thread->GetProcessorID(); + if (thread->GetPriority() < priority) { + continue; + } + if (source_core >= 0) { + Thread* next_thread = scheduled_queue[source_core].empty() + ? 
nullptr + : scheduled_queue[source_core].front(); + if (next_thread != nullptr && next_thread->GetPriority() < 2) { + break; + } + if (next_thread == thread) { + continue; + } + } + if (current_thread != nullptr && + current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) { + winner = thread; + break; + } + } + + if (winner != nullptr) { + if (winner->IsRunning()) { + UnloadThread(winner->GetProcessorID()); + } + TransferToCore(winner->GetPriority(), core_id, winner); + current_thread = winner; + } + } + + is_reselection_pending.store(true, std::memory_order_release); + } +} + +void GlobalScheduler::Suggest(u32 priority, u32 core, Thread* thread) { + suggested_queue[core].add(thread, priority); +} + +void GlobalScheduler::Unsuggest(u32 priority, u32 core, Thread* thread) { + suggested_queue[core].remove(thread, priority); +} + +void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) { + ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core."); + scheduled_queue[core].add(thread, priority); +} + +void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) { + ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core."); + scheduled_queue[core].add(thread, priority, false); +} + +void GlobalScheduler::Reschedule(u32 priority, u32 core, Thread* thread) { + scheduled_queue[core].remove(thread, priority); + scheduled_queue[core].add(thread, priority); +} + +void GlobalScheduler::Unschedule(u32 priority, u32 core, Thread* thread) { + scheduled_queue[core].remove(thread, priority); +} + +void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) { + const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT; + const s32 source_core = thread->GetProcessorID(); + if (source_core == destination_core || !schedulable) { + return; + } + thread->SetProcessorID(destination_core); + if (source_core >= 0) { + Unschedule(priority, source_core, thread); + } + if (destination_core >= 0) { + Unsuggest(priority, destination_core, thread); + Schedule(priority, destination_core, thread); + } + if (source_core >= 0) { + Suggest(priority, source_core, thread); + } +} -Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core) - : cpu_core{cpu_core}, system{system} {} +bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) { + if (current_thread == winner) { + current_thread->IncrementYieldCount(); + return true; + } else { + is_reselection_pending.store(true, std::memory_order_release); + return false; + } +} -Scheduler::~Scheduler() { - for (auto& thread : thread_list) { - thread->Stop(); +void GlobalScheduler::Shutdown() { + for (std::size_t core = 0; core < NUM_CPU_CORES; core++) { + scheduled_queue[core].clear(); + suggested_queue[core].clear(); } + thread_list.clear(); } +GlobalScheduler::~GlobalScheduler() = default; + +Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id) + : system(system), cpu_core(cpu_core), core_id(core_id) {} + +Scheduler::~Scheduler() = default; + bool Scheduler::HaveReadyThreads() const { - std::lock_guard lock{scheduler_mutex}; - return !ready_queue.empty(); + return system.GlobalScheduler().HaveReadyThreads(core_id); } Thread* Scheduler::GetCurrentThread() const { return current_thread.get(); } +Thread* Scheduler::GetSelectedThread() const { + return selected_thread.get(); +} + +void Scheduler::SelectThreads() { + 
system.GlobalScheduler().SelectThread(core_id); +} + u64 Scheduler::GetLastContextSwitchTicks() const { return last_context_switch_time; } -Thread* Scheduler::PopNextReadyThread() { - Thread* next = nullptr; - Thread* thread = GetCurrentThread(); +void Scheduler::TryDoContextSwitch() { + if (is_context_switch_pending) { + SwitchContext(); + } +} - if (thread && thread->GetStatus() == ThreadStatus::Running) { - if (ready_queue.empty()) { - return thread; - } - // We have to do better than the current thread. - // This call returns null when that's not possible. - next = ready_queue.front(); - if (next == nullptr || next->GetPriority() >= thread->GetPriority()) { - next = thread; - } - } else { - if (ready_queue.empty()) { - return nullptr; +void Scheduler::UnloadThread() { + Thread* const previous_thread = GetCurrentThread(); + Process* const previous_process = system.Kernel().CurrentProcess(); + + UpdateLastContextSwitchTime(previous_thread, previous_process); + + // Save context for previous thread + if (previous_thread) { + cpu_core.SaveContext(previous_thread->GetContext()); + // Save the TPIDR_EL0 system register in case it was modified. + previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); + + if (previous_thread->GetStatus() == ThreadStatus::Running) { + // This is only the case when a reschedule is triggered without the current thread + // yielding execution (i.e. an event triggered, system core time-sliced, etc) + previous_thread->SetStatus(ThreadStatus::Ready); } - next = ready_queue.front(); + previous_thread->SetIsRunning(false); } - - return next; + current_thread = nullptr; } -void Scheduler::SwitchContext(Thread* new_thread) { - Thread* previous_thread = GetCurrentThread(); +void Scheduler::SwitchContext() { + Thread* const previous_thread = GetCurrentThread(); + Thread* const new_thread = GetSelectedThread(); + + is_context_switch_pending = false; + if (new_thread == previous_thread) { + return; + } + Process* const previous_process = system.Kernel().CurrentProcess(); UpdateLastContextSwitchTime(previous_thread, previous_process); @@ -80,23 +463,23 @@ void Scheduler::SwitchContext(Thread* new_thread) { if (previous_thread->GetStatus() == ThreadStatus::Running) { // This is only the case when a reschedule is triggered without the current thread // yielding execution (i.e. 
an event triggered, system core time-sliced, etc) - ready_queue.add(previous_thread, previous_thread->GetPriority(), false); previous_thread->SetStatus(ThreadStatus::Ready); } + previous_thread->SetIsRunning(false); } // Load context of new thread if (new_thread) { + ASSERT_MSG(new_thread->GetProcessorID() == this->core_id, + "Thread must be assigned to this core."); ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready, "Thread must be ready to become running."); // Cancel any outstanding wakeup events for this thread new_thread->CancelWakeupTimer(); - current_thread = new_thread; - - ready_queue.remove(new_thread, new_thread->GetPriority()); new_thread->SetStatus(ThreadStatus::Running); + new_thread->SetIsRunning(true); auto* const thread_owner_process = current_thread->GetOwnerProcess(); if (previous_process != thread_owner_process) { @@ -130,124 +513,9 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { last_context_switch_time = most_recent_switch_ticks; } -void Scheduler::Reschedule() { - std::lock_guard lock{scheduler_mutex}; - - Thread* cur = GetCurrentThread(); - Thread* next = PopNextReadyThread(); - - if (cur && next) { - LOG_TRACE(Kernel, "context switch {} -> {}", cur->GetObjectId(), next->GetObjectId()); - } else if (cur) { - LOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId()); - } else if (next) { - LOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId()); - } - - SwitchContext(next); -} - -void Scheduler::AddThread(SharedPtr<Thread> thread) { - std::lock_guard lock{scheduler_mutex}; - - thread_list.push_back(std::move(thread)); -} - -void Scheduler::RemoveThread(Thread* thread) { - std::lock_guard lock{scheduler_mutex}; - - thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), - thread_list.end()); -} - -void Scheduler::ScheduleThread(Thread* thread, u32 priority) { - std::lock_guard lock{scheduler_mutex}; - - ASSERT(thread->GetStatus() == ThreadStatus::Ready); - ready_queue.add(thread, priority); -} - -void Scheduler::UnscheduleThread(Thread* thread, u32 priority) { - std::lock_guard lock{scheduler_mutex}; - - ASSERT(thread->GetStatus() == ThreadStatus::Ready); - ready_queue.remove(thread, priority); -} - -void Scheduler::SetThreadPriority(Thread* thread, u32 priority) { - std::lock_guard lock{scheduler_mutex}; - if (thread->GetPriority() == priority) { - return; - } - - // If thread was ready, adjust queues - if (thread->GetStatus() == ThreadStatus::Ready) - ready_queue.adjust(thread, thread->GetPriority(), priority); -} - -Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const { - std::lock_guard lock{scheduler_mutex}; - - const u32 mask = 1U << core; - for (auto* thread : ready_queue) { - if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) { - return thread; - } - } - return nullptr; -} - -void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { - ASSERT(thread != nullptr); - // Avoid yielding if the thread isn't even running. 
- ASSERT(thread->GetStatus() == ThreadStatus::Running); - - // Sanity check that the priority is valid - ASSERT(thread->GetPriority() < THREADPRIO_COUNT); - - // Yield this thread -- sleep for zero time and force reschedule to different thread - GetCurrentThread()->Sleep(0); -} - -void Scheduler::YieldWithLoadBalancing(Thread* thread) { - ASSERT(thread != nullptr); - const auto priority = thread->GetPriority(); - const auto core = static_cast<u32>(thread->GetProcessorID()); - - // Avoid yielding if the thread isn't even running. - ASSERT(thread->GetStatus() == ThreadStatus::Running); - - // Sanity check that the priority is valid - ASSERT(priority < THREADPRIO_COUNT); - - // Sleep for zero time to be able to force reschedule to different thread - GetCurrentThread()->Sleep(0); - - Thread* suggested_thread = nullptr; - - // Search through all of the cpu cores (except this one) for a suggested thread. - // Take the first non-nullptr one - for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) { - const auto res = - system.CpuCore(cur_core).Scheduler().GetNextSuggestedThread(core, priority); - - // If scheduler provides a suggested thread - if (res != nullptr) { - // And its better than the current suggested thread (or is the first valid one) - if (suggested_thread == nullptr || - suggested_thread->GetPriority() > res->GetPriority()) { - suggested_thread = res; - } - } - } - - // If a suggested thread was found, queue that for this core - if (suggested_thread != nullptr) - suggested_thread->ChangeCore(core, suggested_thread->GetAffinityMask()); -} - -void Scheduler::YieldAndWaitForLoadBalancing(Thread* thread) { - UNIMPLEMENTED_MSG("Wait for load balancing thread yield type is not implemented!"); +void Scheduler::Shutdown() { + current_thread = nullptr; + selected_thread = nullptr; } } // namespace Kernel diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h index b29bf7be8..fcae28e0a 100644 --- a/src/core/hle/kernel/scheduler.h +++ b/src/core/hle/kernel/scheduler.h @@ -20,124 +20,172 @@ namespace Kernel { class Process; -class Scheduler final { +class GlobalScheduler final { public: - explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core); - ~Scheduler(); + static constexpr u32 NUM_CPU_CORES = 4; - /// Returns whether there are any threads that are ready to run. - bool HaveReadyThreads() const; + explicit GlobalScheduler(Core::System& system); + ~GlobalScheduler(); + /// Adds a new thread to the scheduler + void AddThread(SharedPtr<Thread> thread); - /// Reschedules to the next available thread (call after current thread is suspended) - void Reschedule(); + /// Removes a thread from the scheduler + void RemoveThread(const Thread* thread); - /// Gets the current running thread - Thread* GetCurrentThread() const; + /// Returns a list of all threads managed by the scheduler + const std::vector<SharedPtr<Thread>>& GetThreadList() const { + return thread_list; + } - /// Gets the timestamp for the last context switch in ticks. - u64 GetLastContextSwitchTicks() const; + // Add a thread to the suggested queue of a cpu core. Suggested threads may be + // picked if no thread is scheduled to run on the core. + void Suggest(u32 priority, u32 core, Thread* thread); - /// Adds a new thread to the scheduler - void AddThread(SharedPtr<Thread> thread); + // Remove a thread to the suggested queue of a cpu core. Suggested threads may be + // picked if no thread is scheduled to run on the core. 
+ void Unsuggest(u32 priority, u32 core, Thread* thread); - /// Removes a thread from the scheduler - void RemoveThread(Thread* thread); + // Add a thread to the scheduling queue of a cpu core. The thread is added at the + // back the queue in its priority level + void Schedule(u32 priority, u32 core, Thread* thread); - /// Schedules a thread that has become "ready" - void ScheduleThread(Thread* thread, u32 priority); + // Add a thread to the scheduling queue of a cpu core. The thread is added at the + // front the queue in its priority level + void SchedulePrepend(u32 priority, u32 core, Thread* thread); - /// Unschedules a thread that was already scheduled - void UnscheduleThread(Thread* thread, u32 priority); + // Reschedule an already scheduled thread based on a new priority + void Reschedule(u32 priority, u32 core, Thread* thread); - /// Sets the priority of a thread in the scheduler - void SetThreadPriority(Thread* thread, u32 priority); + // Unschedule a thread. + void Unschedule(u32 priority, u32 core, Thread* thread); - /// Gets the next suggested thread for load balancing - Thread* GetNextSuggestedThread(u32 core, u32 minimum_priority) const; + // Transfers a thread into an specific core. If the destination_core is -1 + // it will be unscheduled from its source code and added into its suggested + // queue. + void TransferToCore(u32 priority, s32 destination_core, Thread* thread); - /** - * YieldWithoutLoadBalancing -- analogous to normal yield on a system - * Moves the thread to the end of the ready queue for its priority, and then reschedules the - * system to the new head of the queue. - * - * Example (Single Core -- but can be extrapolated to multi): - * ready_queue[prio=0]: ThreadA, ThreadB, ThreadC (->exec order->) - * Currently Running: ThreadR - * - * ThreadR calls YieldWithoutLoadBalancing - * - * ThreadR is moved to the end of ready_queue[prio=0]: - * ready_queue[prio=0]: ThreadA, ThreadB, ThreadC, ThreadR (->exec order->) - * Currently Running: Nothing - * - * System is rescheduled (ThreadA is popped off of queue): - * ready_queue[prio=0]: ThreadB, ThreadC, ThreadR (->exec order->) - * Currently Running: ThreadA - * - * If the queue is empty at time of call, no yielding occurs. This does not cross between cores - * or priorities at all. + /* + * UnloadThread selects a core and forces it to unload its current thread's context */ - void YieldWithoutLoadBalancing(Thread* thread); + void UnloadThread(s32 core); + + /* + * SelectThread takes care of selecting the new scheduled thread. + * It does it in 3 steps: + * - First a thread is selected from the top of the priority queue. If no thread + * is obtained then we move to step two, else we are done. + * - Second we try to get a suggested thread that's not assigned to any core or + * that is not the top thread in that core. + * - Third is no suggested thread is found, we do a second pass and pick a running + * thread in another core and swap it with its current thread. + */ + void SelectThread(u32 core); - /** - * YieldWithLoadBalancing -- yield but with better selection of the new running thread - * Moves the current thread to the end of the ready queue for its priority, then selects a - * 'suggested thread' (a thread on a different core that could run on this core) from the - * scheduler, changes its core, and reschedules the current core to that thread. 
- * - * Example (Dual Core -- can be extrapolated to Quad Core, this is just normal yield if it were - * single core): - * ready_queue[core=0][prio=0]: ThreadA, ThreadB (affinities not pictured as irrelevant - * ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only] - * Currently Running: ThreadQ on Core 0 || ThreadP on Core 1 - * - * ThreadQ calls YieldWithLoadBalancing - * - * ThreadQ is moved to the end of ready_queue[core=0][prio=0]: - * ready_queue[core=0][prio=0]: ThreadA, ThreadB - * ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only] - * Currently Running: ThreadQ on Core 0 || ThreadP on Core 1 - * - * A list of suggested threads for each core is compiled - * Suggested Threads: {ThreadC on Core 1} - * If this were quad core (as the switch is), there could be between 0 and 3 threads in this - * list. If there are more than one, the thread is selected by highest prio. - * - * ThreadC is core changed to Core 0: - * ready_queue[core=0][prio=0]: ThreadC, ThreadA, ThreadB, ThreadQ - * ready_queue[core=1][prio=0]: ThreadD - * Currently Running: None on Core 0 || ThreadP on Core 1 - * - * System is rescheduled (ThreadC is popped off of queue): - * ready_queue[core=0][prio=0]: ThreadA, ThreadB, ThreadQ - * ready_queue[core=1][prio=0]: ThreadD - * Currently Running: ThreadC on Core 0 || ThreadP on Core 1 - * - * If no suggested threads can be found this will behave just as normal yield. If there are - * multiple candidates for the suggested thread on a core, the highest prio is taken. + bool HaveReadyThreads(u32 core_id) const { + return !scheduled_queue[core_id].empty(); + } + + /* + * YieldThread takes a thread and moves it to the back of the it's priority list + * This operation can be redundant and no scheduling is changed if marked as so. */ - void YieldWithLoadBalancing(Thread* thread); + bool YieldThread(Thread* thread); - /// Currently unknown -- asserts as unimplemented on call - void YieldAndWaitForLoadBalancing(Thread* thread); + /* + * YieldThreadAndBalanceLoad takes a thread and moves it to the back of the it's priority list. + * Afterwards, tries to pick a suggested thread from the suggested queue that has worse time or + * a better priority than the next thread in the core. + * This operation can be redundant and no scheduling is changed if marked as so. + */ + bool YieldThreadAndBalanceLoad(Thread* thread); - /// Returns a list of all threads managed by the scheduler - const std::vector<SharedPtr<Thread>>& GetThreadList() const { - return thread_list; + /* + * YieldThreadAndWaitForLoadBalancing takes a thread and moves it out of the scheduling queue + * and into the suggested queue. If no thread can be squeduled afterwards in that core, + * a suggested thread is obtained instead. + * This operation can be redundant and no scheduling is changed if marked as so. + */ + bool YieldThreadAndWaitForLoadBalancing(Thread* thread); + + /* + * PreemptThreads this operation rotates the scheduling queues of threads at + * a preemption priority and then does some core rebalancing. Preemption priorities + * can be found in the array 'preemption_priorities'. This operation happens + * every 10ms. 
+ */ + void PreemptThreads(); + + u32 CpuCoresCount() const { + return NUM_CPU_CORES; + } + + void SetReselectionPending() { + is_reselection_pending.store(true, std::memory_order_release); } + bool IsReselectionPending() const { + return is_reselection_pending.load(std::memory_order_acquire); + } + + void Shutdown(); + private: - /** - * Pops and returns the next thread from the thread queue - * @return A pointer to the next ready thread - */ - Thread* PopNextReadyThread(); + bool AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner); + + static constexpr u32 min_regular_priority = 2; + std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue; + std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue; + std::atomic<bool> is_reselection_pending; + + // `preemption_priorities` are the priority levels at which the global scheduler + // preempts threads every 10 ms. They are ordered from Core 0 to Core 3 + std::array<u32, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62}; + + /// Lists all thread ids that aren't deleted/etc. + std::vector<SharedPtr<Thread>> thread_list; + Core::System& system; +}; + +class Scheduler final { +public: + explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id); + ~Scheduler(); + + /// Returns whether there are any threads that are ready to run. + bool HaveReadyThreads() const; + /// Reschedules to the next available thread (call after current thread is suspended) + void TryDoContextSwitch(); + + /// Unloads currently running thread + void UnloadThread(); + + /// Select the threads in top of the scheduling multilist. + void SelectThreads(); + + /// Gets the current running thread + Thread* GetCurrentThread() const; + + /// Gets the currently selected thread from the top of the multilevel queue + Thread* GetSelectedThread() const; + + /// Gets the timestamp for the last context switch in ticks. + u64 GetLastContextSwitchTicks() const; + + bool ContextSwitchPending() const { + return is_context_switch_pending; + } + + /// Shutdowns the scheduler. + void Shutdown(); + +private: + friend class GlobalScheduler; /** * Switches the CPU's active thread context to that of the specified thread * @param new_thread The thread to switch to */ - void SwitchContext(Thread* new_thread); + void SwitchContext(); /** * Called on every context switch to update the internal timestamp @@ -152,19 +200,16 @@ private: */ void UpdateLastContextSwitchTime(Thread* thread, Process* process); - /// Lists all thread ids that aren't deleted/etc. - std::vector<SharedPtr<Thread>> thread_list; - - /// Lists only ready thread ids. 
- Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue; - SharedPtr<Thread> current_thread = nullptr; + SharedPtr<Thread> selected_thread = nullptr; + Core::System& system; Core::ARM_Interface& cpu_core; u64 last_context_switch_time = 0; + u64 idle_selection_count = 0; + const u32 core_id; - Core::System& system; - static std::mutex scheduler_mutex; + bool is_context_switch_pending = false; }; } // namespace Kernel diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 1fd1a732a..f64236be1 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -516,7 +516,7 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr thread->WakeAfterDelay(nano_seconds); thread->SetWakeupCallback(DefaultThreadWakeupCallback); - system.CpuCore(thread->GetProcessorID()).PrepareReschedule(); + system.PrepareReschedule(thread->GetProcessorID()); return RESULT_TIMEOUT; } @@ -534,6 +534,7 @@ static ResultCode CancelSynchronization(Core::System& system, Handle thread_hand } thread->CancelWait(); + system.PrepareReschedule(thread->GetProcessorID()); return RESULT_SUCCESS; } @@ -1066,6 +1067,8 @@ static ResultCode SetThreadActivity(Core::System& system, Handle handle, u32 act } thread->SetActivity(static_cast<ThreadActivity>(activity)); + + system.PrepareReschedule(thread->GetProcessorID()); return RESULT_SUCCESS; } @@ -1147,7 +1150,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri thread->SetPriority(priority); - system.CpuCore(thread->GetProcessorID()).PrepareReschedule(); + system.PrepareReschedule(thread->GetProcessorID()); return RESULT_SUCCESS; } @@ -1503,7 +1506,7 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e thread->SetName( fmt::format("thread[entry_point={:X}, handle={:X}]", entry_point, *new_thread_handle)); - system.CpuCore(thread->GetProcessorID()).PrepareReschedule(); + system.PrepareReschedule(thread->GetProcessorID()); return RESULT_SUCCESS; } @@ -1525,7 +1528,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) { thread->ResumeFromWait(); if (thread->GetStatus() == ThreadStatus::Ready) { - system.CpuCore(thread->GetProcessorID()).PrepareReschedule(); + system.PrepareReschedule(thread->GetProcessorID()); } return RESULT_SUCCESS; @@ -1537,7 +1540,7 @@ static void ExitThread(Core::System& system) { auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); current_thread->Stop(); - system.CurrentScheduler().RemoveThread(current_thread); + system.GlobalScheduler().RemoveThread(current_thread); system.PrepareReschedule(); } @@ -1553,17 +1556,18 @@ static void SleepThread(Core::System& system, s64 nanoseconds) { auto& scheduler = system.CurrentScheduler(); auto* const current_thread = scheduler.GetCurrentThread(); + bool is_redundant = false; if (nanoseconds <= 0) { switch (static_cast<SleepType>(nanoseconds)) { case SleepType::YieldWithoutLoadBalancing: - scheduler.YieldWithoutLoadBalancing(current_thread); + is_redundant = current_thread->YieldSimple(); break; case SleepType::YieldWithLoadBalancing: - scheduler.YieldWithLoadBalancing(current_thread); + is_redundant = current_thread->YieldAndBalanceLoad(); break; case SleepType::YieldAndWaitForLoadBalancing: - scheduler.YieldAndWaitForLoadBalancing(current_thread); + is_redundant = current_thread->YieldAndWaitForLoadBalancing(); break; default: UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds); @@ -1572,10 +1576,13 @@ static void 
SleepThread(Core::System& system, s64 nanoseconds) { current_thread->Sleep(nanoseconds); } - // Reschedule all CPU cores - for (std::size_t i = 0; i < Core::NUM_CPU_CORES; ++i) { - system.CpuCore(i).PrepareReschedule(); + if (is_redundant) { + // The yield was redundant, so the core is essentially idle. Some games keep a core + // idling like this while it does no useful work, so advance timing here to avoid + // costly repeated calls. + system.CoreTiming().AddTicks(2000); } + system.PrepareReschedule(current_thread->GetProcessorID()); } /// Wait process wide key atomic @@ -1601,6 +1608,8 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add return ERR_INVALID_ADDRESS; } + ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); + auto* const current_process = system.Kernel().CurrentProcess(); const auto& handle_table = current_process->GetHandleTable(); SharedPtr<Thread> thread = handle_table.Get<Thread>(thread_handle); @@ -1622,7 +1631,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add // Note: Deliberately don't attempt to inherit the lock owner's priority. - system.CpuCore(current_thread->GetProcessorID()).PrepareReschedule(); + system.PrepareReschedule(current_thread->GetProcessorID()); return RESULT_SUCCESS; } @@ -1632,24 +1641,19 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}", condition_variable_addr, target); - const auto RetrieveWaitingThreads = [&system](std::size_t core_index, - std::vector<SharedPtr<Thread>>& waiting_threads, - VAddr condvar_addr) { - const auto& scheduler = system.Scheduler(core_index); - const auto& thread_list = scheduler.GetThreadList(); - - for (const auto& thread : thread_list) { - if (thread->GetCondVarWaitAddress() == condvar_addr) - waiting_threads.push_back(thread); - } - }; + ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); // Retrieve a list of all threads that are waiting for this condition variable. std::vector<SharedPtr<Thread>> waiting_threads; - RetrieveWaitingThreads(0, waiting_threads, condition_variable_addr); - RetrieveWaitingThreads(1, waiting_threads, condition_variable_addr); - RetrieveWaitingThreads(2, waiting_threads, condition_variable_addr); - RetrieveWaitingThreads(3, waiting_threads, condition_variable_addr); + const auto& scheduler = system.GlobalScheduler(); + const auto& thread_list = scheduler.GetThreadList(); + + for (const auto& thread : thread_list) { + if (thread->GetCondVarWaitAddress() == condition_variable_addr) { + waiting_threads.push_back(thread); + } + } + // Sort them by priority, such that the highest priority ones come first. std::sort(waiting_threads.begin(), waiting_threads.end(), [](const SharedPtr<Thread>& lhs, const SharedPtr<Thread>& rhs) { @@ -1679,18 +1683,20 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var // Atomically read the value of the mutex. u32 mutex_val = 0; + u32 update_val = 0; + const VAddr mutex_address = thread->GetMutexWaitAddress(); do { - monitor.SetExclusive(current_core, thread->GetMutexWaitAddress()); + monitor.SetExclusive(current_core, mutex_address); // If the mutex is not yet acquired, acquire it.
- mutex_val = Memory::Read32(thread->GetMutexWaitAddress()); + mutex_val = Memory::Read32(mutex_address); if (mutex_val != 0) { - monitor.ClearExclusive(); - break; + update_val = mutex_val | Mutex::MutexHasWaitersFlag; + } else { + update_val = thread->GetWaitHandle(); } - } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(), - thread->GetWaitHandle())); + } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val)); if (mutex_val == 0) { // We were able to acquire the mutex, resume this thread. ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar); @@ -1704,20 +1710,9 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var thread->SetLockOwner(nullptr); thread->SetMutexWaitAddress(0); thread->SetWaitHandle(0); - system.CpuCore(thread->GetProcessorID()).PrepareReschedule(); + thread->SetWaitSynchronizationResult(RESULT_SUCCESS); + system.PrepareReschedule(thread->GetProcessorID()); } else { - // Atomically signal that the mutex now has a waiting thread. - do { - monitor.SetExclusive(current_core, thread->GetMutexWaitAddress()); - - // Ensure that the mutex value is still what we expect. - u32 value = Memory::Read32(thread->GetMutexWaitAddress()); - // TODO(Subv): When this happens, the kernel just clears the exclusive state and - // retries the initial read for this thread. - ASSERT_MSG(mutex_val == value, "Unhandled synchronization primitive case"); - } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(), - mutex_val | Mutex::MutexHasWaitersFlag)); - // The mutex is already owned by some other thread, make this thread wait on it. const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask); const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); @@ -1728,6 +1723,7 @@ static ResultCode SignalProcessWideKey(Core::System& system, VAddr condition_var thread->SetStatus(ThreadStatus::WaitMutex); owner->AddMutexWaiter(thread); + system.PrepareReschedule(thread->GetProcessorID()); } } @@ -1754,7 +1750,12 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type); auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter(); - return address_arbiter.WaitForAddress(address, arbitration_type, value, timeout); + const ResultCode result = + address_arbiter.WaitForAddress(address, arbitration_type, value, timeout); + if (result == RESULT_SUCCESS) { + system.PrepareReschedule(); + } + return result; } // Signals to an address (via Address Arbiter) @@ -2040,7 +2041,10 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, return ERR_INVALID_HANDLE; } + system.PrepareReschedule(thread->GetProcessorID()); thread->ChangeCore(core, affinity_mask); + system.PrepareReschedule(thread->GetProcessorID()); + return RESULT_SUCCESS; } @@ -2151,6 +2155,7 @@ static ResultCode SignalEvent(Core::System& system, Handle handle) { } writable_event->Signal(); + system.PrepareReschedule(); return RESULT_SUCCESS; } diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index ec529e7f2..962530d2d 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -45,15 +45,7 @@ void Thread::Stop() { callback_handle); kernel.ThreadWakeupCallbackHandleTable().Close(callback_handle); callback_handle = 0; - - // Clean up thread from ready queue - // This is only needed when the thread is terminated 
forcefully (SVC TerminateProcess) - if (status == ThreadStatus::Ready || status == ThreadStatus::Paused) { - scheduler->UnscheduleThread(this, current_priority); - } - - status = ThreadStatus::Dead; - + SetStatus(ThreadStatus::Dead); WakeupAllWaitingThreads(); // Clean up any dangling references in objects that this thread was waiting for @@ -132,17 +124,16 @@ void Thread::ResumeFromWait() { wakeup_callback = nullptr; if (activity == ThreadActivity::Paused) { - status = ThreadStatus::Paused; + SetStatus(ThreadStatus::Paused); return; } - status = ThreadStatus::Ready; - - ChangeScheduler(); + SetStatus(ThreadStatus::Ready); } void Thread::CancelWait() { ASSERT(GetStatus() == ThreadStatus::WaitSynch); + ClearWaitObjects(); SetWaitSynchronizationResult(ERR_SYNCHRONIZATION_CANCELED); ResumeFromWait(); } @@ -205,9 +196,9 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name thread->name = std::move(name); thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap(); thread->owner_process = &owner_process; + auto& scheduler = kernel.GlobalScheduler(); + scheduler.AddThread(thread); thread->tls_address = thread->owner_process->CreateTLSRegion(); - thread->scheduler = &system.Scheduler(processor_id); - thread->scheduler->AddThread(thread); thread->owner_process->RegisterThread(thread.get()); @@ -250,6 +241,22 @@ void Thread::SetStatus(ThreadStatus new_status) { return; } + switch (new_status) { + case ThreadStatus::Ready: + case ThreadStatus::Running: + SetSchedulingStatus(ThreadSchedStatus::Runnable); + break; + case ThreadStatus::Dormant: + SetSchedulingStatus(ThreadSchedStatus::None); + break; + case ThreadStatus::Dead: + SetSchedulingStatus(ThreadSchedStatus::Exited); + break; + default: + SetSchedulingStatus(ThreadSchedStatus::Paused); + break; + } + if (status == ThreadStatus::Running) { last_running_ticks = Core::System::GetInstance().CoreTiming().GetTicks(); } @@ -311,8 +318,7 @@ void Thread::UpdatePriority() { return; } - scheduler->SetThreadPriority(this, new_priority); - current_priority = new_priority; + SetCurrentPriority(new_priority); if (!lock_owner) { return; @@ -328,47 +334,7 @@ void Thread::UpdatePriority() { } void Thread::ChangeCore(u32 core, u64 mask) { - ideal_core = core; - affinity_mask = mask; - ChangeScheduler(); -} - -void Thread::ChangeScheduler() { - if (status != ThreadStatus::Ready) { - return; - } - - auto& system = Core::System::GetInstance(); - std::optional<s32> new_processor_id{GetNextProcessorId(affinity_mask)}; - - if (!new_processor_id) { - new_processor_id = processor_id; - } - if (ideal_core != -1 && system.Scheduler(ideal_core).GetCurrentThread() == nullptr) { - new_processor_id = ideal_core; - } - - ASSERT(*new_processor_id < 4); - - // Add thread to new core's scheduler - auto& next_scheduler = system.Scheduler(*new_processor_id); - - if (*new_processor_id != processor_id) { - // Remove thread from previous core's scheduler - scheduler->RemoveThread(this); - next_scheduler.AddThread(this); - } - - processor_id = *new_processor_id; - - // If the thread was ready, unschedule from the previous core and schedule on the new core - scheduler->UnscheduleThread(this, current_priority); - next_scheduler.ScheduleThread(this, current_priority); - - // Change thread's scheduler - scheduler = &next_scheduler; - - system.CpuCore(processor_id).PrepareReschedule(); + SetCoreAndAffinityMask(core, mask); } bool Thread::AllWaitObjectsReady() const { @@ -388,10 +354,8 @@ void Thread::SetActivity(ThreadActivity 
value) { if (value == ThreadActivity::Paused) { // Set status if not waiting - if (status == ThreadStatus::Ready) { - status = ThreadStatus::Paused; - } else if (status == ThreadStatus::Running) { - status = ThreadStatus::Paused; + if (status == ThreadStatus::Ready || status == ThreadStatus::Running) { + SetStatus(ThreadStatus::Paused); Core::System::GetInstance().CpuCore(processor_id).PrepareReschedule(); } } else if (status == ThreadStatus::Paused) { @@ -408,6 +372,170 @@ void Thread::Sleep(s64 nanoseconds) { WakeAfterDelay(nanoseconds); } +bool Thread::YieldSimple() { + auto& scheduler = kernel.GlobalScheduler(); + return scheduler.YieldThread(this); +} + +bool Thread::YieldAndBalanceLoad() { + auto& scheduler = kernel.GlobalScheduler(); + return scheduler.YieldThreadAndBalanceLoad(this); +} + +bool Thread::YieldAndWaitForLoadBalancing() { + auto& scheduler = kernel.GlobalScheduler(); + return scheduler.YieldThreadAndWaitForLoadBalancing(this); +} + +void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) { + const u32 old_flags = scheduling_state; + scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) | + static_cast<u32>(new_status); + AdjustSchedulingOnStatus(old_flags); +} + +void Thread::SetCurrentPriority(u32 new_priority) { + const u32 old_priority = std::exchange(current_priority, new_priority); + AdjustSchedulingOnPriority(old_priority); +} + +ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { + const auto HighestSetCore = [](u64 mask, u32 max_cores) { + for (s32 core = max_cores - 1; core >= 0; core--) { + if (((mask >> core) & 1) != 0) { + return core; + } + } + return -1; + }; + + const bool use_override = affinity_override_count != 0; + if (new_core == THREADPROCESSORID_DONT_UPDATE) { + new_core = use_override ? 
ideal_core_override : ideal_core; + if ((new_affinity_mask & (1ULL << new_core)) == 0) { + return ERR_INVALID_COMBINATION; + } + } + if (use_override) { + ideal_core_override = new_core; + affinity_mask_override = new_affinity_mask; + } else { + const u64 old_affinity_mask = std::exchange(affinity_mask, new_affinity_mask); + ideal_core = new_core; + if (old_affinity_mask != new_affinity_mask) { + const s32 old_core = processor_id; + if (processor_id >= 0 && ((affinity_mask >> processor_id) & 1) == 0) { + if (ideal_core < 0) { + processor_id = HighestSetCore(affinity_mask, GlobalScheduler::NUM_CPU_CORES); + } else { + processor_id = ideal_core; + } + } + AdjustSchedulingOnAffinity(old_affinity_mask, old_core); + } + } + return RESULT_SUCCESS; +} + +void Thread::AdjustSchedulingOnStatus(u32 old_flags) { + if (old_flags == scheduling_state) { + return; + } + + auto& scheduler = kernel.GlobalScheduler(); + if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) == + ThreadSchedStatus::Runnable) { + // In this case the thread was running and is now pausing or exiting + if (processor_id >= 0) { + scheduler.Unschedule(current_priority, processor_id, this); + } + + for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { + if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { + scheduler.Unsuggest(current_priority, static_cast<u32>(core), this); + } + } + } else if (GetSchedulingStatus() == ThreadSchedStatus::Runnable) { + // The thread was stopped and has now become runnable + if (processor_id >= 0) { + scheduler.Schedule(current_priority, processor_id, this); + } + + for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { + if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { + scheduler.Suggest(current_priority, static_cast<u32>(core), this); + } + } + } + + scheduler.SetReselectionPending(); +} + +void Thread::AdjustSchedulingOnPriority(u32 old_priority) { + if (GetSchedulingStatus() != ThreadSchedStatus::Runnable) { + return; + } + auto& scheduler = Core::System::GetInstance().GlobalScheduler(); + if (processor_id >= 0) { + scheduler.Unschedule(old_priority, processor_id, this); + } + + for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { + if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { + scheduler.Unsuggest(old_priority, core, this); + } + } + + // Add thread to the new priority queues.
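// A small self-contained sketch (not part of this patch) of how the affinity-mask
// loops above partition cores: the thread's own core would be Schedule()d, while
// every other core allowed by the mask would only be Suggest()ed. PartitionCores
// and its return type are hypothetical helpers for illustration.
#include <cstdint>
#include <utility>
#include <vector>

std::pair<std::vector<int>, std::vector<int>> PartitionCores(std::uint64_t affinity_mask,
                                                             int processor_id,
                                                             int num_cores = 4) {
    std::pair<std::vector<int>, std::vector<int>> result; // {scheduled, suggested}
    for (int core = 0; core < num_cores; ++core) {
        if (((affinity_mask >> core) & 1) == 0) {
            continue; // core not allowed by the mask
        }
        if (core == processor_id) {
            result.first.push_back(core);
        } else {
            result.second.push_back(core);
        }
    }
    return result;
}
// Example: affinity_mask = 0b1011 and processor_id = 0 yields scheduled {0} and
// suggested {1, 3}.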
+ Thread* current_thread = GetCurrentThread(); + + if (processor_id >= 0) { + if (current_thread == this) { + scheduler.SchedulePrepend(current_priority, processor_id, this); + } else { + scheduler.Schedule(current_priority, processor_id, this); + } + } + + for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { + if (core != processor_id && ((affinity_mask >> core) & 1) != 0) { + scheduler.Suggest(current_priority, core, this); + } + } + + scheduler.SetReselectionPending(); +} + +void Thread::AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core) { + auto& scheduler = Core::System::GetInstance().GlobalScheduler(); + if (GetSchedulingStatus() != ThreadSchedStatus::Runnable || + current_priority >= THREADPRIO_COUNT) { + return; + } + + for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { + if (((old_affinity_mask >> core) & 1) != 0) { + if (core == old_core) { + scheduler.Unschedule(current_priority, core, this); + } else { + scheduler.Unsuggest(current_priority, core, this); + } + } + } + + for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) { + if (((affinity_mask >> core) & 1) != 0) { + if (core == processor_id) { + scheduler.Schedule(current_priority, core, this); + } else { + scheduler.Suggest(current_priority, core, this); + } + } + } + + scheduler.SetReselectionPending(); +} + //////////////////////////////////////////////////////////////////////////////////////////////////// /** diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index 07e989637..c9870873d 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h @@ -75,6 +75,26 @@ enum class ThreadActivity : u32 { Paused = 1, }; +enum class ThreadSchedStatus : u32 { + None = 0, + Paused = 1, + Runnable = 2, + Exited = 3, +}; + +enum class ThreadSchedFlags : u32 { + ProcessPauseFlag = 1 << 4, + ThreadPauseFlag = 1 << 5, + ProcessDebugPauseFlag = 1 << 6, + KernelInitPauseFlag = 1 << 8, +}; + +enum class ThreadSchedMasks : u32 { + LowMask = 0x000f, + HighMask = 0xfff0, + ForcePauseMask = 0x0070, +}; + class Thread final : public WaitObject { public: using MutexWaitingThreads = std::vector<SharedPtr<Thread>>; @@ -278,6 +298,10 @@ public: return processor_id; } + void SetProcessorID(s32 new_core) { + processor_id = new_core; + } + Process* GetOwnerProcess() { return owner_process; } @@ -295,6 +319,9 @@ public: } void ClearWaitObjects() { + for (const auto& waiting_object : wait_objects) { + waiting_object->RemoveWaitingThread(this); + } wait_objects.clear(); } @@ -383,11 +410,47 @@ public: /// Sleeps this thread for the given amount of nanoseconds. void Sleep(s64 nanoseconds); + /// Yields this thread without rebalancing loads. + bool YieldSimple(); + + /// Yields this thread and does a load rebalancing. 
+ bool YieldAndBalanceLoad(); + + /// Yields this thread and if the core is left idle, loads are rebalanced + bool YieldAndWaitForLoadBalancing(); + + void IncrementYieldCount() { + yield_count++; + } + + u64 GetYieldCount() const { + return yield_count; + } + + ThreadSchedStatus GetSchedulingStatus() const { + return static_cast<ThreadSchedStatus>(scheduling_state & + static_cast<u32>(ThreadSchedMasks::LowMask)); + } + + bool IsRunning() const { + return is_running; + } + + void SetIsRunning(bool value) { + is_running = value; + } + private: explicit Thread(KernelCore& kernel); ~Thread() override; - void ChangeScheduler(); + void SetSchedulingStatus(ThreadSchedStatus new_status); + void SetCurrentPriority(u32 new_priority); + ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask); + + void AdjustSchedulingOnStatus(u32 old_flags); + void AdjustSchedulingOnPriority(u32 old_priority); + void AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core); Core::ARM_Interface::ThreadContext context{}; @@ -409,6 +472,8 @@ private: u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. u64 last_running_ticks = 0; ///< CPU tick when thread was last running + u64 yield_count = 0; ///< Number of redundant yields carried by this thread. + ///< a redundant yield is one where no scheduling is changed s32 processor_id = 0; @@ -453,6 +518,13 @@ private: ThreadActivity activity = ThreadActivity::Normal; + s32 ideal_core_override = -1; + u64 affinity_mask_override = 0x1; + u32 affinity_override_count = 0; + + u32 scheduling_state = 0; + bool is_running = false; + std::string name; }; diff --git a/src/core/hle/kernel/wait_object.cpp b/src/core/hle/kernel/wait_object.cpp index 0e96ba872..c00cef062 100644 --- a/src/core/hle/kernel/wait_object.cpp +++ b/src/core/hle/kernel/wait_object.cpp @@ -6,6 +6,9 @@ #include "common/assert.h" #include "common/common_types.h" #include "common/logging/log.h" +#include "core/core.h" +#include "core/core_cpu.h" +#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/thread.h" @@ -82,9 +85,6 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) { const std::size_t index = thread->GetWaitObjectIndex(this); - for (const auto& object : thread->GetWaitObjects()) { - object->RemoveWaitingThread(thread.get()); - } thread->ClearWaitObjects(); thread->CancelWakeupTimer(); @@ -95,6 +95,7 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) { } if (resume) { thread->ResumeFromWait(); + Core::System::GetInstance().PrepareReschedule(thread->GetProcessorID()); } } diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index eaa694ff8..cb6eda1b8 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -6,6 +6,7 @@ add_library(video_core STATIC dma_pusher.h debug_utils/debug_utils.cpp debug_utils/debug_utils.h + engines/const_buffer_engine_interface.h engines/const_buffer_info.h engines/engine_upload.cpp engines/engine_upload.h @@ -107,10 +108,12 @@ add_library(video_core STATIC shader/decode/other.cpp shader/ast.cpp shader/ast.h - shader/control_flow.cpp - shader/control_flow.h shader/compiler_settings.cpp shader/compiler_settings.h + shader/const_buffer_locker.cpp + shader/const_buffer_locker.h + shader/control_flow.cpp + shader/control_flow.h shader/decode.cpp shader/expr.cpp shader/expr.h diff --git a/src/video_core/engines/const_buffer_engine_interface.h b/src/video_core/engines/const_buffer_engine_interface.h new 
file mode 100644 index 000000000..ac27b6cbe --- /dev/null +++ b/src/video_core/engines/const_buffer_engine_interface.h @@ -0,0 +1,119 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <type_traits> +#include "common/bit_field.h" +#include "common/common_types.h" +#include "video_core/engines/shader_bytecode.h" +#include "video_core/textures/texture.h" + +namespace Tegra::Engines { + +enum class ShaderType : u32 { + Vertex = 0, + TesselationControl = 1, + TesselationEval = 2, + Geometry = 3, + Fragment = 4, + Compute = 5, +}; + +struct SamplerDescriptor { + union { + BitField<0, 20, Tegra::Shader::TextureType> texture_type; + BitField<20, 1, u32> is_array; + BitField<21, 1, u32> is_buffer; + BitField<22, 1, u32> is_shadow; + u32 raw{}; + }; + + bool operator==(const SamplerDescriptor& rhs) const noexcept { + return raw == rhs.raw; + } + + bool operator!=(const SamplerDescriptor& rhs) const noexcept { + return !operator==(rhs); + } + + static SamplerDescriptor FromTicTexture(Tegra::Texture::TextureType tic_texture_type) { + SamplerDescriptor result; + switch (tic_texture_type) { + case Tegra::Texture::TextureType::Texture1D: + result.texture_type.Assign(Tegra::Shader::TextureType::Texture1D); + result.is_array.Assign(0); + result.is_buffer.Assign(0); + result.is_shadow.Assign(0); + return result; + case Tegra::Texture::TextureType::Texture2D: + result.texture_type.Assign(Tegra::Shader::TextureType::Texture2D); + result.is_array.Assign(0); + result.is_buffer.Assign(0); + result.is_shadow.Assign(0); + return result; + case Tegra::Texture::TextureType::Texture3D: + result.texture_type.Assign(Tegra::Shader::TextureType::Texture3D); + result.is_array.Assign(0); + result.is_buffer.Assign(0); + result.is_shadow.Assign(0); + return result; + case Tegra::Texture::TextureType::TextureCubemap: + result.texture_type.Assign(Tegra::Shader::TextureType::TextureCube); + result.is_array.Assign(0); + result.is_buffer.Assign(0); + result.is_shadow.Assign(0); + return result; + case Tegra::Texture::TextureType::Texture1DArray: + result.texture_type.Assign(Tegra::Shader::TextureType::Texture1D); + result.is_array.Assign(1); + result.is_buffer.Assign(0); + result.is_shadow.Assign(0); + return result; + case Tegra::Texture::TextureType::Texture2DArray: + result.texture_type.Assign(Tegra::Shader::TextureType::Texture2D); + result.is_array.Assign(1); + result.is_buffer.Assign(0); + result.is_shadow.Assign(0); + return result; + case Tegra::Texture::TextureType::Texture1DBuffer: + result.texture_type.Assign(Tegra::Shader::TextureType::Texture1D); + result.is_array.Assign(0); + result.is_buffer.Assign(1); + result.is_shadow.Assign(0); + return result; + case Tegra::Texture::TextureType::Texture2DNoMipmap: + result.texture_type.Assign(Tegra::Shader::TextureType::Texture2D); + result.is_array.Assign(0); + result.is_buffer.Assign(0); + result.is_shadow.Assign(0); + return result; + case Tegra::Texture::TextureType::TextureCubeArray: + result.texture_type.Assign(Tegra::Shader::TextureType::TextureCube); + result.is_array.Assign(1); + result.is_buffer.Assign(0); + result.is_shadow.Assign(0); + return result; + default: + result.texture_type.Assign(Tegra::Shader::TextureType::Texture2D); + result.is_array.Assign(0); + result.is_buffer.Assign(0); + result.is_shadow.Assign(0); + return result; + } + } +}; +static_assert(std::is_trivially_copyable_v<SamplerDescriptor>); + +class ConstBufferEngineInterface { +public: 
+ virtual ~ConstBufferEngineInterface() = default; + virtual u32 AccessConstBuffer32(ShaderType stage, u64 const_buffer, u64 offset) const = 0; + virtual SamplerDescriptor AccessBoundSampler(ShaderType stage, u64 offset) const = 0; + virtual SamplerDescriptor AccessBindlessSampler(ShaderType stage, u64 const_buffer, + u64 offset) const = 0; + virtual u32 GetBoundBuffer() const = 0; +}; + +} // namespace Tegra::Engines diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp index 63d449135..91adef360 100644 --- a/src/video_core/engines/kepler_compute.cpp +++ b/src/video_core/engines/kepler_compute.cpp @@ -70,13 +70,31 @@ Texture::FullTextureInfo KeplerCompute::GetTextureInfo(const Texture::TextureHan GetTSCEntry(tex_handle.tsc_id)}; } -u32 KeplerCompute::AccessConstBuffer32(u64 const_buffer, u64 offset) const { +u32 KeplerCompute::AccessConstBuffer32(ShaderType stage, u64 const_buffer, u64 offset) const { + ASSERT(stage == ShaderType::Compute); const auto& buffer = launch_description.const_buffer_config[const_buffer]; u32 result; std::memcpy(&result, memory_manager.GetPointer(buffer.Address() + offset), sizeof(u32)); return result; } +SamplerDescriptor KeplerCompute::AccessBoundSampler(ShaderType stage, u64 offset) const { + return AccessBindlessSampler(stage, regs.tex_cb_index, offset * sizeof(Texture::TextureHandle)); +} + +SamplerDescriptor KeplerCompute::AccessBindlessSampler(ShaderType stage, u64 const_buffer, + u64 offset) const { + ASSERT(stage == ShaderType::Compute); + const auto& tex_info_buffer = launch_description.const_buffer_config[const_buffer]; + const GPUVAddr tex_info_address = tex_info_buffer.Address() + offset; + + const Texture::TextureHandle tex_handle{memory_manager.Read<u32>(tex_info_address)}; + const Texture::FullTextureInfo tex_info = GetTextureInfo(tex_handle, offset); + SamplerDescriptor result = SamplerDescriptor::FromTicTexture(tex_info.tic.texture_type.Value()); + result.is_shadow.Assign(tex_info.tsc.depth_compare_enabled.Value()); + return result; +} + void KeplerCompute::ProcessLaunch() { const GPUVAddr launch_desc_loc = regs.launch_desc_loc.Address(); memory_manager.ReadBlockUnsafe(launch_desc_loc, &launch_description, diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h index 90cf650d2..8e7182727 100644 --- a/src/video_core/engines/kepler_compute.h +++ b/src/video_core/engines/kepler_compute.h @@ -10,6 +10,7 @@ #include "common/bit_field.h" #include "common/common_funcs.h" #include "common/common_types.h" +#include "video_core/engines/const_buffer_engine_interface.h" #include "video_core/engines/engine_upload.h" #include "video_core/gpu.h" #include "video_core/textures/texture.h" @@ -37,7 +38,7 @@ namespace Tegra::Engines { #define KEPLER_COMPUTE_REG_INDEX(field_name) \ (offsetof(Tegra::Engines::KeplerCompute::Regs, field_name) / sizeof(u32)) -class KeplerCompute final { +class KeplerCompute final : public ConstBufferEngineInterface { public: explicit KeplerCompute(Core::System& system, VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager); @@ -201,7 +202,16 @@ public: Texture::FullTextureInfo GetTextureInfo(const Texture::TextureHandle tex_handle, std::size_t offset) const; - u32 AccessConstBuffer32(u64 const_buffer, u64 offset) const; + u32 AccessConstBuffer32(ShaderType stage, u64 const_buffer, u64 offset) const override; + + SamplerDescriptor AccessBoundSampler(ShaderType stage, u64 offset) const override; + + SamplerDescriptor 
AccessBindlessSampler(ShaderType stage, u64 const_buffer, + u64 offset) const override; + + u32 GetBoundBuffer() const override { + return regs.tex_cb_index; + } private: Core::System& system; diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 59976943a..514ed93fa 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -98,11 +98,10 @@ void Maxwell3D::InitializeRegisterDefaults() { mme_inline[MAXWELL3D_REG_INDEX(index_array.count)] = true; } -#define DIRTY_REGS_POS(field_name) (offsetof(Maxwell3D::DirtyRegs, field_name)) +#define DIRTY_REGS_POS(field_name) static_cast<u8>(offsetof(Maxwell3D::DirtyRegs, field_name)) void Maxwell3D::InitDirtySettings() { - const auto set_block = [this](const std::size_t start, const std::size_t range, - const u8 position) { + const auto set_block = [this](std::size_t start, std::size_t range, u8 position) { const auto start_itr = dirty_pointers.begin() + start; const auto end_itr = start_itr + range; std::fill(start_itr, end_itr, position); @@ -113,10 +112,10 @@ void Maxwell3D::InitDirtySettings() { constexpr u32 registers_per_rt = sizeof(regs.rt[0]) / sizeof(u32); constexpr u32 rt_start_reg = MAXWELL3D_REG_INDEX(rt); constexpr u32 rt_end_reg = rt_start_reg + registers_per_rt * 8; - u32 rt_dirty_reg = DIRTY_REGS_POS(render_target); + u8 rt_dirty_reg = DIRTY_REGS_POS(render_target); for (u32 rt_reg = rt_start_reg; rt_reg < rt_end_reg; rt_reg += registers_per_rt) { set_block(rt_reg, registers_per_rt, rt_dirty_reg); - rt_dirty_reg++; + ++rt_dirty_reg; } constexpr u32 depth_buffer_flag = DIRTY_REGS_POS(depth_buffer); dirty_pointers[MAXWELL3D_REG_INDEX(zeta_enable)] = depth_buffer_flag; @@ -130,35 +129,35 @@ void Maxwell3D::InitDirtySettings() { constexpr u32 vertex_array_start = MAXWELL3D_REG_INDEX(vertex_array); constexpr u32 vertex_array_size = sizeof(regs.vertex_array[0]) / sizeof(u32); constexpr u32 vertex_array_end = vertex_array_start + vertex_array_size * Regs::NumVertexArrays; - u32 va_reg = DIRTY_REGS_POS(vertex_array); - u32 vi_reg = DIRTY_REGS_POS(vertex_instance); + u8 va_dirty_reg = DIRTY_REGS_POS(vertex_array); + u8 vi_dirty_reg = DIRTY_REGS_POS(vertex_instance); for (u32 vertex_reg = vertex_array_start; vertex_reg < vertex_array_end; vertex_reg += vertex_array_size) { - set_block(vertex_reg, 3, va_reg); + set_block(vertex_reg, 3, va_dirty_reg); // The divisor concerns vertex array instances - dirty_pointers[vertex_reg + 3] = vi_reg; - va_reg++; - vi_reg++; + dirty_pointers[static_cast<std::size_t>(vertex_reg) + 3] = vi_dirty_reg; + ++va_dirty_reg; + ++vi_dirty_reg; } constexpr u32 vertex_limit_start = MAXWELL3D_REG_INDEX(vertex_array_limit); constexpr u32 vertex_limit_size = sizeof(regs.vertex_array_limit[0]) / sizeof(u32); constexpr u32 vertex_limit_end = vertex_limit_start + vertex_limit_size * Regs::NumVertexArrays; - va_reg = DIRTY_REGS_POS(vertex_array); + va_dirty_reg = DIRTY_REGS_POS(vertex_array); for (u32 vertex_reg = vertex_limit_start; vertex_reg < vertex_limit_end; vertex_reg += vertex_limit_size) { - set_block(vertex_reg, vertex_limit_size, va_reg); - va_reg++; + set_block(vertex_reg, vertex_limit_size, va_dirty_reg); + va_dirty_reg++; } constexpr u32 vertex_instance_start = MAXWELL3D_REG_INDEX(instanced_arrays); constexpr u32 vertex_instance_size = sizeof(regs.instanced_arrays.is_instanced[0]) / sizeof(u32); constexpr u32 vertex_instance_end = vertex_instance_start + vertex_instance_size * Regs::NumVertexArrays; - vi_reg = 
DIRTY_REGS_POS(vertex_instance); + vi_dirty_reg = DIRTY_REGS_POS(vertex_instance); for (u32 vertex_reg = vertex_instance_start; vertex_reg < vertex_instance_end; vertex_reg += vertex_instance_size) { - set_block(vertex_reg, vertex_instance_size, vi_reg); - vi_reg++; + set_block(vertex_reg, vertex_instance_size, vi_dirty_reg); + vi_dirty_reg++; } set_block(MAXWELL3D_REG_INDEX(vertex_attrib_format), regs.vertex_attrib_format.size(), DIRTY_REGS_POS(vertex_attrib_format)); @@ -172,7 +171,7 @@ void Maxwell3D::InitDirtySettings() { // State // Viewport - constexpr u32 viewport_dirty_reg = DIRTY_REGS_POS(viewport); + constexpr u8 viewport_dirty_reg = DIRTY_REGS_POS(viewport); constexpr u32 viewport_start = MAXWELL3D_REG_INDEX(viewports); constexpr u32 viewport_size = sizeof(regs.viewports) / sizeof(u32); set_block(viewport_start, viewport_size, viewport_dirty_reg); @@ -199,7 +198,7 @@ void Maxwell3D::InitDirtySettings() { set_block(primitive_restart_start, primitive_restart_size, DIRTY_REGS_POS(primitive_restart)); // Depth Test - constexpr u32 depth_test_dirty_reg = DIRTY_REGS_POS(depth_test); + constexpr u8 depth_test_dirty_reg = DIRTY_REGS_POS(depth_test); dirty_pointers[MAXWELL3D_REG_INDEX(depth_test_enable)] = depth_test_dirty_reg; dirty_pointers[MAXWELL3D_REG_INDEX(depth_write_enabled)] = depth_test_dirty_reg; dirty_pointers[MAXWELL3D_REG_INDEX(depth_test_func)] = depth_test_dirty_reg; @@ -224,12 +223,12 @@ void Maxwell3D::InitDirtySettings() { dirty_pointers[MAXWELL3D_REG_INDEX(stencil_back_mask)] = stencil_test_dirty_reg; // Color Mask - constexpr u32 color_mask_dirty_reg = DIRTY_REGS_POS(color_mask); + constexpr u8 color_mask_dirty_reg = DIRTY_REGS_POS(color_mask); dirty_pointers[MAXWELL3D_REG_INDEX(color_mask_common)] = color_mask_dirty_reg; set_block(MAXWELL3D_REG_INDEX(color_mask), sizeof(regs.color_mask) / sizeof(u32), color_mask_dirty_reg); // Blend State - constexpr u32 blend_state_dirty_reg = DIRTY_REGS_POS(blend_state); + constexpr u8 blend_state_dirty_reg = DIRTY_REGS_POS(blend_state); set_block(MAXWELL3D_REG_INDEX(blend_color), sizeof(regs.blend_color) / sizeof(u32), blend_state_dirty_reg); dirty_pointers[MAXWELL3D_REG_INDEX(independent_blend_enable)] = blend_state_dirty_reg; @@ -238,12 +237,12 @@ void Maxwell3D::InitDirtySettings() { blend_state_dirty_reg); // Scissor State - constexpr u32 scissor_test_dirty_reg = DIRTY_REGS_POS(scissor_test); + constexpr u8 scissor_test_dirty_reg = DIRTY_REGS_POS(scissor_test); set_block(MAXWELL3D_REG_INDEX(scissor_test), sizeof(regs.scissor_test) / sizeof(u32), scissor_test_dirty_reg); // Polygon Offset - constexpr u32 polygon_offset_dirty_reg = DIRTY_REGS_POS(polygon_offset); + constexpr u8 polygon_offset_dirty_reg = DIRTY_REGS_POS(polygon_offset); dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_fill_enable)] = polygon_offset_dirty_reg; dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_line_enable)] = polygon_offset_dirty_reg; dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_point_enable)] = polygon_offset_dirty_reg; @@ -252,7 +251,7 @@ void Maxwell3D::InitDirtySettings() { dirty_pointers[MAXWELL3D_REG_INDEX(polygon_offset_clamp)] = polygon_offset_dirty_reg; // Depth bounds - constexpr u32 depth_bounds_values_dirty_reg = DIRTY_REGS_POS(depth_bounds_values); + constexpr u8 depth_bounds_values_dirty_reg = DIRTY_REGS_POS(depth_bounds_values); dirty_pointers[MAXWELL3D_REG_INDEX(depth_bounds[0])] = depth_bounds_values_dirty_reg; dirty_pointers[MAXWELL3D_REG_INDEX(depth_bounds[1])] = depth_bounds_values_dirty_reg; } @@ -847,7 +846,8 @@ void 
Maxwell3D::ProcessClearBuffers() { rasterizer.Clear(); } -u32 Maxwell3D::AccessConstBuffer32(Regs::ShaderStage stage, u64 const_buffer, u64 offset) const { +u32 Maxwell3D::AccessConstBuffer32(ShaderType stage, u64 const_buffer, u64 offset) const { + ASSERT(stage != ShaderType::Compute); const auto& shader_stage = state.shader_stages[static_cast<std::size_t>(stage)]; const auto& buffer = shader_stage.const_buffers[const_buffer]; u32 result; @@ -855,4 +855,22 @@ u32 Maxwell3D::AccessConstBuffer32(Regs::ShaderStage stage, u64 const_buffer, u6 return result; } +SamplerDescriptor Maxwell3D::AccessBoundSampler(ShaderType stage, u64 offset) const { + return AccessBindlessSampler(stage, regs.tex_cb_index, offset * sizeof(Texture::TextureHandle)); +} + +SamplerDescriptor Maxwell3D::AccessBindlessSampler(ShaderType stage, u64 const_buffer, + u64 offset) const { + ASSERT(stage != ShaderType::Compute); + const auto& shader = state.shader_stages[static_cast<std::size_t>(stage)]; + const auto& tex_info_buffer = shader.const_buffers[const_buffer]; + const GPUVAddr tex_info_address = tex_info_buffer.address + offset; + + const Texture::TextureHandle tex_handle{memory_manager.Read<u32>(tex_info_address)}; + const Texture::FullTextureInfo tex_info = GetTextureInfo(tex_handle, offset); + SamplerDescriptor result = SamplerDescriptor::FromTicTexture(tex_info.tic.texture_type.Value()); + result.is_shadow.Assign(tex_info.tsc.depth_compare_enabled.Value()); + return result; +} + } // namespace Tegra::Engines diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index e3f1047d5..987ad77b2 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h @@ -15,6 +15,7 @@ #include "common/common_funcs.h" #include "common/common_types.h" #include "common/math_util.h" +#include "video_core/engines/const_buffer_engine_interface.h" #include "video_core/engines/const_buffer_info.h" #include "video_core/engines/engine_upload.h" #include "video_core/gpu.h" @@ -44,7 +45,7 @@ namespace Tegra::Engines { #define MAXWELL3D_REG_INDEX(field_name) \ (offsetof(Tegra::Engines::Maxwell3D::Regs, field_name) / sizeof(u32)) -class Maxwell3D final { +class Maxwell3D final : public ConstBufferEngineInterface { public: explicit Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager); @@ -1165,6 +1166,8 @@ public: struct DirtyRegs { static constexpr std::size_t NUM_REGS = 256; + static_assert(NUM_REGS - 1 <= std::numeric_limits<u8>::max()); + union { struct { bool null_dirty; @@ -1257,7 +1260,16 @@ public: /// Returns the texture information for a specific texture in a specific shader stage. Texture::FullTextureInfo GetStageTexture(Regs::ShaderStage stage, std::size_t offset) const; - u32 AccessConstBuffer32(Regs::ShaderStage stage, u64 const_buffer, u64 offset) const; + u32 AccessConstBuffer32(ShaderType stage, u64 const_buffer, u64 offset) const override; + + SamplerDescriptor AccessBoundSampler(ShaderType stage, u64 offset) const override; + + SamplerDescriptor AccessBindlessSampler(ShaderType stage, u64 const_buffer, + u64 offset) const override; + + u32 GetBoundBuffer() const override { + return regs.tex_cb_index; + } /// Memory for macro code - it's undetermined how big this is, however 1MB is much larger than /// we've seen used. 
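// A hedged sketch (not part of this patch) of how a consumer such as the shader
// decoder could use the new ConstBufferEngineInterface without caring whether the
// engine is Maxwell3D or KeplerCompute. ResolveSampler is a hypothetical helper;
// the interface calls themselves match the declarations introduced above.
#include <optional>
#include "common/common_types.h"
#include "video_core/engines/const_buffer_engine_interface.h"

namespace {

Tegra::Engines::SamplerDescriptor ResolveSampler(
    const Tegra::Engines::ConstBufferEngineInterface& engine, Tegra::Engines::ShaderType stage,
    u64 offset, std::optional<u64> bindless_cbuf) {
    if (bindless_cbuf) {
        // Bindless: the texture handle lives in an arbitrary const buffer slot.
        return engine.AccessBindlessSampler(stage, *bindless_cbuf, offset);
    }
    // Bound: the handle lives in the engine's dedicated texture const buffer.
    return engine.AccessBoundSampler(stage, offset);
}

} // namespace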
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h index 7a6355ce2..d3d05a866 100644 --- a/src/video_core/engines/shader_bytecode.h +++ b/src/video_core/engines/shader_bytecode.h @@ -574,7 +574,7 @@ enum class ShuffleOperation : u64 { }; union Instruction { - Instruction& operator=(const Instruction& instr) { + constexpr Instruction& operator=(const Instruction& instr) { value = instr.value; return *this; } @@ -1760,22 +1760,22 @@ public: class Matcher { public: - Matcher(const char* const name, u16 mask, u16 expected, OpCode::Id id, OpCode::Type type) + constexpr Matcher(const char* const name, u16 mask, u16 expected, Id id, Type type) : name{name}, mask{mask}, expected{expected}, id{id}, type{type} {} - const char* GetName() const { + constexpr const char* GetName() const { return name; } - u16 GetMask() const { + constexpr u16 GetMask() const { return mask; } - Id GetId() const { + constexpr Id GetId() const { return id; } - Type GetType() const { + constexpr Type GetType() const { return type; } @@ -1784,7 +1784,7 @@ public: * @param instruction The instruction to test * @returns true if the given instruction matches. */ - bool Matches(u16 instruction) const { + constexpr bool Matches(u16 instruction) const { return (instruction & mask) == expected; } @@ -1818,7 +1818,7 @@ private: * A '0' in a bitstring indicates that a zero must be present at that bit position. * A '1' in a bitstring indicates that a one must be present at that bit position. */ - static auto GetMaskAndExpect(const char* const bitstring) { + static constexpr auto GetMaskAndExpect(const char* const bitstring) { u16 mask = 0, expect = 0; for (std::size_t i = 0; i < opcode_bitsize; i++) { const std::size_t bit_position = opcode_bitsize - i - 1; @@ -1835,15 +1835,15 @@ private: break; } } - return std::make_tuple(mask, expect); + return std::make_pair(mask, expect); } public: /// Creates a matcher that can match and parse instructions based on bitstring. 
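// A standalone sketch (not yuzu code) of the bitstring decoding documented above,
// with a worked 16-bit example: '0' and '1' both set the mask bit, '1' also sets
// the expected bit, and '-' leaves the position free. The example bitstring is
// hypothetical.
#include <cstddef>
#include <cstdint>
#include <utility>

constexpr std::pair<std::uint16_t, std::uint16_t> MaskAndExpect(const char* bitstring) {
    std::uint16_t mask = 0;
    std::uint16_t expect = 0;
    for (std::size_t i = 0; i < 16; ++i) {
        const auto bit = static_cast<std::uint16_t>(1U << (15 - i));
        if (bitstring[i] == '0') {
            mask |= bit;
        } else if (bitstring[i] == '1') {
            mask |= bit;
            expect |= bit;
        }
        // '-' touches neither value.
    }
    return {mask, expect};
}

static_assert(MaskAndExpect("1110111011011---").first == 0xFFF8);
static_assert(MaskAndExpect("1110111011011---").second == 0xEED8);
// Matcher::Matches(instruction) then reduces to (instruction & mask) == expected.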
- static auto GetMatcher(const char* const bitstring, OpCode::Id op, OpCode::Type type, - const char* const name) { - const auto mask_expect = GetMaskAndExpect(bitstring); - return Matcher(name, std::get<0>(mask_expect), std::get<1>(mask_expect), op, type); + static constexpr auto GetMatcher(const char* const bitstring, Id op, Type type, + const char* const name) { + const auto [mask, expected] = GetMaskAndExpect(bitstring); + return Matcher(name, mask, expected, op, type); } }; diff --git a/src/video_core/morton.cpp b/src/video_core/morton.cpp index fe5f08ace..2f2fe6859 100644 --- a/src/video_core/morton.cpp +++ b/src/video_core/morton.cpp @@ -112,6 +112,7 @@ static constexpr ConversionArray morton_to_linear_fns = { MortonCopy<true, PixelFormat::ASTC_2D_8X6_SRGB>, MortonCopy<true, PixelFormat::ASTC_2D_6X5>, MortonCopy<true, PixelFormat::ASTC_2D_6X5_SRGB>, + MortonCopy<true, PixelFormat::E5B9G9R9F>, MortonCopy<true, PixelFormat::Z32F>, MortonCopy<true, PixelFormat::Z16>, MortonCopy<true, PixelFormat::Z24S8>, @@ -192,6 +193,7 @@ static constexpr ConversionArray linear_to_morton_fns = { nullptr, nullptr, nullptr, + MortonCopy<false, PixelFormat::E5B9G9R9F>, MortonCopy<false, PixelFormat::Z32F>, MortonCopy<false, PixelFormat::Z16>, MortonCopy<false, PixelFormat::Z24S8>, diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index cbcf81414..9431d64ac 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -975,7 +975,8 @@ TextureBufferUsage RasterizerOpenGL::SetupDrawTextures(Maxwell::ShaderStage stag } const auto cbuf = entry.GetBindlessCBuf(); Tegra::Texture::TextureHandle tex_handle; - tex_handle.raw = maxwell3d.AccessConstBuffer32(stage, cbuf.first, cbuf.second); + Tegra::Engines::ShaderType shader_type = static_cast<Tegra::Engines::ShaderType>(stage); + tex_handle.raw = maxwell3d.AccessConstBuffer32(shader_type, cbuf.first, cbuf.second); return maxwell3d.GetTextureInfo(tex_handle, entry.GetOffset()); }(); @@ -1005,7 +1006,8 @@ TextureBufferUsage RasterizerOpenGL::SetupComputeTextures(const Shader& kernel) } const auto cbuf = entry.GetBindlessCBuf(); Tegra::Texture::TextureHandle tex_handle; - tex_handle.raw = compute.AccessConstBuffer32(cbuf.first, cbuf.second); + tex_handle.raw = compute.AccessConstBuffer32(Tegra::Engines::ShaderType::Compute, + cbuf.first, cbuf.second); return compute.GetTextureInfo(tex_handle, entry.GetOffset()); }(); @@ -1050,7 +1052,8 @@ void RasterizerOpenGL::SetupComputeImages(const Shader& shader) { } const auto cbuf = entry.GetBindlessCBuf(); Tegra::Texture::TextureHandle tex_handle; - tex_handle.raw = compute.AccessConstBuffer32(cbuf.first, cbuf.second); + tex_handle.raw = compute.AccessConstBuffer32(Tegra::Engines::ShaderType::Compute, + cbuf.first, cbuf.second); return compute.GetTextureInfo(tex_handle, entry.GetOffset()).tic; }(); SetupImage(bindpoint, tic, entry); diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index 42ca3b1bd..f1b89165d 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp @@ -3,13 +3,16 @@ // Refer to the license.txt file included. 
#include <mutex> +#include <optional> +#include <string> #include <thread> +#include <unordered_set> #include <boost/functional/hash.hpp> #include "common/assert.h" -#include "common/hash.h" #include "common/scope_exit.h" #include "core/core.h" #include "core/frontend/emu_window.h" +#include "video_core/engines/kepler_compute.h" #include "video_core/engines/maxwell_3d.h" #include "video_core/memory_manager.h" #include "video_core/renderer_opengl/gl_rasterizer.h" @@ -21,18 +24,20 @@ namespace OpenGL { +using Tegra::Engines::ShaderType; +using VideoCommon::Shader::ConstBufferLocker; using VideoCommon::Shader::ProgramCode; +using VideoCommon::Shader::ShaderIR; + +namespace { // One UBO is always reserved for emulation values on staged shaders constexpr u32 STAGE_RESERVED_UBOS = 1; -struct UnspecializedShader { - std::string code; - GLShader::ShaderEntries entries; - ProgramType program_type; -}; +constexpr u32 STAGE_MAIN_OFFSET = 10; +constexpr u32 KERNEL_MAIN_OFFSET = 0; -namespace { +constexpr VideoCommon::Shader::CompilerSettings COMPILER_SETTINGS{}; /// Gets the address for the specified shader stage program GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) { @@ -41,6 +46,39 @@ GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) return gpu.regs.code_address.CodeAddress() + shader_config.offset; } +/// Gets if the current instruction offset is a scheduler instruction +constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) { + // Sched instructions appear once every 4 instructions. + constexpr std::size_t SchedPeriod = 4; + const std::size_t absolute_offset = offset - main_offset; + return (absolute_offset % SchedPeriod) == 0; +} + +/// Calculates the size of a program stream +std::size_t CalculateProgramSize(const GLShader::ProgramCode& program) { + constexpr std::size_t start_offset = 10; + // This is the encoded version of BRA that jumps to itself. All Nvidia + // shaders end with one. + constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL; + constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL; + std::size_t offset = start_offset; + while (offset < program.size()) { + const u64 instruction = program[offset]; + if (!IsSchedInstruction(offset, start_offset)) { + if ((instruction & mask) == self_jumping_branch) { + // End on Maxwell's "nop" instruction + break; + } + if (instruction == 0) { + break; + } + } + offset++; + } + // The last instruction is included in the program size + return std::min(offset + 1, program.size()); +} + /// Gets the shader program code from memory for the specified address ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr, const u8* host_ptr) { @@ -51,6 +89,7 @@ ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr g }); memory_manager.ReadBlockUnsafe(gpu_addr, program_code.data(), program_code.size() * sizeof(u64)); + program_code.resize(CalculateProgramSize(program_code)); return program_code; } @@ -71,14 +110,6 @@ constexpr GLenum GetShaderType(ProgramType program_type) { } } -/// Gets if the current instruction offset is a scheduler instruction -constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) { - // Sched instructions appear once every 4 instructions. 
- constexpr std::size_t SchedPeriod = 4; - const std::size_t absolute_offset = offset - main_offset; - return (absolute_offset % SchedPeriod) == 0; -} - /// Describes primitive behavior on geometry shaders constexpr std::tuple<const char*, const char*, u32> GetPrimitiveDescription(GLenum primitive_mode) { switch (primitive_mode) { @@ -121,110 +152,142 @@ ProgramType GetProgramType(Maxwell::ShaderProgram program) { return {}; } -/// Calculates the size of a program stream -std::size_t CalculateProgramSize(const GLShader::ProgramCode& program) { - constexpr std::size_t start_offset = 10; - // This is the encoded version of BRA that jumps to itself. All Nvidia - // shaders end with one. - constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL; - constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL; - std::size_t offset = start_offset; - std::size_t size = start_offset * sizeof(u64); - while (offset < program.size()) { - const u64 instruction = program[offset]; - if (!IsSchedInstruction(offset, start_offset)) { - if ((instruction & mask) == self_jumping_branch) { - // End on Maxwell's "nop" instruction - break; - } - if (instruction == 0) { - break; - } - } - size += sizeof(u64); - offset++; - } - // The last instruction is included in the program size - return std::min(size + sizeof(u64), program.size() * sizeof(u64)); -} - /// Hashes one (or two) program streams u64 GetUniqueIdentifier(ProgramType program_type, const ProgramCode& code, - const ProgramCode& code_b, std::size_t size_a = 0, std::size_t size_b = 0) { - if (size_a == 0) { - size_a = CalculateProgramSize(code); - } - u64 unique_identifier = Common::CityHash64(reinterpret_cast<const char*>(code.data()), size_a); - if (program_type != ProgramType::VertexA) { - return unique_identifier; - } - // VertexA programs include two programs - - std::size_t seed = 0; - boost::hash_combine(seed, unique_identifier); - - if (size_b == 0) { - size_b = CalculateProgramSize(code_b); + const ProgramCode& code_b) { + u64 unique_identifier = boost::hash_value(code); + if (program_type == ProgramType::VertexA) { + // VertexA programs include two programs + boost::hash_combine(unique_identifier, boost::hash_value(code_b)); } - const u64 identifier_b = - Common::CityHash64(reinterpret_cast<const char*>(code_b.data()), size_b); - boost::hash_combine(seed, identifier_b); - return static_cast<u64>(seed); + return unique_identifier; } /// Creates an unspecialized program from code streams -GLShader::ProgramResult CreateProgram(const Device& device, ProgramType program_type, - ProgramCode program_code, ProgramCode program_code_b) { - GLShader::ShaderSetup setup(program_code); - setup.program.size_a = CalculateProgramSize(program_code); - setup.program.size_b = 0; - if (program_type == ProgramType::VertexA) { - // VertexB is always enabled, so when VertexA is enabled, we have two vertex shaders. - // Conventional HW does not support this, so we combine VertexA and VertexB into one - // stage here. 
- setup.SetProgramB(program_code_b); - setup.program.size_b = CalculateProgramSize(program_code_b); - } - setup.program.unique_identifier = GetUniqueIdentifier( - program_type, program_code, program_code_b, setup.program.size_a, setup.program.size_b); - +std::string GenerateGLSL(const Device& device, ProgramType program_type, const ShaderIR& ir, + const std::optional<ShaderIR>& ir_b) { switch (program_type) { case ProgramType::VertexA: case ProgramType::VertexB: - return GLShader::GenerateVertexShader(device, setup); + return GLShader::GenerateVertexShader(device, ir, ir_b ? &*ir_b : nullptr); case ProgramType::Geometry: - return GLShader::GenerateGeometryShader(device, setup); + return GLShader::GenerateGeometryShader(device, ir); case ProgramType::Fragment: - return GLShader::GenerateFragmentShader(device, setup); + return GLShader::GenerateFragmentShader(device, ir); case ProgramType::Compute: - return GLShader::GenerateComputeShader(device, setup); + return GLShader::GenerateComputeShader(device, ir); default: UNIMPLEMENTED_MSG("Unimplemented program_type={}", static_cast<u32>(program_type)); return {}; } } -CachedProgram SpecializeShader(const std::string& code, const GLShader::ShaderEntries& entries, - ProgramType program_type, const ProgramVariant& variant, - bool hint_retrievable = false) { +constexpr const char* GetProgramTypeName(ProgramType program_type) { + switch (program_type) { + case ProgramType::VertexA: + case ProgramType::VertexB: + return "VS"; + case ProgramType::TessellationControl: + return "TCS"; + case ProgramType::TessellationEval: + return "TES"; + case ProgramType::Geometry: + return "GS"; + case ProgramType::Fragment: + return "FS"; + case ProgramType::Compute: + return "CS"; + } + return "UNK"; +} + +Tegra::Engines::ShaderType GetEnginesShaderType(ProgramType program_type) { + switch (program_type) { + case ProgramType::VertexA: + case ProgramType::VertexB: + return Tegra::Engines::ShaderType::Vertex; + case ProgramType::TessellationControl: + return Tegra::Engines::ShaderType::TesselationControl; + case ProgramType::TessellationEval: + return Tegra::Engines::ShaderType::TesselationEval; + case ProgramType::Geometry: + return Tegra::Engines::ShaderType::Geometry; + case ProgramType::Fragment: + return Tegra::Engines::ShaderType::Fragment; + case ProgramType::Compute: + return Tegra::Engines::ShaderType::Compute; + } + UNREACHABLE(); + return {}; +} + +std::string GetShaderId(u64 unique_identifier, ProgramType program_type) { + return fmt::format("{}{:016X}", GetProgramTypeName(program_type), unique_identifier); +} + +Tegra::Engines::ConstBufferEngineInterface& GetConstBufferEngineInterface( + Core::System& system, ProgramType program_type) { + if (program_type == ProgramType::Compute) { + return system.GPU().KeplerCompute(); + } else { + return system.GPU().Maxwell3D(); + } +} + +std::unique_ptr<ConstBufferLocker> MakeLocker(Core::System& system, ProgramType program_type) { + return std::make_unique<ConstBufferLocker>(GetEnginesShaderType(program_type), + GetConstBufferEngineInterface(system, program_type)); +} + +void FillLocker(ConstBufferLocker& locker, const ShaderDiskCacheUsage& usage) { + for (const auto& key : usage.keys) { + const auto [buffer, offset] = key.first; + locker.InsertKey(buffer, offset, key.second); + } + for (const auto& [offset, sampler] : usage.bound_samplers) { + locker.InsertBoundSampler(offset, sampler); + } + for (const auto& [key, sampler] : usage.bindless_samplers) { + const auto [buffer, offset] = key; + 
locker.InsertBindlessSampler(buffer, offset, sampler); + } +} + +CachedProgram BuildShader(const Device& device, u64 unique_identifier, ProgramType program_type, + const ProgramCode& program_code, const ProgramCode& program_code_b, + const ProgramVariant& variant, ConstBufferLocker& locker, + bool hint_retrievable = false) { + LOG_INFO(Render_OpenGL, "called. {}", GetShaderId(unique_identifier, program_type)); + + const bool is_compute = program_type == ProgramType::Compute; + const u32 main_offset = is_compute ? KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET; + const ShaderIR ir(program_code, main_offset, COMPILER_SETTINGS, locker); + std::optional<ShaderIR> ir_b; + if (!program_code_b.empty()) { + ir_b.emplace(program_code_b, main_offset, COMPILER_SETTINGS, locker); + } + const auto entries = GLShader::GetEntries(ir); + auto base_bindings{variant.base_bindings}; const auto primitive_mode{variant.primitive_mode}; const auto texture_buffer_usage{variant.texture_buffer_usage}; - std::string source = R"(#version 430 core + std::string source = fmt::format(R"(// {} +#version 430 core #extension GL_ARB_separate_shader_objects : enable #extension GL_ARB_shader_viewport_layer_array : enable #extension GL_EXT_shader_image_load_formatted : enable #extension GL_NV_gpu_shader5 : enable #extension GL_NV_shader_thread_group : enable #extension GL_NV_shader_thread_shuffle : enable -)"; - if (program_type == ProgramType::Compute) { +)", + GetShaderId(unique_identifier, program_type)); + if (is_compute) { source += "#extension GL_ARB_compute_variable_group_size : require\n"; } source += '\n'; - if (program_type != ProgramType::Compute) { + if (!is_compute) { source += fmt::format("#define EMULATION_UBO_BINDING {}\n", base_bindings.cbuf++); } @@ -268,7 +331,7 @@ CachedProgram SpecializeShader(const std::string& code, const GLShader::ShaderEn } source += '\n'; - source += code; + source += GenerateGLSL(device, program_type, ir, ir_b); OGLShader shader; shader.Create(source.c_str(), GetShaderType(program_type)); @@ -278,85 +341,97 @@ CachedProgram SpecializeShader(const std::string& code, const GLShader::ShaderEn return program; } -std::set<GLenum> GetSupportedFormats() { - std::set<GLenum> supported_formats; - +std::unordered_set<GLenum> GetSupportedFormats() { GLint num_formats{}; glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &num_formats); std::vector<GLint> formats(num_formats); glGetIntegerv(GL_PROGRAM_BINARY_FORMATS, formats.data()); - for (const GLint format : formats) + std::unordered_set<GLenum> supported_formats; + for (const GLint format : formats) { supported_formats.insert(static_cast<GLenum>(format)); + } return supported_formats; } } // Anonymous namespace CachedShader::CachedShader(const ShaderParameters& params, ProgramType program_type, - GLShader::ProgramResult result) - : RasterizerCacheObject{params.host_ptr}, cpu_addr{params.cpu_addr}, - unique_identifier{params.unique_identifier}, program_type{program_type}, - disk_cache{params.disk_cache}, precompiled_programs{params.precompiled_programs}, - entries{result.second}, code{std::move(result.first)}, shader_length{entries.shader_length} {} + GLShader::ShaderEntries entries, ProgramCode program_code, + ProgramCode program_code_b) + : RasterizerCacheObject{params.host_ptr}, system{params.system}, + disk_cache{params.disk_cache}, device{params.device}, cpu_addr{params.cpu_addr}, + unique_identifier{params.unique_identifier}, program_type{program_type}, entries{entries}, + program_code{std::move(program_code)}, 
program_code_b{std::move(program_code_b)} { + if (!params.precompiled_variants) { + return; + } + for (const auto& pair : *params.precompiled_variants) { + auto locker = MakeLocker(system, program_type); + const auto& usage = pair->first; + FillLocker(*locker, usage); + + std::unique_ptr<LockerVariant>* locker_variant = nullptr; + const auto it = + std::find_if(locker_variants.begin(), locker_variants.end(), [&](const auto& variant) { + return variant->locker->HasEqualKeys(*locker); + }); + if (it == locker_variants.end()) { + locker_variant = &locker_variants.emplace_back(); + *locker_variant = std::make_unique<LockerVariant>(); + locker_variant->get()->locker = std::move(locker); + } else { + locker_variant = &*it; + } + locker_variant->get()->programs.emplace(usage.variant, pair->second); + } +} Shader CachedShader::CreateStageFromMemory(const ShaderParameters& params, Maxwell::ShaderProgram program_type, - ProgramCode&& program_code, - ProgramCode&& program_code_b) { - const auto code_size{CalculateProgramSize(program_code)}; - const auto code_size_b{CalculateProgramSize(program_code_b)}; - auto result{ - CreateProgram(params.device, GetProgramType(program_type), program_code, program_code_b)}; - if (result.first.empty()) { - // TODO(Rodrigo): Unimplemented shader stages hit here, avoid using these for now - return {}; - } - + ProgramCode program_code, ProgramCode program_code_b) { params.disk_cache.SaveRaw(ShaderDiskCacheRaw( - params.unique_identifier, GetProgramType(program_type), - static_cast<u32>(code_size / sizeof(u64)), static_cast<u32>(code_size_b / sizeof(u64)), - std::move(program_code), std::move(program_code_b))); - + params.unique_identifier, GetProgramType(program_type), program_code, program_code_b)); + + ConstBufferLocker locker(GetEnginesShaderType(GetProgramType(program_type))); + const ShaderIR ir(program_code, STAGE_MAIN_OFFSET, COMPILER_SETTINGS, locker); + // TODO(Rodrigo): Handle VertexA shaders + // std::optional<ShaderIR> ir_b; + // if (!program_code_b.empty()) { + // ir_b.emplace(program_code_b, STAGE_MAIN_OFFSET); + // } return std::shared_ptr<CachedShader>( - new CachedShader(params, GetProgramType(program_type), std::move(result))); + new CachedShader(params, GetProgramType(program_type), GLShader::GetEntries(ir), + std::move(program_code), std::move(program_code_b))); } -Shader CachedShader::CreateStageFromCache(const ShaderParameters& params, - Maxwell::ShaderProgram program_type, - GLShader::ProgramResult result) { - return std::shared_ptr<CachedShader>( - new CachedShader(params, GetProgramType(program_type), std::move(result))); -} - -Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, ProgramCode&& code) { - auto result{CreateProgram(params.device, ProgramType::Compute, code, {})}; - - const auto code_size{CalculateProgramSize(code)}; - params.disk_cache.SaveRaw(ShaderDiskCacheRaw(params.unique_identifier, ProgramType::Compute, - static_cast<u32>(code_size / sizeof(u64)), 0, - std::move(code), {})); +Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, ProgramCode code) { + params.disk_cache.SaveRaw( + ShaderDiskCacheRaw(params.unique_identifier, ProgramType::Compute, code)); - return std::shared_ptr<CachedShader>( - new CachedShader(params, ProgramType::Compute, std::move(result))); + ConstBufferLocker locker(Tegra::Engines::ShaderType::Compute); + const ShaderIR ir(code, KERNEL_MAIN_OFFSET, COMPILER_SETTINGS, locker); + return std::shared_ptr<CachedShader>(new CachedShader( + params, 
ProgramType::Compute, GLShader::GetEntries(ir), std::move(code), {})); } -Shader CachedShader::CreateKernelFromCache(const ShaderParameters& params, - GLShader::ProgramResult result) { - return std::shared_ptr<CachedShader>( - new CachedShader(params, ProgramType::Compute, std::move(result))); +Shader CachedShader::CreateFromCache(const ShaderParameters& params, + const UnspecializedShader& unspecialized) { + return std::shared_ptr<CachedShader>(new CachedShader(params, unspecialized.program_type, + unspecialized.entries, unspecialized.code, + unspecialized.code_b)); } std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(const ProgramVariant& variant) { - const auto [entry, is_cache_miss] = programs.try_emplace(variant); + UpdateVariant(); + + const auto [entry, is_cache_miss] = curr_variant->programs.try_emplace(variant); auto& program = entry->second; if (is_cache_miss) { - program = TryLoadProgram(variant); - if (!program) { - program = SpecializeShader(code, entries, program_type, variant); - disk_cache.SaveUsage(GetUsage(variant)); - } + program = BuildShader(device, unique_identifier, program_type, program_code, program_code_b, + variant, *curr_variant->locker); + disk_cache.SaveUsage(GetUsage(variant, *curr_variant->locker)); LabelGLObject(GL_PROGRAM, program->handle, cpu_addr); } @@ -372,18 +447,33 @@ std::tuple<GLuint, BaseBindings> CachedShader::GetProgramHandle(const ProgramVar return {program->handle, base_bindings}; } -CachedProgram CachedShader::TryLoadProgram(const ProgramVariant& variant) const { - const auto found = precompiled_programs.find(GetUsage(variant)); - if (found == precompiled_programs.end()) { - return {}; +void CachedShader::UpdateVariant() { + if (curr_variant && !curr_variant->locker->IsConsistent()) { + curr_variant = nullptr; + } + if (!curr_variant) { + for (auto& variant : locker_variants) { + if (variant->locker->IsConsistent()) { + curr_variant = variant.get(); + } + } + } + if (!curr_variant) { + auto& new_variant = locker_variants.emplace_back(); + new_variant = std::make_unique<LockerVariant>(); + new_variant->locker = MakeLocker(system, program_type); + curr_variant = new_variant.get(); } - return found->second; } -ShaderDiskCacheUsage CachedShader::GetUsage(const ProgramVariant& variant) const { +ShaderDiskCacheUsage CachedShader::GetUsage(const ProgramVariant& variant, + const ConstBufferLocker& locker) const { ShaderDiskCacheUsage usage; usage.unique_identifier = unique_identifier; usage.variant = variant; + usage.keys = locker.GetKeys(); + usage.bound_samplers = locker.GetBoundSamplers(); + usage.bindless_samplers = locker.GetBindlessSamplers(); return usage; } @@ -399,18 +489,15 @@ void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading, return; } const auto [raws, shader_usages] = *transferable; - - auto [decompiled, dumps] = disk_cache.LoadPrecompiled(); - - const auto supported_formats{GetSupportedFormats()}; - const auto unspecialized_shaders{ - GenerateUnspecializedShaders(stop_loading, callback, raws, decompiled)}; - if (stop_loading) { + if (!GenerateUnspecializedShaders(stop_loading, callback, raws) || stop_loading) { return; } - // Track if precompiled cache was altered during loading to know if we have to serialize the - // virtual precompiled cache file back to the hard drive + const auto dumps = disk_cache.LoadPrecompiled(); + const auto supported_formats = GetSupportedFormats(); + + // Track if precompiled cache was altered during loading to know if we have to + // serialize the virtual precompiled 
cache file back to the hard drive bool precompiled_cache_altered = false; // Inform the frontend about shader build initialization @@ -433,9 +520,6 @@ void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading, return; } const auto& usage{shader_usages[i]}; - LOG_INFO(Render_OpenGL, "Building shader {:016x} (index {} of {})", - usage.unique_identifier, i, shader_usages.size()); - const auto& unspecialized{unspecialized_shaders.at(usage.unique_identifier)}; const auto dump{dumps.find(usage)}; @@ -449,21 +533,28 @@ void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading, } } if (!shader) { - shader = SpecializeShader(unspecialized.code, unspecialized.entries, - unspecialized.program_type, usage.variant, true); + auto locker{MakeLocker(system, unspecialized.program_type)}; + FillLocker(*locker, usage); + shader = BuildShader(device, usage.unique_identifier, unspecialized.program_type, + unspecialized.code, unspecialized.code_b, usage.variant, + *locker, true); } - std::scoped_lock lock(mutex); + std::scoped_lock lock{mutex}; if (callback) { callback(VideoCore::LoadCallbackStage::Build, ++built_shaders, shader_usages.size()); } precompiled_programs.emplace(usage, std::move(shader)); + + // TODO(Rodrigo): Is there a better way to do this? + precompiled_variants[usage.unique_identifier].push_back( + precompiled_programs.find(usage)); } }; - const auto num_workers{static_cast<std::size_t>(std::thread::hardware_concurrency() + 1)}; + const auto num_workers{static_cast<std::size_t>(std::thread::hardware_concurrency() + 1ULL)}; const std::size_t bucket_size{shader_usages.size() / num_workers}; std::vector<std::unique_ptr<Core::Frontend::GraphicsContext>> contexts(num_workers); std::vector<std::thread> threads(num_workers); @@ -483,7 +574,6 @@ void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading, if (compilation_failed) { // Invalidate the precompiled cache if a shader dumped shader was rejected disk_cache.InvalidatePrecompiled(); - dumps.clear(); precompiled_cache_altered = true; return; } @@ -491,8 +581,8 @@ void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading, return; } - // TODO(Rodrigo): Do state tracking for transferable shaders and do a dummy draw before - // precompiling them + // TODO(Rodrigo): Do state tracking for transferable shaders and do a dummy draw + // before precompiling them for (std::size_t i = 0; i < shader_usages.size(); ++i) { const auto& usage{shader_usages[i]}; @@ -508,9 +598,13 @@ void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading, } } -CachedProgram ShaderCacheOpenGL::GeneratePrecompiledProgram( - const ShaderDiskCacheDump& dump, const std::set<GLenum>& supported_formats) { +const PrecompiledVariants* ShaderCacheOpenGL::GetPrecompiledVariants(u64 unique_identifier) const { + const auto it = precompiled_variants.find(unique_identifier); + return it == precompiled_variants.end() ? 
nullptr : &it->second; +} +CachedProgram ShaderCacheOpenGL::GeneratePrecompiledProgram( + const ShaderDiskCacheDump& dump, const std::unordered_set<GLenum>& supported_formats) { if (supported_formats.find(dump.binary_format) == supported_formats.end()) { LOG_INFO(Render_OpenGL, "Precompiled cache entry with unsupported format - removing"); return {}; @@ -532,56 +626,52 @@ CachedProgram ShaderCacheOpenGL::GeneratePrecompiledProgram( return shader; } -std::unordered_map<u64, UnspecializedShader> ShaderCacheOpenGL::GenerateUnspecializedShaders( +bool ShaderCacheOpenGL::GenerateUnspecializedShaders( const std::atomic_bool& stop_loading, const VideoCore::DiskResourceLoadCallback& callback, - const std::vector<ShaderDiskCacheRaw>& raws, - const std::unordered_map<u64, ShaderDiskCacheDecompiled>& decompiled) { - std::unordered_map<u64, UnspecializedShader> unspecialized; - + const std::vector<ShaderDiskCacheRaw>& raws) { if (callback) { callback(VideoCore::LoadCallbackStage::Decompile, 0, raws.size()); } for (std::size_t i = 0; i < raws.size(); ++i) { if (stop_loading) { - return {}; + return false; } const auto& raw{raws[i]}; const u64 unique_identifier{raw.GetUniqueIdentifier()}; const u64 calculated_hash{ GetUniqueIdentifier(raw.GetProgramType(), raw.GetProgramCode(), raw.GetProgramCodeB())}; if (unique_identifier != calculated_hash) { - LOG_ERROR( - Render_OpenGL, - "Invalid hash in entry={:016x} (obtained hash={:016x}) - removing shader cache", - raw.GetUniqueIdentifier(), calculated_hash); + LOG_ERROR(Render_OpenGL, + "Invalid hash in entry={:016x} (obtained hash={:016x}) - " + "removing shader cache", + raw.GetUniqueIdentifier(), calculated_hash); disk_cache.InvalidateTransferable(); - return {}; + return false; } - GLShader::ProgramResult result; - if (const auto it = decompiled.find(unique_identifier); it != decompiled.end()) { - // If it's stored in the precompiled file, avoid decompiling it here - const auto& stored_decompiled{it->second}; - result = {stored_decompiled.code, stored_decompiled.entries}; - } else { - // Otherwise decompile the shader at boot and save the result to the decompiled file - result = CreateProgram(device, raw.GetProgramType(), raw.GetProgramCode(), - raw.GetProgramCodeB()); - disk_cache.SaveDecompiled(unique_identifier, result.first, result.second); - } - - precompiled_shaders.insert({unique_identifier, result}); - - unspecialized.insert( - {raw.GetUniqueIdentifier(), - {std::move(result.first), std::move(result.second), raw.GetProgramType()}}); + const u32 main_offset = + raw.GetProgramType() == ProgramType::Compute ? 
KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET; + ConstBufferLocker locker(GetEnginesShaderType(raw.GetProgramType())); + const ShaderIR ir(raw.GetProgramCode(), main_offset, COMPILER_SETTINGS, locker); + // TODO(Rodrigo): Handle VertexA shaders + // std::optional<ShaderIR> ir_b; + // if (raw.HasProgramA()) { + // ir_b.emplace(raw.GetProgramCodeB(), main_offset); + // } + + UnspecializedShader unspecialized; + unspecialized.entries = GLShader::GetEntries(ir); + unspecialized.program_type = raw.GetProgramType(); + unspecialized.code = raw.GetProgramCode(); + unspecialized.code_b = raw.GetProgramCodeB(); + unspecialized_shaders.emplace(raw.GetUniqueIdentifier(), unspecialized); if (callback) { callback(VideoCore::LoadCallbackStage::Decompile, i, raws.size()); } } - return unspecialized; + return true; } Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) { @@ -590,37 +680,35 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) { } auto& memory_manager{system.GPU().MemoryManager()}; - const GPUVAddr program_addr{GetShaderAddress(system, program)}; + const GPUVAddr address{GetShaderAddress(system, program)}; // Look up shader in the cache based on address - const auto host_ptr{memory_manager.GetPointer(program_addr)}; + const auto host_ptr{memory_manager.GetPointer(address)}; Shader shader{TryGet(host_ptr)}; if (shader) { return last_shaders[static_cast<std::size_t>(program)] = shader; } // No shader found - create a new one - ProgramCode program_code{GetShaderCode(memory_manager, program_addr, host_ptr)}; - ProgramCode program_code_b; - const bool is_program_a{program == Maxwell::ShaderProgram::VertexA}; - if (is_program_a) { - const GPUVAddr program_addr_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)}; - program_code_b = GetShaderCode(memory_manager, program_addr_b, - memory_manager.GetPointer(program_addr_b)); - } - - const auto unique_identifier = - GetUniqueIdentifier(GetProgramType(program), program_code, program_code_b); - const auto cpu_addr{*memory_manager.GpuToCpuAddress(program_addr)}; - const ShaderParameters params{disk_cache, precompiled_programs, device, cpu_addr, - host_ptr, unique_identifier}; - - const auto found = precompiled_shaders.find(unique_identifier); - if (found == precompiled_shaders.end()) { - shader = CachedShader::CreateStageFromMemory(params, program, std::move(program_code), - std::move(program_code_b)); + ProgramCode code{GetShaderCode(memory_manager, address, host_ptr)}; + ProgramCode code_b; + if (program == Maxwell::ShaderProgram::VertexA) { + const GPUVAddr address_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)}; + code_b = GetShaderCode(memory_manager, address_b, memory_manager.GetPointer(address_b)); + } + + const auto unique_identifier = GetUniqueIdentifier(GetProgramType(program), code, code_b); + const auto precompiled_variants = GetPrecompiledVariants(unique_identifier); + const auto cpu_addr{*memory_manager.GpuToCpuAddress(address)}; + const ShaderParameters params{system, disk_cache, precompiled_variants, device, + cpu_addr, host_ptr, unique_identifier}; + + const auto found = unspecialized_shaders.find(unique_identifier); + if (found == unspecialized_shaders.end()) { + shader = CachedShader::CreateStageFromMemory(params, program, std::move(code), + std::move(code_b)); } else { - shader = CachedShader::CreateStageFromCache(params, program, found->second); + shader = CachedShader::CreateFromCache(params, found->second); } Register(shader); @@ -638,15 +726,16 @@ Shader 
ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) { // No kernel found - create a new one auto code{GetShaderCode(memory_manager, code_addr, host_ptr)}; const auto unique_identifier{GetUniqueIdentifier(ProgramType::Compute, code, {})}; + const auto precompiled_variants = GetPrecompiledVariants(unique_identifier); const auto cpu_addr{*memory_manager.GpuToCpuAddress(code_addr)}; - const ShaderParameters params{disk_cache, precompiled_programs, device, cpu_addr, - host_ptr, unique_identifier}; + const ShaderParameters params{system, disk_cache, precompiled_variants, device, + cpu_addr, host_ptr, unique_identifier}; - const auto found = precompiled_shaders.find(unique_identifier); - if (found == precompiled_shaders.end()) { + const auto found = unspecialized_shaders.find(unique_identifier); + if (found == unspecialized_shaders.end()) { kernel = CachedShader::CreateKernelFromMemory(params, std::move(code)); } else { - kernel = CachedShader::CreateKernelFromCache(params, found->second); + kernel = CachedShader::CreateFromCache(params, found->second); } Register(kernel); diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h index de195cc5d..6bd7c9cf1 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_cache.h @@ -8,9 +8,10 @@ #include <atomic> #include <bitset> #include <memory> -#include <set> +#include <string> #include <tuple> #include <unordered_map> +#include <unordered_set> #include <vector> #include <glad/glad.h> @@ -20,6 +21,8 @@ #include "video_core/renderer_opengl/gl_resource_manager.h" #include "video_core/renderer_opengl/gl_shader_decompiler.h" #include "video_core/renderer_opengl/gl_shader_disk_cache.h" +#include "video_core/shader/const_buffer_locker.h" +#include "video_core/shader/shader_ir.h" namespace Core { class System; @@ -40,11 +43,19 @@ using Shader = std::shared_ptr<CachedShader>; using CachedProgram = std::shared_ptr<OGLProgram>; using Maxwell = Tegra::Engines::Maxwell3D::Regs; using PrecompiledPrograms = std::unordered_map<ShaderDiskCacheUsage, CachedProgram>; -using PrecompiledShaders = std::unordered_map<u64, GLShader::ProgramResult>; +using PrecompiledVariants = std::vector<PrecompiledPrograms::iterator>; + +struct UnspecializedShader { + GLShader::ShaderEntries entries; + ProgramType program_type; + ProgramCode code; + ProgramCode code_b; +}; struct ShaderParameters { + Core::System& system; ShaderDiskCacheOpenGL& disk_cache; - const PrecompiledPrograms& precompiled_programs; + const PrecompiledVariants* precompiled_variants; const Device& device; VAddr cpu_addr; u8* host_ptr; @@ -55,23 +66,18 @@ class CachedShader final : public RasterizerCacheObject { public: static Shader CreateStageFromMemory(const ShaderParameters& params, Maxwell::ShaderProgram program_type, - ProgramCode&& program_code, ProgramCode&& program_code_b); - - static Shader CreateStageFromCache(const ShaderParameters& params, - Maxwell::ShaderProgram program_type, - GLShader::ProgramResult result); + ProgramCode program_code, ProgramCode program_code_b); + static Shader CreateKernelFromMemory(const ShaderParameters& params, ProgramCode code); - static Shader CreateKernelFromMemory(const ShaderParameters& params, ProgramCode&& code); - - static Shader CreateKernelFromCache(const ShaderParameters& params, - GLShader::ProgramResult result); + static Shader CreateFromCache(const ShaderParameters& params, + const UnspecializedShader& unspecialized); VAddr GetCpuAddr() const override { 
return cpu_addr; } std::size_t GetSizeInBytes() const override { - return shader_length; + return program_code.size() * sizeof(u64); } /// Gets the shader entries for the shader @@ -83,24 +89,36 @@ public: std::tuple<GLuint, BaseBindings> GetProgramHandle(const ProgramVariant& variant); private: + struct LockerVariant { + std::unique_ptr<VideoCommon::Shader::ConstBufferLocker> locker; + std::unordered_map<ProgramVariant, CachedProgram> programs; + }; + explicit CachedShader(const ShaderParameters& params, ProgramType program_type, - GLShader::ProgramResult result); + GLShader::ShaderEntries entries, ProgramCode program_code, + ProgramCode program_code_b); - CachedProgram TryLoadProgram(const ProgramVariant& variant) const; + void UpdateVariant(); - ShaderDiskCacheUsage GetUsage(const ProgramVariant& variant) const; + ShaderDiskCacheUsage GetUsage(const ProgramVariant& variant, + const VideoCommon::Shader::ConstBufferLocker& locker) const; + + Core::System& system; + ShaderDiskCacheOpenGL& disk_cache; + const Device& device; VAddr cpu_addr{}; + u64 unique_identifier{}; ProgramType program_type{}; - ShaderDiskCacheOpenGL& disk_cache; - const PrecompiledPrograms& precompiled_programs; GLShader::ShaderEntries entries; - std::string code; - std::size_t shader_length{}; - std::unordered_map<ProgramVariant, CachedProgram> programs; + ProgramCode program_code; + ProgramCode program_code_b; + + LockerVariant* curr_variant = nullptr; + std::vector<std::unique_ptr<LockerVariant>> locker_variants; }; class ShaderCacheOpenGL final : public RasterizerCache<Shader> { @@ -123,21 +141,26 @@ protected: void FlushObjectInner(const Shader& object) override {} private: - std::unordered_map<u64, UnspecializedShader> GenerateUnspecializedShaders( - const std::atomic_bool& stop_loading, const VideoCore::DiskResourceLoadCallback& callback, - const std::vector<ShaderDiskCacheRaw>& raws, - const std::unordered_map<u64, ShaderDiskCacheDecompiled>& decompiled); + bool GenerateUnspecializedShaders(const std::atomic_bool& stop_loading, + const VideoCore::DiskResourceLoadCallback& callback, + const std::vector<ShaderDiskCacheRaw>& raws); CachedProgram GeneratePrecompiledProgram(const ShaderDiskCacheDump& dump, - const std::set<GLenum>& supported_formats); + const std::unordered_set<GLenum>& supported_formats); + + const PrecompiledVariants* GetPrecompiledVariants(u64 unique_identifier) const; Core::System& system; Core::Frontend::EmuWindow& emu_window; const Device& device; + ShaderDiskCacheOpenGL disk_cache; - PrecompiledShaders precompiled_shaders; PrecompiledPrograms precompiled_programs; + std::unordered_map<u64, PrecompiledVariants> precompiled_variants; + + std::unordered_map<u64, UnspecializedShader> unspecialized_shaders; + std::array<Shader, Maxwell::MaxShaderProgram> last_shaders; }; diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index baec66ff0..030550c53 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp @@ -415,27 +415,6 @@ public: return code.GetResult(); } - ShaderEntries GetShaderEntries() const { - ShaderEntries entries; - for (const auto& cbuf : ir.GetConstantBuffers()) { - entries.const_buffers.emplace_back(cbuf.second.GetMaxOffset(), cbuf.second.IsIndirect(), - cbuf.first); - } - for (const auto& sampler : ir.GetSamplers()) { - entries.samplers.emplace_back(sampler); - } - for (const auto& [offset, image] : ir.GetImages()) { - 
entries.images.emplace_back(image); - } - for (const auto& [base, usage] : ir.GetGlobalMemory()) { - entries.global_memory_entries.emplace_back(base.cbuf_index, base.cbuf_offset, - usage.is_read, usage.is_written); - } - entries.clip_distances = ir.GetClipDistances(); - entries.shader_length = ir.GetLength(); - return entries; - } - private: friend class ASTDecompiler; friend class ExprDecompiler; @@ -2338,6 +2317,11 @@ public: inner += expr.value ? "true" : "false"; } + void operator()(VideoCommon::Shader::ExprGprEqual& expr) { + inner += + "( ftou(" + decomp.GetRegister(expr.gpr) + ") == " + std::to_string(expr.value) + ')'; + } + const std::string& GetResult() const { return inner; } @@ -2476,25 +2460,46 @@ void GLSLDecompiler::DecompileAST() { } // Anonymous namespace +ShaderEntries GetEntries(const VideoCommon::Shader::ShaderIR& ir) { + ShaderEntries entries; + for (const auto& cbuf : ir.GetConstantBuffers()) { + entries.const_buffers.emplace_back(cbuf.second.GetMaxOffset(), cbuf.second.IsIndirect(), + cbuf.first); + } + for (const auto& sampler : ir.GetSamplers()) { + entries.samplers.emplace_back(sampler); + } + for (const auto& [offset, image] : ir.GetImages()) { + entries.images.emplace_back(image); + } + for (const auto& [base, usage] : ir.GetGlobalMemory()) { + entries.global_memory_entries.emplace_back(base.cbuf_index, base.cbuf_offset, usage.is_read, + usage.is_written); + } + entries.clip_distances = ir.GetClipDistances(); + entries.shader_length = ir.GetLength(); + return entries; +} + std::string GetCommonDeclarations() { - return fmt::format( - "#define ftoi floatBitsToInt\n" - "#define ftou floatBitsToUint\n" - "#define itof intBitsToFloat\n" - "#define utof uintBitsToFloat\n\n" - "bvec2 HalfFloatNanComparison(bvec2 comparison, vec2 pair1, vec2 pair2) {{\n" - " bvec2 is_nan1 = isnan(pair1);\n" - " bvec2 is_nan2 = isnan(pair2);\n" - " return bvec2(comparison.x || is_nan1.x || is_nan2.x, comparison.y || is_nan1.y || " - "is_nan2.y);\n" - "}}\n\n"); + return R"(#define ftoi floatBitsToInt +#define ftou floatBitsToUint +#define itof intBitsToFloat +#define utof uintBitsToFloat + +bvec2 HalfFloatNanComparison(bvec2 comparison, vec2 pair1, vec2 pair2) { + bvec2 is_nan1 = isnan(pair1); + bvec2 is_nan2 = isnan(pair2); + return bvec2(comparison.x || is_nan1.x || is_nan2.x, comparison.y || is_nan1.y || is_nan2.y); +} +)"; } -ProgramResult Decompile(const Device& device, const ShaderIR& ir, ProgramType stage, - const std::string& suffix) { +std::string Decompile(const Device& device, const ShaderIR& ir, ProgramType stage, + const std::string& suffix) { GLSLDecompiler decompiler(device, ir, stage, suffix); decompiler.Decompile(); - return {decompiler.GetResult(), decompiler.GetShaderEntries()}; + return decompiler.GetResult(); } } // namespace OpenGL::GLShader diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.h b/src/video_core/renderer_opengl/gl_shader_decompiler.h index e538dc001..fead2a51e 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.h +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.h @@ -34,10 +34,7 @@ enum class ProgramType : u32 { namespace OpenGL::GLShader { -struct ShaderEntries; - using Maxwell = Tegra::Engines::Maxwell3D::Regs; -using ProgramResult = std::pair<std::string, ShaderEntries>; using SamplerEntry = VideoCommon::Shader::Sampler; using ImageEntry = VideoCommon::Shader::Image; @@ -93,9 +90,11 @@ struct ShaderEntries { std::size_t shader_length{}; }; +ShaderEntries GetEntries(const VideoCommon::Shader::ShaderIR& ir); + 
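// Sketch of the call pattern this split enables (names such as `device`, `code`,
// `main_offset`, `settings` and `locker` are placeholders supplied by the caller): entry
// reflection and GLSL generation are now two independent passes over the same ShaderIR.
//
//   const VideoCommon::Shader::ShaderIR ir(code, main_offset, settings, locker);
//   const ShaderEntries entries = GetEntries(ir);
//   const std::string glsl = Decompile(device, ir, ProgramType::Fragment, "fragment");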
std::string GetCommonDeclarations(); -ProgramResult Decompile(const Device& device, const VideoCommon::Shader::ShaderIR& ir, - ProgramType stage, const std::string& suffix); +std::string Decompile(const Device& device, const VideoCommon::Shader::ShaderIR& ir, + ProgramType stage, const std::string& suffix); } // namespace OpenGL::GLShader diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp index 74cc33476..184a565e6 100644 --- a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp @@ -22,6 +22,29 @@ namespace OpenGL { +using VideoCommon::Shader::BindlessSamplerMap; +using VideoCommon::Shader::BoundSamplerMap; +using VideoCommon::Shader::KeyMap; + +namespace { + +struct ConstBufferKey { + u32 cbuf; + u32 offset; + u32 value; +}; + +struct BoundSamplerKey { + u32 offset; + Tegra::Engines::SamplerDescriptor sampler; +}; + +struct BindlessSamplerKey { + u32 cbuf; + u32 offset; + Tegra::Engines::SamplerDescriptor sampler; +}; + using ShaderCacheVersionHash = std::array<u8, 64>; enum class TransferableEntryKind : u32 { @@ -29,18 +52,10 @@ enum class TransferableEntryKind : u32 { Usage, }; -enum class PrecompiledEntryKind : u32 { - Decompiled, - Dump, -}; - -constexpr u32 NativeVersion = 4; +constexpr u32 NativeVersion = 5; // Making sure sizes doesn't change by accident static_assert(sizeof(BaseBindings) == 16); -static_assert(sizeof(ShaderDiskCacheUsage) == 40); - -namespace { ShaderCacheVersionHash GetShaderCacheVersionHash() { ShaderCacheVersionHash hash{}; @@ -49,13 +64,11 @@ ShaderCacheVersionHash GetShaderCacheVersionHash() { return hash; } -} // namespace +} // Anonymous namespace ShaderDiskCacheRaw::ShaderDiskCacheRaw(u64 unique_identifier, ProgramType program_type, - u32 program_code_size, u32 program_code_size_b, ProgramCode program_code, ProgramCode program_code_b) : unique_identifier{unique_identifier}, program_type{program_type}, - program_code_size{program_code_size}, program_code_size_b{program_code_size_b}, program_code{std::move(program_code)}, program_code_b{std::move(program_code_b)} {} ShaderDiskCacheRaw::ShaderDiskCacheRaw() = default; @@ -90,15 +103,16 @@ bool ShaderDiskCacheRaw::Load(FileUtil::IOFile& file) { bool ShaderDiskCacheRaw::Save(FileUtil::IOFile& file) const { if (file.WriteObject(unique_identifier) != 1 || file.WriteObject(static_cast<u32>(program_type)) != 1 || - file.WriteObject(program_code_size) != 1 || file.WriteObject(program_code_size_b) != 1) { + file.WriteObject(static_cast<u32>(program_code.size())) != 1 || + file.WriteObject(static_cast<u32>(program_code_b.size())) != 1) { return false; } - if (file.WriteArray(program_code.data(), program_code_size) != program_code_size) + if (file.WriteArray(program_code.data(), program_code.size()) != program_code.size()) return false; if (HasProgramA() && - file.WriteArray(program_code_b.data(), program_code_size_b) != program_code_size_b) { + file.WriteArray(program_code_b.data(), program_code_b.size()) != program_code_b.size()) { return false; } return true; @@ -127,13 +141,13 @@ ShaderDiskCacheOpenGL::LoadTransferable() { u32 version{}; if (file.ReadBytes(&version, sizeof(version)) != sizeof(version)) { LOG_ERROR(Render_OpenGL, - "Failed to get transferable cache version for title id={} - skipping", + "Failed to get transferable cache version for title id={}, skipping", GetTitleID()); return {}; } if (version < NativeVersion) { - LOG_INFO(Render_OpenGL, "Transferable shader 
cache is old - removing"); + LOG_INFO(Render_OpenGL, "Transferable shader cache is old, removing"); file.Close(); InvalidateTransferable(); is_usable = true; @@ -141,17 +155,18 @@ ShaderDiskCacheOpenGL::LoadTransferable() { } if (version > NativeVersion) { LOG_WARNING(Render_OpenGL, "Transferable shader cache was generated with a newer version " - "of the emulator - skipping"); + "of the emulator, skipping"); return {}; } // Version is valid, load the shaders + constexpr const char error_loading[] = "Failed to load transferable raw entry, skipping"; std::vector<ShaderDiskCacheRaw> raws; std::vector<ShaderDiskCacheUsage> usages; while (file.Tell() < file.GetSize()) { TransferableEntryKind kind{}; if (file.ReadBytes(&kind, sizeof(u32)) != sizeof(u32)) { - LOG_ERROR(Render_OpenGL, "Failed to read transferable file - skipping"); + LOG_ERROR(Render_OpenGL, "Failed to read transferable file, skipping"); return {}; } @@ -159,7 +174,7 @@ ShaderDiskCacheOpenGL::LoadTransferable() { case TransferableEntryKind::Raw: { ShaderDiskCacheRaw entry; if (!entry.Load(file)) { - LOG_ERROR(Render_OpenGL, "Failed to load transferable raw entry - skipping"); + LOG_ERROR(Render_OpenGL, error_loading); return {}; } transferable.insert({entry.GetUniqueIdentifier(), {}}); @@ -167,16 +182,45 @@ ShaderDiskCacheOpenGL::LoadTransferable() { break; } case TransferableEntryKind::Usage: { - ShaderDiskCacheUsage usage{}; - if (file.ReadBytes(&usage, sizeof(usage)) != sizeof(usage)) { - LOG_ERROR(Render_OpenGL, "Failed to load transferable usage entry - skipping"); + ShaderDiskCacheUsage usage; + + u32 num_keys{}; + u32 num_bound_samplers{}; + u32 num_bindless_samplers{}; + if (file.ReadArray(&usage.unique_identifier, 1) != 1 || + file.ReadArray(&usage.variant, 1) != 1 || file.ReadArray(&num_keys, 1) != 1 || + file.ReadArray(&num_bound_samplers, 1) != 1 || + file.ReadArray(&num_bindless_samplers, 1) != 1) { + LOG_ERROR(Render_OpenGL, error_loading); return {}; } + + std::vector<ConstBufferKey> keys(num_keys); + std::vector<BoundSamplerKey> bound_samplers(num_bound_samplers); + std::vector<BindlessSamplerKey> bindless_samplers(num_bindless_samplers); + if (file.ReadArray(keys.data(), keys.size()) != keys.size() || + file.ReadArray(bound_samplers.data(), bound_samplers.size()) != + bound_samplers.size() || + file.ReadArray(bindless_samplers.data(), bindless_samplers.size()) != + bindless_samplers.size()) { + LOG_ERROR(Render_OpenGL, error_loading); + return {}; + } + for (const auto& key : keys) { + usage.keys.insert({{key.cbuf, key.offset}, key.value}); + } + for (const auto& key : bound_samplers) { + usage.bound_samplers.emplace(key.offset, key.sampler); + } + for (const auto& key : bindless_samplers) { + usage.bindless_samplers.insert({{key.cbuf, key.offset}, key.sampler}); + } + usages.push_back(std::move(usage)); break; } default: - LOG_ERROR(Render_OpenGL, "Unknown transferable shader cache entry kind={} - skipping", + LOG_ERROR(Render_OpenGL, "Unknown transferable shader cache entry kind={}, skipping", static_cast<u32>(kind)); return {}; } @@ -186,13 +230,14 @@ ShaderDiskCacheOpenGL::LoadTransferable() { return {{std::move(raws), std::move(usages)}}; } -std::pair<std::unordered_map<u64, ShaderDiskCacheDecompiled>, ShaderDumpsMap> +std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump> ShaderDiskCacheOpenGL::LoadPrecompiled() { if (!is_usable) { return {}; } - FileUtil::IOFile file(GetPrecompiledPath(), "rb"); + std::string path = GetPrecompiledPath(); + FileUtil::IOFile file(path, "rb"); if (!file.IsOpen()) { 
LOG_INFO(Render_OpenGL, "No precompiled shader cache found for game with title id={}", GetTitleID()); @@ -202,7 +247,7 @@ ShaderDiskCacheOpenGL::LoadPrecompiled() { const auto result = LoadPrecompiledFile(file); if (!result) { LOG_INFO(Render_OpenGL, - "Failed to load precompiled cache for game with title id={} - removing", + "Failed to load precompiled cache for game with title id={}, removing", GetTitleID()); file.Close(); InvalidatePrecompiled(); @@ -211,7 +256,7 @@ ShaderDiskCacheOpenGL::LoadPrecompiled() { return *result; } -std::optional<std::pair<std::unordered_map<u64, ShaderDiskCacheDecompiled>, ShaderDumpsMap>> +std::optional<std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump>> ShaderDiskCacheOpenGL::LoadPrecompiledFile(FileUtil::IOFile& file) { // Read compressed file from disk and decompress to virtual precompiled cache file std::vector<u8> compressed(file.GetSize()); @@ -231,238 +276,56 @@ ShaderDiskCacheOpenGL::LoadPrecompiledFile(FileUtil::IOFile& file) { return {}; } - std::unordered_map<u64, ShaderDiskCacheDecompiled> decompiled; ShaderDumpsMap dumps; while (precompiled_cache_virtual_file_offset < precompiled_cache_virtual_file.GetSize()) { - PrecompiledEntryKind kind{}; - if (!LoadObjectFromPrecompiled(kind)) { + u32 num_keys{}; + u32 num_bound_samplers{}; + u32 num_bindless_samplers{}; + ShaderDiskCacheUsage usage; + if (!LoadObjectFromPrecompiled(usage.unique_identifier) || + !LoadObjectFromPrecompiled(usage.variant) || !LoadObjectFromPrecompiled(num_keys) || + !LoadObjectFromPrecompiled(num_bound_samplers) || + !LoadObjectFromPrecompiled(num_bindless_samplers)) { return {}; } - - switch (kind) { - case PrecompiledEntryKind::Decompiled: { - u64 unique_identifier{}; - if (!LoadObjectFromPrecompiled(unique_identifier)) { - return {}; - } - - auto entry = LoadDecompiledEntry(); - if (!entry) { - return {}; - } - decompiled.insert({unique_identifier, std::move(*entry)}); - break; - } - case PrecompiledEntryKind::Dump: { - ShaderDiskCacheUsage usage; - if (!LoadObjectFromPrecompiled(usage)) { - return {}; - } - - ShaderDiskCacheDump dump; - if (!LoadObjectFromPrecompiled(dump.binary_format)) { - return {}; - } - - u32 binary_length{}; - if (!LoadObjectFromPrecompiled(binary_length)) { - return {}; - } - - dump.binary.resize(binary_length); - if (!LoadArrayFromPrecompiled(dump.binary.data(), dump.binary.size())) { - return {}; - } - - dumps.insert({usage, dump}); - break; - } - default: + std::vector<ConstBufferKey> keys(num_keys); + std::vector<BoundSamplerKey> bound_samplers(num_bound_samplers); + std::vector<BindlessSamplerKey> bindless_samplers(num_bindless_samplers); + if (!LoadArrayFromPrecompiled(keys.data(), keys.size()) || + !LoadArrayFromPrecompiled(bound_samplers.data(), bound_samplers.size()) != + bound_samplers.size() || + !LoadArrayFromPrecompiled(bindless_samplers.data(), bindless_samplers.size()) != + bindless_samplers.size()) { return {}; } - } - return {{decompiled, dumps}}; -} - -std::optional<ShaderDiskCacheDecompiled> ShaderDiskCacheOpenGL::LoadDecompiledEntry() { - u32 code_size{}; - if (!LoadObjectFromPrecompiled(code_size)) { - return {}; - } - - std::string code(code_size, '\0'); - if (!LoadArrayFromPrecompiled(code.data(), code.size())) { - return {}; - } - - ShaderDiskCacheDecompiled entry; - entry.code = std::move(code); - - u32 const_buffers_count{}; - if (!LoadObjectFromPrecompiled(const_buffers_count)) { - return {}; - } - - for (u32 i = 0; i < const_buffers_count; ++i) { - u32 max_offset{}; - u32 index{}; - bool is_indirect{}; - if 
(!LoadObjectFromPrecompiled(max_offset) || !LoadObjectFromPrecompiled(index) || - !LoadObjectFromPrecompiled(is_indirect)) { - return {}; + for (const auto& key : keys) { + usage.keys.insert({{key.cbuf, key.offset}, key.value}); } - entry.entries.const_buffers.emplace_back(max_offset, is_indirect, index); - } - - u32 samplers_count{}; - if (!LoadObjectFromPrecompiled(samplers_count)) { - return {}; - } - - for (u32 i = 0; i < samplers_count; ++i) { - u64 offset{}; - u64 index{}; - u32 type{}; - bool is_array{}; - bool is_shadow{}; - bool is_bindless{}; - if (!LoadObjectFromPrecompiled(offset) || !LoadObjectFromPrecompiled(index) || - !LoadObjectFromPrecompiled(type) || !LoadObjectFromPrecompiled(is_array) || - !LoadObjectFromPrecompiled(is_shadow) || !LoadObjectFromPrecompiled(is_bindless)) { - return {}; + for (const auto& key : bound_samplers) { + usage.bound_samplers.emplace(key.offset, key.sampler); } - entry.entries.samplers.emplace_back( - static_cast<std::size_t>(offset), static_cast<std::size_t>(index), - static_cast<Tegra::Shader::TextureType>(type), is_array, is_shadow, is_bindless); - } - - u32 images_count{}; - if (!LoadObjectFromPrecompiled(images_count)) { - return {}; - } - for (u32 i = 0; i < images_count; ++i) { - u64 offset{}; - u64 index{}; - u32 type{}; - u8 is_bindless{}; - u8 is_written{}; - u8 is_read{}; - u8 is_atomic{}; - if (!LoadObjectFromPrecompiled(offset) || !LoadObjectFromPrecompiled(index) || - !LoadObjectFromPrecompiled(type) || !LoadObjectFromPrecompiled(is_bindless) || - !LoadObjectFromPrecompiled(is_written) || !LoadObjectFromPrecompiled(is_read) || - !LoadObjectFromPrecompiled(is_atomic)) { - return {}; + for (const auto& key : bindless_samplers) { + usage.bindless_samplers.insert({{key.cbuf, key.offset}, key.sampler}); } - entry.entries.images.emplace_back( - static_cast<std::size_t>(offset), static_cast<std::size_t>(index), - static_cast<Tegra::Shader::ImageType>(type), is_bindless != 0, is_written != 0, - is_read != 0, is_atomic != 0); - } - u32 global_memory_count{}; - if (!LoadObjectFromPrecompiled(global_memory_count)) { - return {}; - } - for (u32 i = 0; i < global_memory_count; ++i) { - u32 cbuf_index{}; - u32 cbuf_offset{}; - bool is_read{}; - bool is_written{}; - if (!LoadObjectFromPrecompiled(cbuf_index) || !LoadObjectFromPrecompiled(cbuf_offset) || - !LoadObjectFromPrecompiled(is_read) || !LoadObjectFromPrecompiled(is_written)) { + ShaderDiskCacheDump dump; + if (!LoadObjectFromPrecompiled(dump.binary_format)) { return {}; } - entry.entries.global_memory_entries.emplace_back(cbuf_index, cbuf_offset, is_read, - is_written); - } - for (auto& clip_distance : entry.entries.clip_distances) { - if (!LoadObjectFromPrecompiled(clip_distance)) { + u32 binary_length{}; + if (!LoadObjectFromPrecompiled(binary_length)) { return {}; } - } - - u64 shader_length{}; - if (!LoadObjectFromPrecompiled(shader_length)) { - return {}; - } - entry.entries.shader_length = static_cast<std::size_t>(shader_length); - - return entry; -} - -bool ShaderDiskCacheOpenGL::SaveDecompiledFile(u64 unique_identifier, const std::string& code, - const GLShader::ShaderEntries& entries) { - if (!SaveObjectToPrecompiled(static_cast<u32>(PrecompiledEntryKind::Decompiled)) || - !SaveObjectToPrecompiled(unique_identifier) || - !SaveObjectToPrecompiled(static_cast<u32>(code.size())) || - !SaveArrayToPrecompiled(code.data(), code.size())) { - return false; - } - - if (!SaveObjectToPrecompiled(static_cast<u32>(entries.const_buffers.size()))) { - return false; - } - for (const auto& cbuf : 
entries.const_buffers) { - if (!SaveObjectToPrecompiled(static_cast<u32>(cbuf.GetMaxOffset())) || - !SaveObjectToPrecompiled(static_cast<u32>(cbuf.GetIndex())) || - !SaveObjectToPrecompiled(cbuf.IsIndirect())) { - return false; - } - } - - if (!SaveObjectToPrecompiled(static_cast<u32>(entries.samplers.size()))) { - return false; - } - for (const auto& sampler : entries.samplers) { - if (!SaveObjectToPrecompiled(static_cast<u64>(sampler.GetOffset())) || - !SaveObjectToPrecompiled(static_cast<u64>(sampler.GetIndex())) || - !SaveObjectToPrecompiled(static_cast<u32>(sampler.GetType())) || - !SaveObjectToPrecompiled(sampler.IsArray()) || - !SaveObjectToPrecompiled(sampler.IsShadow()) || - !SaveObjectToPrecompiled(sampler.IsBindless())) { - return false; - } - } - - if (!SaveObjectToPrecompiled(static_cast<u32>(entries.images.size()))) { - return false; - } - for (const auto& image : entries.images) { - if (!SaveObjectToPrecompiled(static_cast<u64>(image.GetOffset())) || - !SaveObjectToPrecompiled(static_cast<u64>(image.GetIndex())) || - !SaveObjectToPrecompiled(static_cast<u32>(image.GetType())) || - !SaveObjectToPrecompiled(static_cast<u8>(image.IsBindless() ? 1 : 0)) || - !SaveObjectToPrecompiled(static_cast<u8>(image.IsWritten() ? 1 : 0)) || - !SaveObjectToPrecompiled(static_cast<u8>(image.IsRead() ? 1 : 0)) || - !SaveObjectToPrecompiled(static_cast<u8>(image.IsAtomic() ? 1 : 0))) { - return false; - } - } - if (!SaveObjectToPrecompiled(static_cast<u32>(entries.global_memory_entries.size()))) { - return false; - } - for (const auto& gmem : entries.global_memory_entries) { - if (!SaveObjectToPrecompiled(static_cast<u32>(gmem.GetCbufIndex())) || - !SaveObjectToPrecompiled(static_cast<u32>(gmem.GetCbufOffset())) || - !SaveObjectToPrecompiled(gmem.IsRead()) || !SaveObjectToPrecompiled(gmem.IsWritten())) { - return false; - } - } - - for (const bool clip_distance : entries.clip_distances) { - if (!SaveObjectToPrecompiled(clip_distance)) { - return false; + dump.binary.resize(binary_length); + if (!LoadArrayFromPrecompiled(dump.binary.data(), dump.binary.size())) { + return {}; } - } - if (!SaveObjectToPrecompiled(static_cast<u64>(entries.shader_length))) { - return false; + dumps.emplace(std::move(usage), dump); } - - return true; + return dumps; } void ShaderDiskCacheOpenGL::InvalidateTransferable() { @@ -494,10 +357,11 @@ void ShaderDiskCacheOpenGL::SaveRaw(const ShaderDiskCacheRaw& entry) { } FileUtil::IOFile file = AppendTransferableFile(); - if (!file.IsOpen()) + if (!file.IsOpen()) { return; + } if (file.WriteObject(TransferableEntryKind::Raw) != 1 || !entry.Save(file)) { - LOG_ERROR(Render_OpenGL, "Failed to save raw transferable cache entry - removing"); + LOG_ERROR(Render_OpenGL, "Failed to save raw transferable cache entry, removing"); file.Close(); InvalidateTransferable(); return; @@ -523,29 +387,39 @@ void ShaderDiskCacheOpenGL::SaveUsage(const ShaderDiskCacheUsage& usage) { FileUtil::IOFile file = AppendTransferableFile(); if (!file.IsOpen()) return; - - if (file.WriteObject(TransferableEntryKind::Usage) != 1 || file.WriteObject(usage) != 1) { - LOG_ERROR(Render_OpenGL, "Failed to save usage transferable cache entry - removing"); + const auto Close = [&] { + LOG_ERROR(Render_OpenGL, "Failed to save usage transferable cache entry, removing"); file.Close(); InvalidateTransferable(); - return; - } -} + }; -void ShaderDiskCacheOpenGL::SaveDecompiled(u64 unique_identifier, const std::string& code, - const GLShader::ShaderEntries& entries) { - if (!is_usable) { + if 
(file.WriteObject(TransferableEntryKind::Usage) != 1 || + file.WriteObject(usage.unique_identifier) != 1 || file.WriteObject(usage.variant) != 1 || + file.WriteObject(static_cast<u32>(usage.keys.size())) != 1 || + file.WriteObject(static_cast<u32>(usage.bound_samplers.size())) != 1 || + file.WriteObject(static_cast<u32>(usage.bindless_samplers.size())) != 1) { + Close(); return; } - - if (precompiled_cache_virtual_file.GetSize() == 0) { - SavePrecompiledHeaderToVirtualPrecompiledCache(); + for (const auto& [pair, value] : usage.keys) { + const auto [cbuf, offset] = pair; + if (file.WriteObject(ConstBufferKey{cbuf, offset, value}) != 1) { + Close(); + return; + } } - - if (!SaveDecompiledFile(unique_identifier, code, entries)) { - LOG_ERROR(Render_OpenGL, - "Failed to save decompiled entry to the precompiled file - removing"); - InvalidatePrecompiled(); + for (const auto& [offset, sampler] : usage.bound_samplers) { + if (file.WriteObject(BoundSamplerKey{offset, sampler}) != 1) { + Close(); + return; + } + } + for (const auto& [pair, sampler] : usage.bindless_samplers) { + const auto [cbuf, offset] = pair; + if (file.WriteObject(BindlessSamplerKey{cbuf, offset, sampler}) != 1) { + Close(); + return; + } } } @@ -554,6 +428,13 @@ void ShaderDiskCacheOpenGL::SaveDump(const ShaderDiskCacheUsage& usage, GLuint p return; } + // TODO(Rodrigo): This is a design smell. I shouldn't be having to manually write the header + // when writing the dump. This should be done the moment I get access to write to the virtual + // file. + if (precompiled_cache_virtual_file.GetSize() == 0) { + SavePrecompiledHeaderToVirtualPrecompiledCache(); + } + GLint binary_length{}; glGetProgramiv(program, GL_PROGRAM_BINARY_LENGTH, &binary_length); @@ -561,21 +442,51 @@ void ShaderDiskCacheOpenGL::SaveDump(const ShaderDiskCacheUsage& usage, GLuint p std::vector<u8> binary(binary_length); glGetProgramBinary(program, binary_length, nullptr, &binary_format, binary.data()); - if (!SaveObjectToPrecompiled(static_cast<u32>(PrecompiledEntryKind::Dump)) || - !SaveObjectToPrecompiled(usage) || - !SaveObjectToPrecompiled(static_cast<u32>(binary_format)) || - !SaveObjectToPrecompiled(static_cast<u32>(binary_length)) || - !SaveArrayToPrecompiled(binary.data(), binary.size())) { - LOG_ERROR(Render_OpenGL, "Failed to save binary program file in shader={:016x} - removing", + const auto Close = [&] { + LOG_ERROR(Render_OpenGL, "Failed to save binary program file in shader={:016X}, removing", usage.unique_identifier); InvalidatePrecompiled(); + }; + + if (!SaveObjectToPrecompiled(usage.unique_identifier) || + !SaveObjectToPrecompiled(usage.variant) || + !SaveObjectToPrecompiled(static_cast<u32>(usage.keys.size())) || + !SaveObjectToPrecompiled(static_cast<u32>(usage.bound_samplers.size())) || + !SaveObjectToPrecompiled(static_cast<u32>(usage.bindless_samplers.size()))) { + Close(); return; } + for (const auto& [pair, value] : usage.keys) { + const auto [cbuf, offset] = pair; + if (SaveObjectToPrecompiled(ConstBufferKey{cbuf, offset, value}) != 1) { + Close(); + return; + } + } + for (const auto& [offset, sampler] : usage.bound_samplers) { + if (SaveObjectToPrecompiled(BoundSamplerKey{offset, sampler}) != 1) { + Close(); + return; + } + } + for (const auto& [pair, sampler] : usage.bindless_samplers) { + const auto [cbuf, offset] = pair; + if (SaveObjectToPrecompiled(BindlessSamplerKey{cbuf, offset, sampler}) != 1) { + Close(); + return; + } + } + if (!SaveObjectToPrecompiled(static_cast<u32>(binary_format)) || + 
!SaveObjectToPrecompiled(static_cast<u32>(binary_length)) || + !SaveArrayToPrecompiled(binary.data(), binary.size())) { + Close(); + } } FileUtil::IOFile ShaderDiskCacheOpenGL::AppendTransferableFile() const { - if (!EnsureDirectories()) + if (!EnsureDirectories()) { return {}; + } const auto transferable_path{GetTransferablePath()}; const bool existed = FileUtil::Exists(transferable_path); @@ -607,8 +518,8 @@ void ShaderDiskCacheOpenGL::SavePrecompiledHeaderToVirtualPrecompiledCache() { void ShaderDiskCacheOpenGL::SaveVirtualPrecompiledFile() { precompiled_cache_virtual_file_offset = 0; - const std::vector<u8>& uncompressed = precompiled_cache_virtual_file.ReadAllBytes(); - const std::vector<u8>& compressed = + const std::vector<u8> uncompressed = precompiled_cache_virtual_file.ReadAllBytes(); + const std::vector<u8> compressed = Common::Compression::CompressDataZSTDDefault(uncompressed.data(), uncompressed.size()); const auto precompiled_path{GetPrecompiledPath()}; diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.h b/src/video_core/renderer_opengl/gl_shader_disk_cache.h index 9595bd71b..db23ada93 100644 --- a/src/video_core/renderer_opengl/gl_shader_disk_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.h @@ -8,6 +8,7 @@ #include <optional> #include <string> #include <tuple> +#include <type_traits> #include <unordered_map> #include <unordered_set> #include <utility> @@ -19,6 +20,7 @@ #include "common/common_types.h" #include "core/file_sys/vfs_vector.h" #include "video_core/renderer_opengl/gl_shader_gen.h" +#include "video_core/shader/const_buffer_locker.h" namespace Core { class System; @@ -53,6 +55,7 @@ struct BaseBindings { return !operator==(rhs); } }; +static_assert(std::is_trivially_copyable_v<BaseBindings>); /// Describes the different variants a single program can be compiled. struct ProgramVariant { @@ -70,13 +73,20 @@ struct ProgramVariant { } }; +static_assert(std::is_trivially_copyable_v<ProgramVariant>); + /// Describes how a shader is used. 
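/// Besides the identifier and variant, a usage records every const buffer value and sampler
/// descriptor the translation depended on, so cached shaders can be rebuilt without querying
/// the GPU engines. Transferable entries are serialized as written by SaveUsage (a sketch of
/// the layout, not a formal spec): TransferableEntryKind::Usage, unique_identifier, variant,
/// three u32 counts, then flat arrays of ConstBufferKey, BoundSamplerKey and
/// BindlessSamplerKey; precompiled dump entries reuse the same key layout before the program
/// binary.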
struct ShaderDiskCacheUsage { u64 unique_identifier{}; ProgramVariant variant; + VideoCommon::Shader::KeyMap keys; + VideoCommon::Shader::BoundSamplerMap bound_samplers; + VideoCommon::Shader::BindlessSamplerMap bindless_samplers; bool operator==(const ShaderDiskCacheUsage& rhs) const { - return std::tie(unique_identifier, variant) == std::tie(rhs.unique_identifier, rhs.variant); + return std::tie(unique_identifier, variant, keys, bound_samplers, bindless_samplers) == + std::tie(rhs.unique_identifier, rhs.variant, rhs.keys, rhs.bound_samplers, + rhs.bindless_samplers); } bool operator!=(const ShaderDiskCacheUsage& rhs) const { @@ -123,8 +133,7 @@ namespace OpenGL { class ShaderDiskCacheRaw { public: explicit ShaderDiskCacheRaw(u64 unique_identifier, ProgramType program_type, - u32 program_code_size, u32 program_code_size_b, - ProgramCode program_code, ProgramCode program_code_b); + ProgramCode program_code, ProgramCode program_code_b = {}); ShaderDiskCacheRaw(); ~ShaderDiskCacheRaw(); @@ -155,22 +164,14 @@ public: private: u64 unique_identifier{}; ProgramType program_type{}; - u32 program_code_size{}; - u32 program_code_size_b{}; ProgramCode program_code; ProgramCode program_code_b; }; -/// Contains decompiled data from a shader -struct ShaderDiskCacheDecompiled { - std::string code; - GLShader::ShaderEntries entries; -}; - /// Contains an OpenGL dumped binary program struct ShaderDiskCacheDump { - GLenum binary_format; + GLenum binary_format{}; std::vector<u8> binary; }; @@ -184,9 +185,7 @@ public: LoadTransferable(); /// Loads current game's precompiled cache. Invalidates on failure. - std::pair<std::unordered_map<u64, ShaderDiskCacheDecompiled>, - std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump>> - LoadPrecompiled(); + std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump> LoadPrecompiled(); /// Removes the transferable (and precompiled) cache file. void InvalidateTransferable(); @@ -200,10 +199,6 @@ public: /// Saves shader usage to the transferable file. Does not check for collisions. void SaveUsage(const ShaderDiskCacheUsage& usage); - /// Saves a decompiled entry to the precompiled file. Does not check for collisions. - void SaveDecompiled(u64 unique_identifier, const std::string& code, - const GLShader::ShaderEntries& entries); - /// Saves a dump entry to the precompiled file. Does not check for collisions. void SaveDump(const ShaderDiskCacheUsage& usage, GLuint program); @@ -212,18 +207,9 @@ public: private: /// Loads the transferable cache. Returns empty on failure. - std::optional<std::pair<std::unordered_map<u64, ShaderDiskCacheDecompiled>, - std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump>>> + std::optional<std::unordered_map<ShaderDiskCacheUsage, ShaderDiskCacheDump>> LoadPrecompiledFile(FileUtil::IOFile& file); - /// Loads a decompiled cache entry from m_precompiled_cache_virtual_file. Returns empty on - /// failure. - std::optional<ShaderDiskCacheDecompiled> LoadDecompiledEntry(); - - /// Saves a decompiled entry to the passed file. Returns true on success. 
- bool SaveDecompiledFile(u64 unique_identifier, const std::string& code, - const GLShader::ShaderEntries& entries); - /// Opens current game's transferable file and write it's header if it doesn't exist FileUtil::IOFile AppendTransferableFile() const; diff --git a/src/video_core/renderer_opengl/gl_shader_gen.cpp b/src/video_core/renderer_opengl/gl_shader_gen.cpp index b5a43e79e..0e22eede9 100644 --- a/src/video_core/renderer_opengl/gl_shader_gen.cpp +++ b/src/video_core/renderer_opengl/gl_shader_gen.cpp @@ -16,17 +16,8 @@ using VideoCommon::Shader::CompilerSettings; using VideoCommon::Shader::ProgramCode; using VideoCommon::Shader::ShaderIR; -static constexpr u32 PROGRAM_OFFSET = 10; -static constexpr u32 COMPUTE_OFFSET = 0; - -static constexpr CompilerSettings settings{CompileDepth::NoFlowStack, true}; - -ProgramResult GenerateVertexShader(const Device& device, const ShaderSetup& setup) { - const std::string id = fmt::format("{:016x}", setup.program.unique_identifier); - - std::string out = "// Shader Unique Id: VS" + id + "\n\n"; - out += GetCommonDeclarations(); - +std::string GenerateVertexShader(const Device& device, const ShaderIR& ir, const ShaderIR* ir_b) { + std::string out = GetCommonDeclarations(); out += R"( layout (std140, binding = EMULATION_UBO_BINDING) uniform vs_config { vec4 viewport_flip; @@ -34,17 +25,10 @@ layout (std140, binding = EMULATION_UBO_BINDING) uniform vs_config { }; )"; - - const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET, setup.program.size_a, settings); - const auto stage = setup.IsDualProgram() ? ProgramType::VertexA : ProgramType::VertexB; - ProgramResult program = Decompile(device, program_ir, stage, "vertex"); - out += program.first; - - if (setup.IsDualProgram()) { - const ShaderIR program_ir_b(setup.program.code_b, PROGRAM_OFFSET, setup.program.size_b, - settings); - ProgramResult program_b = Decompile(device, program_ir_b, ProgramType::VertexB, "vertex_b"); - out += program_b.first; + const auto stage = ir_b ? 
ProgramType::VertexA : ProgramType::VertexB; + out += Decompile(device, ir, stage, "vertex"); + if (ir_b) { + out += Decompile(device, *ir_b, ProgramType::VertexB, "vertex_b"); } out += R"( @@ -52,7 +36,7 @@ void main() { execute_vertex(); )"; - if (setup.IsDualProgram()) { + if (ir_b) { out += " execute_vertex_b();"; } @@ -66,17 +50,13 @@ void main() { // Viewport can be flipped, which is unsupported by glViewport gl_Position.xy *= viewport_flip.xy; } -})"; - - return {std::move(out), std::move(program.second)}; +} +)"; + return out; } -ProgramResult GenerateGeometryShader(const Device& device, const ShaderSetup& setup) { - const std::string id = fmt::format("{:016x}", setup.program.unique_identifier); - - std::string out = "// Shader Unique Id: GS" + id + "\n\n"; - out += GetCommonDeclarations(); - +std::string GenerateGeometryShader(const Device& device, const ShaderIR& ir) { + std::string out = GetCommonDeclarations(); out += R"( layout (std140, binding = EMULATION_UBO_BINDING) uniform gs_config { vec4 viewport_flip; @@ -84,25 +64,18 @@ layout (std140, binding = EMULATION_UBO_BINDING) uniform gs_config { }; )"; - - const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET, setup.program.size_a, settings); - ProgramResult program = Decompile(device, program_ir, ProgramType::Geometry, "geometry"); - out += program.first; + out += Decompile(device, ir, ProgramType::Geometry, "geometry"); out += R"( void main() { execute_geometry(); -};)"; - - return {std::move(out), std::move(program.second)}; +} +)"; + return out; } -ProgramResult GenerateFragmentShader(const Device& device, const ShaderSetup& setup) { - const std::string id = fmt::format("{:016x}", setup.program.unique_identifier); - - std::string out = "// Shader Unique Id: FS" + id + "\n\n"; - out += GetCommonDeclarations(); - +std::string GenerateFragmentShader(const Device& device, const ShaderIR& ir) { + std::string out = GetCommonDeclarations(); out += R"( layout (location = 0) out vec4 FragColor0; layout (location = 1) out vec4 FragColor1; @@ -119,36 +92,25 @@ layout (std140, binding = EMULATION_UBO_BINDING) uniform fs_config { }; )"; - - const ShaderIR program_ir(setup.program.code, PROGRAM_OFFSET, setup.program.size_a, settings); - ProgramResult program = Decompile(device, program_ir, ProgramType::Fragment, "fragment"); - out += program.first; + out += Decompile(device, ir, ProgramType::Fragment, "fragment"); out += R"( void main() { execute_fragment(); } - )"; - return {std::move(out), std::move(program.second)}; + return out; } -ProgramResult GenerateComputeShader(const Device& device, const ShaderSetup& setup) { - const std::string id = fmt::format("{:016x}", setup.program.unique_identifier); - - std::string out = "// Shader Unique Id: CS" + id + "\n\n"; - out += GetCommonDeclarations(); - - const ShaderIR program_ir(setup.program.code, COMPUTE_OFFSET, setup.program.size_a, settings); - ProgramResult program = Decompile(device, program_ir, ProgramType::Compute, "compute"); - out += program.first; - +std::string GenerateComputeShader(const Device& device, const ShaderIR& ir) { + std::string out = GetCommonDeclarations(); + out += Decompile(device, ir, ProgramType::Compute, "compute"); out += R"( void main() { execute_compute(); } )"; - return {std::move(out), std::move(program.second)}; + return out; } } // namespace OpenGL::GLShader diff --git a/src/video_core/renderer_opengl/gl_shader_gen.h b/src/video_core/renderer_opengl/gl_shader_gen.h index 3833e88ab..cba2be9f9 100644 --- 
a/src/video_core/renderer_opengl/gl_shader_gen.h +++ b/src/video_core/renderer_opengl/gl_shader_gen.h @@ -17,44 +17,18 @@ class Device; namespace OpenGL::GLShader { using VideoCommon::Shader::ProgramCode; - -struct ShaderSetup { - explicit ShaderSetup(ProgramCode program_code) { - program.code = std::move(program_code); - } - - struct { - ProgramCode code; - ProgramCode code_b; // Used for dual vertex shaders - u64 unique_identifier; - std::size_t size_a; - std::size_t size_b; - } program; - - /// Used in scenarios where we have a dual vertex shaders - void SetProgramB(ProgramCode program_b) { - program.code_b = std::move(program_b); - has_program_b = true; - } - - bool IsDualProgram() const { - return has_program_b; - } - -private: - bool has_program_b{}; -}; +using VideoCommon::Shader::ShaderIR; /// Generates the GLSL vertex shader program source code for the given VS program -ProgramResult GenerateVertexShader(const Device& device, const ShaderSetup& setup); +std::string GenerateVertexShader(const Device& device, const ShaderIR& ir, const ShaderIR* ir_b); /// Generates the GLSL geometry shader program source code for the given GS program -ProgramResult GenerateGeometryShader(const Device& device, const ShaderSetup& setup); +std::string GenerateGeometryShader(const Device& device, const ShaderIR& ir); /// Generates the GLSL fragment shader program source code for the given FS program -ProgramResult GenerateFragmentShader(const Device& device, const ShaderSetup& setup); +std::string GenerateFragmentShader(const Device& device, const ShaderIR& ir); /// Generates the GLSL compute shader program source code for the given CS program -ProgramResult GenerateComputeShader(const Device& device, const ShaderSetup& setup); +std::string GenerateComputeShader(const Device& device, const ShaderIR& ir); } // namespace OpenGL::GLShader diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp index 2f9bfd7e4..55b3e58b2 100644 --- a/src/video_core/renderer_opengl/gl_texture_cache.cpp +++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp @@ -131,6 +131,7 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_8X6_SRGB {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_6X5 {GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, ComponentType::UNorm, false}, // ASTC_2D_6X5_SRGB + {GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, ComponentType::Float, false}, // E5B9G9R9F // Depth formats {GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, ComponentType::Float, false}, // Z32F diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp index 0d943a826..42cf068b6 100644 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp @@ -1704,6 +1704,13 @@ public: return expr.value ? 
decomp.v_true : decomp.v_false;
    }

+    Id operator()(const ExprGprEqual& expr) {
+        const Id target = decomp.Constant(decomp.t_uint, expr.value);
+        const Id gpr = decomp.BitcastTo<Type::Uint>(
+            decomp.Emit(decomp.OpLoad(decomp.t_float, decomp.registers.at(expr.gpr))));
+        return decomp.Emit(decomp.OpIEqual(decomp.t_bool, gpr, target));
+    }
+
     Id Visit(const Expr& node) {
         return std::visit(*this, *node);
     }
diff --git a/src/video_core/shader/ast.cpp b/src/video_core/shader/ast.cpp
index e43aecc18..3f96d9076 100644
--- a/src/video_core/shader/ast.cpp
+++ b/src/video_core/shader/ast.cpp
@@ -228,6 +228,10 @@ public:
         inner += expr.value ? "true" : "false";
     }

+    void operator()(const ExprGprEqual& expr) {
+        inner += "( gpr_" + std::to_string(expr.gpr) + " == " + std::to_string(expr.value) + ')';
+    }
+
     const std::string& GetResult() const {
         return inner;
     }
diff --git a/src/video_core/shader/const_buffer_locker.cpp b/src/video_core/shader/const_buffer_locker.cpp
new file mode 100644
index 000000000..fe467608e
--- /dev/null
+++ b/src/video_core/shader/const_buffer_locker.cpp
@@ -0,0 +1,110 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <memory>
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/shader/const_buffer_locker.h"
+
+namespace VideoCommon::Shader {
+
+using Tegra::Engines::SamplerDescriptor;
+
+ConstBufferLocker::ConstBufferLocker(Tegra::Engines::ShaderType shader_stage)
+    : stage{shader_stage} {}
+
+ConstBufferLocker::ConstBufferLocker(Tegra::Engines::ShaderType shader_stage,
+                                     Tegra::Engines::ConstBufferEngineInterface& engine)
+    : stage{shader_stage}, engine{&engine} {}
+
+ConstBufferLocker::~ConstBufferLocker() = default;
+
+std::optional<u32> ConstBufferLocker::ObtainKey(u32 buffer, u32 offset) {
+    const std::pair<u32, u32> key = {buffer, offset};
+    const auto iter = keys.find(key);
+    if (iter != keys.end()) {
+        return iter->second;
+    }
+    if (!engine) {
+        return std::nullopt;
+    }
+    const u32 value = engine->AccessConstBuffer32(stage, buffer, offset);
+    keys.emplace(key, value);
+    return value;
+}
+
+std::optional<SamplerDescriptor> ConstBufferLocker::ObtainBoundSampler(u32 offset) {
+    const u32 key = offset;
+    const auto iter = bound_samplers.find(key);
+    if (iter != bound_samplers.end()) {
+        return iter->second;
+    }
+    if (!engine) {
+        return std::nullopt;
+    }
+    const SamplerDescriptor value = engine->AccessBoundSampler(stage, offset);
+    bound_samplers.emplace(key, value);
+    return value;
+}
+
+std::optional<Tegra::Engines::SamplerDescriptor> ConstBufferLocker::ObtainBindlessSampler(
+    u32 buffer, u32 offset) {
+    const std::pair key = {buffer, offset};
+    const auto iter = bindless_samplers.find(key);
+    if (iter != bindless_samplers.end()) {
+        return iter->second;
+    }
+    if (!engine) {
+        return std::nullopt;
+    }
+    const SamplerDescriptor value = engine->AccessBindlessSampler(stage, buffer, offset);
+    bindless_samplers.emplace(key, value);
+    return value;
+}
+
+void ConstBufferLocker::InsertKey(u32 buffer, u32 offset, u32 value) {
+    keys.insert_or_assign({buffer, offset}, value);
+}
+
+void ConstBufferLocker::InsertBoundSampler(u32 offset, SamplerDescriptor sampler) {
+    bound_samplers.insert_or_assign(offset, sampler);
+}
+
+void ConstBufferLocker::InsertBindlessSampler(u32 buffer, u32 offset, SamplerDescriptor sampler) {
+    bindless_samplers.insert_or_assign({buffer, offset}, sampler);
+}
+
+bool ConstBufferLocker::IsConsistent() const {
+    if (!engine) {
+        return false;
+    }
+    return std::all_of(keys.begin(), keys.end(),
+                       [this](const auto& pair) {
+                           const auto [cbuf, offset] = pair.first;
+                           const auto value = pair.second;
+                           return value == engine->AccessConstBuffer32(stage, cbuf, offset);
+                       }) &&
+           std::all_of(bound_samplers.begin(), bound_samplers.end(),
+                       [this](const auto& sampler) {
+                           const auto [key, value] = sampler;
+                           return value == engine->AccessBoundSampler(stage, key);
+                       }) &&
+           std::all_of(bindless_samplers.begin(), bindless_samplers.end(),
+                       [this](const auto& sampler) {
+                           const auto [cbuf, offset] = sampler.first;
+                           const auto value = sampler.second;
+                           return value == engine->AccessBindlessSampler(stage, cbuf, offset);
+                       });
+}
+
+bool ConstBufferLocker::HasEqualKeys(const ConstBufferLocker& rhs) const {
+    return keys == rhs.keys && bound_samplers == rhs.bound_samplers &&
+           bindless_samplers == rhs.bindless_samplers;
+}
+
+} // namespace VideoCommon::Shader
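
The ConstBufferLocker added above is what lets shader decompilation read GPU state through a uniform interface and replay those reads later, which is the groundwork for a disk shader cache. A minimal usage sketch, assuming a ConstBufferEngineInterface reference is at hand; this caller code is illustrative and not part of the patch:

```cpp
#include <optional>

#include "video_core/shader/const_buffer_locker.h"

using VideoCommon::Shader::ConstBufferLocker;

bool Example(Tegra::Engines::ConstBufferEngineInterface& engine) {
    // Locker bound to a live engine: unknown keys are read through AccessConstBuffer32
    // and remembered for later.
    ConstBufferLocker locker{Tegra::Engines::ShaderType::Fragment, engine};
    const std::optional<u32> value = locker.ObtainKey(/*buffer=*/1, /*offset=*/0x40);

    // Locker without an engine (e.g. rebuilding a shader from a cache file): only keys
    // inserted up front are available, anything else yields std::nullopt.
    ConstBufferLocker replay{Tegra::Engines::ShaderType::Fragment};
    if (value) {
        replay.InsertKey(1, 0x40, *value);
    }

    // Before reusing a previously built shader, the stored keys can be checked against
    // the engine's current const buffer contents.
    return locker.IsConsistent();
}
```

The same object therefore serves both directions: it records const buffer and sampler reads while a shader is first decompiled, and it validates or replays them when that shader is rebuilt later.
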
diff --git a/src/video_core/shader/const_buffer_locker.h b/src/video_core/shader/const_buffer_locker.h
new file mode 100644
index 000000000..600e2f3c3
--- /dev/null
+++ b/src/video_core/shader/const_buffer_locker.h
@@ -0,0 +1,80 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <optional>
+#include <unordered_map>
+#include "common/common_types.h"
+#include "common/hash.h"
+#include "video_core/engines/const_buffer_engine_interface.h"
+
+namespace VideoCommon::Shader {
+
+using KeyMap = std::unordered_map<std::pair<u32, u32>, u32, Common::PairHash>;
+using BoundSamplerMap = std::unordered_map<u32, Tegra::Engines::SamplerDescriptor>;
+using BindlessSamplerMap =
+    std::unordered_map<std::pair<u32, u32>, Tegra::Engines::SamplerDescriptor, Common::PairHash>;
+
+/**
+ * The ConstBufferLocker is a class used to interface the 3D and compute engines with the shader
+ * compiler. With it, the shader can obtain required data from GPU state and store it for disk
+ * shader compilation.
+ **/
+class ConstBufferLocker {
+public:
+    explicit ConstBufferLocker(Tegra::Engines::ShaderType shader_stage);
+
+    explicit ConstBufferLocker(Tegra::Engines::ShaderType shader_stage,
+                               Tegra::Engines::ConstBufferEngineInterface& engine);
+
+    ~ConstBufferLocker();
+
+    /// Retrieves a key from the locker. If it is registered, the registered value is returned;
+    /// otherwise it is read from the engine and registered.
+    std::optional<u32> ObtainKey(u32 buffer, u32 offset);
+
+    std::optional<Tegra::Engines::SamplerDescriptor> ObtainBoundSampler(u32 offset);
+
+    std::optional<Tegra::Engines::SamplerDescriptor> ObtainBindlessSampler(u32 buffer, u32 offset);
+
+    /// Inserts a key.
+    void InsertKey(u32 buffer, u32 offset, u32 value);
+
+    /// Inserts a bound sampler key.
+    void InsertBoundSampler(u32 offset, Tegra::Engines::SamplerDescriptor sampler);
+
+    /// Inserts a bindless sampler key.
+    void InsertBindlessSampler(u32 buffer, u32 offset, Tegra::Engines::SamplerDescriptor sampler);
+
+    /// Checks keys and samplers against the engine's current const buffers. Returns true if they
+    /// all have the same value, false otherwise.
+    bool IsConsistent() const;
+
+    /// Returns true if the keys are equal to the other ones in the locker.
+    bool HasEqualKeys(const ConstBufferLocker& rhs) const;
+
+    /// Gets the const buffer keys database.
+    const KeyMap& GetKeys() const {
+        return keys;
+    }
+
+    /// Gets the bound samplers database.
+ const BoundSamplerMap& GetBoundSamplers() const { + return bound_samplers; + } + + /// Gets bindless samplers database. + const BindlessSamplerMap& GetBindlessSamplers() const { + return bindless_samplers; + } + +private: + const Tegra::Engines::ShaderType stage; + Tegra::Engines::ConstBufferEngineInterface* engine = nullptr; + KeyMap keys; + BoundSamplerMap bound_samplers; + BindlessSamplerMap bindless_samplers; +}; + +} // namespace VideoCommon::Shader diff --git a/src/video_core/shader/control_flow.cpp b/src/video_core/shader/control_flow.cpp index 9d21f45de..d47c63d9f 100644 --- a/src/video_core/shader/control_flow.cpp +++ b/src/video_core/shader/control_flow.cpp @@ -35,14 +35,20 @@ struct BlockStack { std::stack<u32> pbk_stack{}; }; -struct BlockBranchInfo { - Condition condition{}; - s32 address{exit_branch}; - bool kill{}; - bool is_sync{}; - bool is_brk{}; - bool ignore{}; -}; +template <typename T, typename... Args> +BlockBranchInfo MakeBranchInfo(Args&&... args) { + static_assert(std::is_convertible_v<T, BranchData>); + return std::make_shared<BranchData>(T(std::forward<Args>(args)...)); +} + +bool BlockBranchIsIgnored(BlockBranchInfo first) { + bool ignore = false; + if (std::holds_alternative<SingleBranch>(*first)) { + const auto branch = std::get_if<SingleBranch>(first.get()); + ignore = branch->ignore; + } + return ignore; +} struct BlockInfo { u32 start{}; @@ -56,10 +62,11 @@ struct BlockInfo { }; struct CFGRebuildState { - explicit CFGRebuildState(const ProgramCode& program_code, const std::size_t program_size, - const u32 start) - : start{start}, program_code{program_code}, program_size{program_size} {} + explicit CFGRebuildState(const ProgramCode& program_code, u32 start, ConstBufferLocker& locker) + : program_code{program_code}, start{start}, locker{locker} {} + const ProgramCode& program_code; + ConstBufferLocker& locker; u32 start{}; std::vector<BlockInfo> block_info{}; std::list<u32> inspect_queries{}; @@ -69,8 +76,6 @@ struct CFGRebuildState { std::map<u32, u32> ssy_labels{}; std::map<u32, u32> pbk_labels{}; std::unordered_map<u32, BlockStack> stacks{}; - const ProgramCode& program_code; - const std::size_t program_size; ASTManager* manager; }; @@ -124,10 +129,116 @@ enum class ParseResult : u32 { AbnormalFlow, }; +struct BranchIndirectInfo { + u32 buffer{}; + u32 offset{}; + u32 entries{}; + s32 relative_position{}; +}; + +std::optional<BranchIndirectInfo> TrackBranchIndirectInfo(const CFGRebuildState& state, + u32 start_address, u32 current_position) { + const u32 shader_start = state.start; + u32 pos = current_position; + BranchIndirectInfo result{}; + u64 track_register = 0; + + // Step 0 Get BRX Info + const Instruction instr = {state.program_code[pos]}; + const auto opcode = OpCode::Decode(instr); + if (opcode->get().GetId() != OpCode::Id::BRX) { + return std::nullopt; + } + if (instr.brx.constant_buffer != 0) { + return std::nullopt; + } + track_register = instr.gpr8.Value(); + result.relative_position = instr.brx.GetBranchExtend(); + pos--; + bool found_track = false; + + // Step 1 Track LDC + while (pos >= shader_start) { + if (IsSchedInstruction(pos, shader_start)) { + pos--; + continue; + } + const Instruction instr = {state.program_code[pos]}; + const auto opcode = OpCode::Decode(instr); + if (opcode->get().GetId() == OpCode::Id::LD_C) { + if (instr.gpr0.Value() == track_register && + instr.ld_c.type.Value() == Tegra::Shader::UniformType::Single) { + result.buffer = instr.cbuf36.index.Value(); + result.offset = 
static_cast<u32>(instr.cbuf36.GetOffset()); + track_register = instr.gpr8.Value(); + pos--; + found_track = true; + break; + } + } + pos--; + } + + if (!found_track) { + return std::nullopt; + } + found_track = false; + + // Step 2 Track SHL + while (pos >= shader_start) { + if (IsSchedInstruction(pos, shader_start)) { + pos--; + continue; + } + const Instruction instr = state.program_code[pos]; + const auto opcode = OpCode::Decode(instr); + if (opcode->get().GetId() == OpCode::Id::SHL_IMM) { + if (instr.gpr0.Value() == track_register) { + track_register = instr.gpr8.Value(); + pos--; + found_track = true; + break; + } + } + pos--; + } + + if (!found_track) { + return std::nullopt; + } + found_track = false; + + // Step 3 Track IMNMX + while (pos >= shader_start) { + if (IsSchedInstruction(pos, shader_start)) { + pos--; + continue; + } + const Instruction instr = state.program_code[pos]; + const auto opcode = OpCode::Decode(instr); + if (opcode->get().GetId() == OpCode::Id::IMNMX_IMM) { + if (instr.gpr0.Value() == track_register) { + track_register = instr.gpr8.Value(); + result.entries = instr.alu.GetSignedImm20_20() + 1; + pos--; + found_track = true; + break; + } + } + pos--; + } + + if (!found_track) { + return std::nullopt; + } + return result; +} + std::pair<ParseResult, ParseInfo> ParseCode(CFGRebuildState& state, u32 address) { u32 offset = static_cast<u32>(address); - const u32 end_address = static_cast<u32>(state.program_size / sizeof(Instruction)); + const u32 end_address = static_cast<u32>(state.program_code.size()); ParseInfo parse_info{}; + SingleBranch single_branch{}; const auto insert_label = [](CFGRebuildState& state, u32 address) { const auto pair = state.labels.emplace(address); @@ -140,13 +251,14 @@ std::pair<ParseResult, ParseInfo> ParseCode(CFGRebuildState& state, u32 address) if (offset >= end_address) { // ASSERT_OR_EXECUTE can't be used, as it ignores the break ASSERT_MSG(false, "Shader passed the current limit!"); - parse_info.branch_info.address = exit_branch; - parse_info.branch_info.ignore = false; + + single_branch.address = exit_branch; + single_branch.ignore = false; break; } if (state.registered.count(offset) != 0) { - parse_info.branch_info.address = offset; - parse_info.branch_info.ignore = true; + single_branch.address = offset; + single_branch.ignore = true; break; } if (IsSchedInstruction(offset, state.start)) { @@ -163,24 +275,26 @@ std::pair<ParseResult, ParseInfo> ParseCode(CFGRebuildState& state, u32 address) switch (opcode->get().GetId()) { case OpCode::Id::EXIT: { const auto pred_index = static_cast<u32>(instr.pred.pred_index); - parse_info.branch_info.condition.predicate = - GetPredicate(pred_index, instr.negate_pred != 0); - if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) { + single_branch.condition.predicate = GetPredicate(pred_index, instr.negate_pred != 0); + if (single_branch.condition.predicate == Pred::NeverExecute) { offset++; continue; } const ConditionCode cc = instr.flow_condition_code; - parse_info.branch_info.condition.cc = cc; + single_branch.condition.cc = cc; if (cc == ConditionCode::F) { offset++; continue; } - parse_info.branch_info.address = exit_branch; - parse_info.branch_info.kill = false; - parse_info.branch_info.is_sync = false; - parse_info.branch_info.is_brk = false; - parse_info.branch_info.ignore = false; + single_branch.address = exit_branch; + single_branch.kill = false; + single_branch.is_sync = false; + single_branch.is_brk = false; + single_branch.ignore = false; parse_info.end_address = 
offset; + parse_info.branch_info = MakeBranchInfo<SingleBranch>( + single_branch.condition, single_branch.address, single_branch.kill, + single_branch.is_sync, single_branch.is_brk, single_branch.ignore); return {ParseResult::ControlCaught, parse_info}; } @@ -189,99 +303,107 @@ std::pair<ParseResult, ParseInfo> ParseCode(CFGRebuildState& state, u32 address) return {ParseResult::AbnormalFlow, parse_info}; } const auto pred_index = static_cast<u32>(instr.pred.pred_index); - parse_info.branch_info.condition.predicate = - GetPredicate(pred_index, instr.negate_pred != 0); - if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) { + single_branch.condition.predicate = GetPredicate(pred_index, instr.negate_pred != 0); + if (single_branch.condition.predicate == Pred::NeverExecute) { offset++; continue; } const ConditionCode cc = instr.flow_condition_code; - parse_info.branch_info.condition.cc = cc; + single_branch.condition.cc = cc; if (cc == ConditionCode::F) { offset++; continue; } const u32 branch_offset = offset + instr.bra.GetBranchTarget(); if (branch_offset == 0) { - parse_info.branch_info.address = exit_branch; + single_branch.address = exit_branch; } else { - parse_info.branch_info.address = branch_offset; + single_branch.address = branch_offset; } insert_label(state, branch_offset); - parse_info.branch_info.kill = false; - parse_info.branch_info.is_sync = false; - parse_info.branch_info.is_brk = false; - parse_info.branch_info.ignore = false; + single_branch.kill = false; + single_branch.is_sync = false; + single_branch.is_brk = false; + single_branch.ignore = false; parse_info.end_address = offset; + parse_info.branch_info = MakeBranchInfo<SingleBranch>( + single_branch.condition, single_branch.address, single_branch.kill, + single_branch.is_sync, single_branch.is_brk, single_branch.ignore); return {ParseResult::ControlCaught, parse_info}; } case OpCode::Id::SYNC: { const auto pred_index = static_cast<u32>(instr.pred.pred_index); - parse_info.branch_info.condition.predicate = - GetPredicate(pred_index, instr.negate_pred != 0); - if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) { + single_branch.condition.predicate = GetPredicate(pred_index, instr.negate_pred != 0); + if (single_branch.condition.predicate == Pred::NeverExecute) { offset++; continue; } const ConditionCode cc = instr.flow_condition_code; - parse_info.branch_info.condition.cc = cc; + single_branch.condition.cc = cc; if (cc == ConditionCode::F) { offset++; continue; } - parse_info.branch_info.address = unassigned_branch; - parse_info.branch_info.kill = false; - parse_info.branch_info.is_sync = true; - parse_info.branch_info.is_brk = false; - parse_info.branch_info.ignore = false; + single_branch.address = unassigned_branch; + single_branch.kill = false; + single_branch.is_sync = true; + single_branch.is_brk = false; + single_branch.ignore = false; parse_info.end_address = offset; + parse_info.branch_info = MakeBranchInfo<SingleBranch>( + single_branch.condition, single_branch.address, single_branch.kill, + single_branch.is_sync, single_branch.is_brk, single_branch.ignore); return {ParseResult::ControlCaught, parse_info}; } case OpCode::Id::BRK: { const auto pred_index = static_cast<u32>(instr.pred.pred_index); - parse_info.branch_info.condition.predicate = - GetPredicate(pred_index, instr.negate_pred != 0); - if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) { + single_branch.condition.predicate = GetPredicate(pred_index, instr.negate_pred != 0); + if 
(single_branch.condition.predicate == Pred::NeverExecute) { offset++; continue; } const ConditionCode cc = instr.flow_condition_code; - parse_info.branch_info.condition.cc = cc; + single_branch.condition.cc = cc; if (cc == ConditionCode::F) { offset++; continue; } - parse_info.branch_info.address = unassigned_branch; - parse_info.branch_info.kill = false; - parse_info.branch_info.is_sync = false; - parse_info.branch_info.is_brk = true; - parse_info.branch_info.ignore = false; + single_branch.address = unassigned_branch; + single_branch.kill = false; + single_branch.is_sync = false; + single_branch.is_brk = true; + single_branch.ignore = false; parse_info.end_address = offset; + parse_info.branch_info = MakeBranchInfo<SingleBranch>( + single_branch.condition, single_branch.address, single_branch.kill, + single_branch.is_sync, single_branch.is_brk, single_branch.ignore); return {ParseResult::ControlCaught, parse_info}; } case OpCode::Id::KIL: { const auto pred_index = static_cast<u32>(instr.pred.pred_index); - parse_info.branch_info.condition.predicate = - GetPredicate(pred_index, instr.negate_pred != 0); - if (parse_info.branch_info.condition.predicate == Pred::NeverExecute) { + single_branch.condition.predicate = GetPredicate(pred_index, instr.negate_pred != 0); + if (single_branch.condition.predicate == Pred::NeverExecute) { offset++; continue; } const ConditionCode cc = instr.flow_condition_code; - parse_info.branch_info.condition.cc = cc; + single_branch.condition.cc = cc; if (cc == ConditionCode::F) { offset++; continue; } - parse_info.branch_info.address = exit_branch; - parse_info.branch_info.kill = true; - parse_info.branch_info.is_sync = false; - parse_info.branch_info.is_brk = false; - parse_info.branch_info.ignore = false; + single_branch.address = exit_branch; + single_branch.kill = true; + single_branch.is_sync = false; + single_branch.is_brk = false; + single_branch.ignore = false; parse_info.end_address = offset; + parse_info.branch_info = MakeBranchInfo<SingleBranch>( + single_branch.condition, single_branch.address, single_branch.kill, + single_branch.is_sync, single_branch.is_brk, single_branch.ignore); return {ParseResult::ControlCaught, parse_info}; } @@ -298,6 +420,29 @@ std::pair<ParseResult, ParseInfo> ParseCode(CFGRebuildState& state, u32 address) break; } case OpCode::Id::BRX: { + auto tmp = TrackBranchIndirectInfo(state, address, offset); + if (tmp) { + auto result = *tmp; + std::vector<CaseBranch> branches{}; + s32 pc_target = offset + result.relative_position; + for (u32 i = 0; i < result.entries; i++) { + auto k = state.locker.ObtainKey(result.buffer, result.offset + i * 4); + if (!k) { + return {ParseResult::AbnormalFlow, parse_info}; + } + u32 value = *k; + u32 target = static_cast<u32>((value >> 3) + pc_target); + insert_label(state, target); + branches.emplace_back(value, target); + } + parse_info.end_address = offset; + parse_info.branch_info = MakeBranchInfo<MultiBranch>( + static_cast<u32>(instr.gpr8.Value()), std::move(branches)); + + return {ParseResult::ControlCaught, parse_info}; + } else { + LOG_WARNING(HW_GPU, "BRX Track Unsuccesful"); + } return {ParseResult::AbnormalFlow, parse_info}; } default: @@ -306,10 +451,13 @@ std::pair<ParseResult, ParseInfo> ParseCode(CFGRebuildState& state, u32 address) offset++; } - parse_info.branch_info.kill = false; - parse_info.branch_info.is_sync = false; - parse_info.branch_info.is_brk = false; + single_branch.kill = false; + single_branch.is_sync = false; + single_branch.is_brk = false; parse_info.end_address = 
offset - 1; + parse_info.branch_info = MakeBranchInfo<SingleBranch>( + single_branch.condition, single_branch.address, single_branch.kill, single_branch.is_sync, + single_branch.is_brk, single_branch.ignore); return {ParseResult::BlockEnd, parse_info}; } @@ -333,9 +481,10 @@ bool TryInspectAddress(CFGRebuildState& state) { BlockInfo& current_block = state.block_info[block_index]; current_block.end = address - 1; new_block.branch = current_block.branch; - BlockBranchInfo forward_branch{}; - forward_branch.address = address; - forward_branch.ignore = true; + BlockBranchInfo forward_branch = MakeBranchInfo<SingleBranch>(); + const auto branch = std::get_if<SingleBranch>(forward_branch.get()); + branch->address = address; + branch->ignore = true; current_block.branch = forward_branch; return true; } @@ -350,12 +499,15 @@ bool TryInspectAddress(CFGRebuildState& state) { BlockInfo& block_info = CreateBlockInfo(state, address, parse_info.end_address); block_info.branch = parse_info.branch_info; - if (parse_info.branch_info.condition.IsUnconditional()) { + if (std::holds_alternative<SingleBranch>(*block_info.branch)) { + const auto branch = std::get_if<SingleBranch>(block_info.branch.get()); + if (branch->condition.IsUnconditional()) { + return true; + } + const u32 fallthrough_address = parse_info.end_address + 1; + state.inspect_queries.push_front(fallthrough_address); return true; } - - const u32 fallthrough_address = parse_info.end_address + 1; - state.inspect_queries.push_front(fallthrough_address); return true; } @@ -393,31 +545,42 @@ bool TryQuery(CFGRebuildState& state) { state.queries.pop_front(); gather_labels(q2.ssy_stack, state.ssy_labels, block); gather_labels(q2.pbk_stack, state.pbk_labels, block); - if (!block.branch.condition.IsUnconditional()) { - q2.address = block.end + 1; - state.queries.push_back(q2); - } + if (std::holds_alternative<SingleBranch>(*block.branch)) { + const auto branch = std::get_if<SingleBranch>(block.branch.get()); + if (!branch->condition.IsUnconditional()) { + q2.address = block.end + 1; + state.queries.push_back(q2); + } - Query conditional_query{q2}; - if (block.branch.is_sync) { - if (block.branch.address == unassigned_branch) { - block.branch.address = conditional_query.ssy_stack.top(); + Query conditional_query{q2}; + if (branch->is_sync) { + if (branch->address == unassigned_branch) { + branch->address = conditional_query.ssy_stack.top(); + } + conditional_query.ssy_stack.pop(); } - conditional_query.ssy_stack.pop(); - } - if (block.branch.is_brk) { - if (block.branch.address == unassigned_branch) { - block.branch.address = conditional_query.pbk_stack.top(); + if (branch->is_brk) { + if (branch->address == unassigned_branch) { + branch->address = conditional_query.pbk_stack.top(); + } + conditional_query.pbk_stack.pop(); } - conditional_query.pbk_stack.pop(); + conditional_query.address = branch->address; + state.queries.push_back(std::move(conditional_query)); + return true; + } + const auto multi_branch = std::get_if<MultiBranch>(block.branch.get()); + for (const auto& branch_case : multi_branch->branches) { + Query conditional_query{q2}; + conditional_query.address = branch_case.address; + state.queries.push_back(std::move(conditional_query)); } - conditional_query.address = block.branch.address; - state.queries.push_back(std::move(conditional_query)); return true; } + } // Anonymous namespace -void InsertBranch(ASTManager& mm, const BlockBranchInfo& branch) { +void InsertBranch(ASTManager& mm, const BlockBranchInfo& branch_info) { const auto 
get_expr = ([&](const Condition& cond) -> Expr { Expr result{}; if (cond.cc != ConditionCode::T) { @@ -444,15 +607,24 @@ void InsertBranch(ASTManager& mm, const BlockBranchInfo& branch) { } return MakeExpr<ExprBoolean>(true); }); - if (branch.address < 0) { - if (branch.kill) { - mm.InsertReturn(get_expr(branch.condition), true); + if (std::holds_alternative<SingleBranch>(*branch_info)) { + const auto branch = std::get_if<SingleBranch>(branch_info.get()); + if (branch->address < 0) { + if (branch->kill) { + mm.InsertReturn(get_expr(branch->condition), true); + return; + } + mm.InsertReturn(get_expr(branch->condition), false); return; } - mm.InsertReturn(get_expr(branch.condition), false); + mm.InsertGoto(get_expr(branch->condition), branch->address); return; } - mm.InsertGoto(get_expr(branch.condition), branch.address); + const auto multi_branch = std::get_if<MultiBranch>(branch_info.get()); + for (const auto& branch_case : multi_branch->branches) { + mm.InsertGoto(MakeExpr<ExprGprEqual>(multi_branch->gpr, branch_case.cmp_value), + branch_case.address); + } } void DecompileShader(CFGRebuildState& state) { @@ -464,25 +636,26 @@ void DecompileShader(CFGRebuildState& state) { if (state.labels.count(block.start) != 0) { state.manager->InsertLabel(block.start); } - u32 end = block.branch.ignore ? block.end + 1 : block.end; + const bool ignore = BlockBranchIsIgnored(block.branch); + u32 end = ignore ? block.end + 1 : block.end; state.manager->InsertBlock(block.start, end); - if (!block.branch.ignore) { + if (!ignore) { InsertBranch(*state.manager, block.branch); } } state.manager->Decompile(); } -std::unique_ptr<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, - std::size_t program_size, u32 start_address, - const CompilerSettings& settings) { +std::unique_ptr<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, u32 start_address, + const CompilerSettings& settings, + ConstBufferLocker& locker) { auto result_out = std::make_unique<ShaderCharacteristics>(); if (settings.depth == CompileDepth::BruteForce) { result_out->settings.depth = CompileDepth::BruteForce; return result_out; } - CFGRebuildState state{program_code, program_size, start_address}; + CFGRebuildState state{program_code, start_address, locker}; // Inspect Code and generate blocks state.labels.clear(); state.labels.emplace(start_address); @@ -547,11 +720,9 @@ std::unique_ptr<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, ShaderBlock new_block{}; new_block.start = block.start; new_block.end = block.end; - new_block.ignore_branch = block.branch.ignore; + new_block.ignore_branch = BlockBranchIsIgnored(block.branch); if (!new_block.ignore_branch) { - new_block.branch.cond = block.branch.condition; - new_block.branch.kills = block.branch.kill; - new_block.branch.address = block.branch.address; + new_block.branch = block.branch; } result_out->end = std::max(result_out->end, block.end); result_out->blocks.push_back(new_block); diff --git a/src/video_core/shader/control_flow.h b/src/video_core/shader/control_flow.h index 37e987d62..5304998b9 100644 --- a/src/video_core/shader/control_flow.h +++ b/src/video_core/shader/control_flow.h @@ -7,6 +7,7 @@ #include <list> #include <optional> #include <set> +#include <variant> #include "video_core/engines/shader_bytecode.h" #include "video_core/shader/ast.h" @@ -37,29 +38,61 @@ struct Condition { } }; -struct ShaderBlock { - struct Branch { - Condition cond{}; - bool kills{}; - s32 address{}; +class SingleBranch { +public: + SingleBranch() = default; + 
SingleBranch(Condition condition, s32 address, bool kill, bool is_sync, bool is_brk, + bool ignore) + : condition{condition}, address{address}, kill{kill}, is_sync{is_sync}, is_brk{is_brk}, + ignore{ignore} {} + + bool operator==(const SingleBranch& b) const { + return std::tie(condition, address, kill, is_sync, is_brk, ignore) == + std::tie(b.condition, b.address, b.kill, b.is_sync, b.is_brk, b.ignore); + } + + bool operator!=(const SingleBranch& b) const { + return !operator==(b); + } + + Condition condition{}; + s32 address{exit_branch}; + bool kill{}; + bool is_sync{}; + bool is_brk{}; + bool ignore{}; +}; - bool operator==(const Branch& b) const { - return std::tie(cond, kills, address) == std::tie(b.cond, b.kills, b.address); - } +struct CaseBranch { + CaseBranch(u32 cmp_value, u32 address) : cmp_value{cmp_value}, address{address} {} + u32 cmp_value; + u32 address; +}; + +class MultiBranch { +public: + MultiBranch(u32 gpr, std::vector<CaseBranch>&& branches) + : gpr{gpr}, branches{std::move(branches)} {} + + u32 gpr{}; + std::vector<CaseBranch> branches{}; +}; + +using BranchData = std::variant<SingleBranch, MultiBranch>; +using BlockBranchInfo = std::shared_ptr<BranchData>; - bool operator!=(const Branch& b) const { - return !operator==(b); - } - }; +bool BlockBranchInfoAreEqual(BlockBranchInfo first, BlockBranchInfo second); +struct ShaderBlock { u32 start{}; u32 end{}; bool ignore_branch{}; - Branch branch{}; + BlockBranchInfo branch{}; bool operator==(const ShaderBlock& sb) const { - return std::tie(start, end, ignore_branch, branch) == - std::tie(sb.start, sb.end, sb.ignore_branch, sb.branch); + return std::tie(start, end, ignore_branch) == + std::tie(sb.start, sb.end, sb.ignore_branch) && + BlockBranchInfoAreEqual(branch, sb.branch); } bool operator!=(const ShaderBlock& sb) const { @@ -76,8 +109,8 @@ struct ShaderCharacteristics { CompilerSettings settings{}; }; -std::unique_ptr<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, - std::size_t program_size, u32 start_address, - const CompilerSettings& settings); +std::unique_ptr<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, u32 start_address, + const CompilerSettings& settings, + ConstBufferLocker& locker); } // namespace VideoCommon::Shader diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp index 2626b1616..21fb9cb83 100644 --- a/src/video_core/shader/decode.cpp +++ b/src/video_core/shader/decode.cpp @@ -33,7 +33,7 @@ constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) { return (absolute_offset % SchedPeriod) == 0; } -} // namespace +} // Anonymous namespace class ASTDecoder { public: @@ -102,7 +102,7 @@ void ShaderIR::Decode() { std::memcpy(&header, program_code.data(), sizeof(Tegra::Shader::Header)); decompiled = false; - auto info = ScanFlow(program_code, program_size, main_offset, settings); + auto info = ScanFlow(program_code, main_offset, settings, locker); auto& shader_info = *info; coverage_begin = shader_info.start; coverage_end = shader_info.end; @@ -155,7 +155,7 @@ void ShaderIR::Decode() { [[fallthrough]]; case CompileDepth::BruteForce: { coverage_begin = main_offset; - const u32 shader_end = static_cast<u32>(program_size / sizeof(u64)); + const std::size_t shader_end = program_code.size(); coverage_end = shader_end; for (u32 label = main_offset; label < shader_end; label++) { basic_blocks.insert({label, DecodeRange(label, label + 1)}); @@ -198,24 +198,39 @@ void ShaderIR::InsertControlFlow(NodeBlock& bb, const ShaderBlock& block) { } return 
result; }; - if (block.branch.address < 0) { - if (block.branch.kills) { - Node n = Operation(OperationCode::Discard); - n = apply_conditions(block.branch.cond, n); + if (std::holds_alternative<SingleBranch>(*block.branch)) { + auto branch = std::get_if<SingleBranch>(block.branch.get()); + if (branch->address < 0) { + if (branch->kill) { + Node n = Operation(OperationCode::Discard); + n = apply_conditions(branch->condition, n); + bb.push_back(n); + global_code.push_back(n); + return; + } + Node n = Operation(OperationCode::Exit); + n = apply_conditions(branch->condition, n); bb.push_back(n); global_code.push_back(n); return; } - Node n = Operation(OperationCode::Exit); - n = apply_conditions(block.branch.cond, n); + Node n = Operation(OperationCode::Branch, Immediate(branch->address)); + n = apply_conditions(branch->condition, n); bb.push_back(n); global_code.push_back(n); return; } - Node n = Operation(OperationCode::Branch, Immediate(block.branch.address)); - n = apply_conditions(block.branch.cond, n); - bb.push_back(n); - global_code.push_back(n); + auto multi_branch = std::get_if<MultiBranch>(block.branch.get()); + Node op_a = GetRegister(multi_branch->gpr); + for (auto& branch_case : multi_branch->branches) { + Node n = Operation(OperationCode::Branch, Immediate(branch_case.address)); + Node op_b = Immediate(branch_case.cmp_value); + Node condition = + GetPredicateComparisonInteger(Tegra::Shader::PredCondition::Equal, false, op_a, op_b); + auto result = Conditional(condition, {n}); + bb.push_back(result); + global_code.push_back(result); + } } u32 ShaderIR::DecodeInstr(NodeBlock& bb, u32 pc) { diff --git a/src/video_core/shader/decode/arithmetic_integer.cpp b/src/video_core/shader/decode/arithmetic_integer.cpp index b73f6536e..a33d242e9 100644 --- a/src/video_core/shader/decode/arithmetic_integer.cpp +++ b/src/video_core/shader/decode/arithmetic_integer.cpp @@ -144,7 +144,7 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) { case OpCode::Id::ICMP_IMM: { const Node zero = Immediate(0); - const auto [op_b, test] = [&]() -> std::pair<Node, Node> { + const auto [op_rhs, test] = [&]() -> std::pair<Node, Node> { switch (opcode->get().GetId()) { case OpCode::Id::ICMP_CR: return {GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset), @@ -161,10 +161,10 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) { return {zero, zero}; } }(); - const Node op_a = GetRegister(instr.gpr8); + const Node op_lhs = GetRegister(instr.gpr8); const Node comparison = GetPredicateComparisonInteger(instr.icmp.cond, instr.icmp.is_signed != 0, test, zero); - SetRegister(bb, instr.gpr0, Operation(OperationCode::Select, comparison, op_a, op_b)); + SetRegister(bb, instr.gpr0, Operation(OperationCode::Select, comparison, op_lhs, op_rhs)); break; } case OpCode::Id::LOP_C: diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp index 95ec1cdd9..b02d2cb95 100644 --- a/src/video_core/shader/decode/image.cpp +++ b/src/video_core/shader/decode/image.cpp @@ -144,8 +144,8 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) { Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) { const auto offset{static_cast<std::size_t>(image.index.Value())}; - if (const auto image = TryUseExistingImage(offset, type)) { - return *image; + if (const auto existing_image = TryUseExistingImage(offset, type)) { + return *existing_image; } const std::size_t next_index{used_images.size()}; diff --git a/src/video_core/shader/decode/other.cpp 
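
InsertControlFlow above now expands a MultiBranch into one guarded Branch node per jump-table entry, each gated by an integer compare against the indexing register. The effect of that lowering, restated as a standalone sketch (CaseBranch mirrors the struct introduced in control_flow.h above; ResolveIndirectBranch and the fallthrough parameter are hypothetical, not code from this patch):

```cpp
#include <cstdint>
#include <vector>

// Mirrors VideoCommon::Shader::CaseBranch from control_flow.h.
struct CaseBranch {
    std::uint32_t cmp_value;
    std::uint32_t address;
};

// Expands an indirect branch over `gpr_value` into one guarded direct branch per table
// entry, which is how a BRX jump table becomes statically known targets that the GLSL and
// SPIR-V backends (neither of which has indirect jumps) can consume.
std::uint32_t ResolveIndirectBranch(std::uint32_t gpr_value,
                                    const std::vector<CaseBranch>& branches,
                                    std::uint32_t fallthrough) {
    for (const CaseBranch& branch_case : branches) {
        if (gpr_value == branch_case.cmp_value) {
            return branch_case.address; // the guarded Branch node for this case is taken
        }
    }
    return fallthrough;
}
```
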
b/src/video_core/shader/decode/other.cpp index d46e0f823..116b95f76 100644 --- a/src/video_core/shader/decode/other.cpp +++ b/src/video_core/shader/decode/other.cpp @@ -67,7 +67,7 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) { break; } case OpCode::Id::MOV_SYS: { - const Node value = [&]() { + const Node value = [this, instr] { switch (instr.sys20) { case SystemVariable::Ydirection: return Operation(OperationCode::YNegate); diff --git a/src/video_core/shader/decode/shift.cpp b/src/video_core/shader/decode/shift.cpp index f6ee68a54..d419e9c45 100644 --- a/src/video_core/shader/decode/shift.cpp +++ b/src/video_core/shader/decode/shift.cpp @@ -18,7 +18,7 @@ u32 ShaderIR::DecodeShift(NodeBlock& bb, u32 pc) { const auto opcode = OpCode::Decode(instr); Node op_a = GetRegister(instr.gpr8); - Node op_b = [&]() { + Node op_b = [this, instr] { if (instr.is_b_imm) { return Immediate(instr.alu.GetSignedImm20_20()); } else if (instr.is_b_gpr) { diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp index 0b934a069..d61e656b7 100644 --- a/src/video_core/shader/decode/texture.cpp +++ b/src/video_core/shader/decode/texture.cpp @@ -141,7 +141,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { const Node component = Immediate(static_cast<u32>(instr.tld4s.component)); const auto& sampler = - GetSampler(instr.sampler, TextureType::Texture2D, false, depth_compare); + GetSampler(instr.sampler, {{TextureType::Texture2D, false, depth_compare}}); Node4 values; for (u32 element = 0; element < values.size(); ++element) { @@ -150,7 +150,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy)); } - WriteTexsInstructionFloat(bb, instr, values); + WriteTexsInstructionFloat(bb, instr, values, true); break; } case OpCode::Id::TXQ_B: @@ -165,10 +165,7 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { // Sadly, not all texture instructions specify the type of texture their sampler // uses. This must be fixed at a later instance. const auto& sampler = - is_bindless - ? GetBindlessSampler(instr.gpr8, Tegra::Shader::TextureType::Texture2D, false, - false) - : GetSampler(instr.sampler, Tegra::Shader::TextureType::Texture2D, false, false); + is_bindless ? GetBindlessSampler(instr.gpr8, {}) : GetSampler(instr.sampler, {}); u32 indexer = 0; switch (instr.txq.query_type) { @@ -207,9 +204,9 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { auto texture_type = instr.tmml.texture_type.Value(); const bool is_array = instr.tmml.array != 0; - const auto& sampler = is_bindless - ? GetBindlessSampler(instr.gpr20, texture_type, is_array, false) - : GetSampler(instr.sampler, texture_type, is_array, false); + const auto& sampler = + is_bindless ? 
GetBindlessSampler(instr.gpr20, {{texture_type, is_array, false}}) + : GetSampler(instr.sampler, {{texture_type, is_array, false}}); std::vector<Node> coords; @@ -285,9 +282,26 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) { return pc; } -const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, TextureType type, - bool is_array, bool is_shadow) { - const auto offset = static_cast<std::size_t>(sampler.index.Value()); +const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, + std::optional<SamplerInfo> sampler_info) { + const auto offset = static_cast<u32>(sampler.index.Value()); + + Tegra::Shader::TextureType type; + bool is_array; + bool is_shadow; + if (sampler_info) { + type = sampler_info->type; + is_array = sampler_info->is_array; + is_shadow = sampler_info->is_shadow; + } else if (auto sampler = locker.ObtainBoundSampler(offset); sampler) { + type = sampler->texture_type.Value(); + is_array = sampler->is_array.Value() != 0; + is_shadow = sampler->is_shadow.Value() != 0; + } else { + type = Tegra::Shader::TextureType::Texture2D; + is_array = false; + is_shadow = false; + } // If this sampler has already been used, return the existing mapping. const auto itr = @@ -303,15 +317,31 @@ const Sampler& ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, Textu const std::size_t next_index = used_samplers.size(); const Sampler entry{offset, next_index, type, is_array, is_shadow}; return *used_samplers.emplace(entry).first; -} +} // namespace VideoCommon::Shader -const Sampler& ShaderIR::GetBindlessSampler(const Tegra::Shader::Register& reg, TextureType type, - bool is_array, bool is_shadow) { +const Sampler& ShaderIR::GetBindlessSampler(const Tegra::Shader::Register& reg, + std::optional<SamplerInfo> sampler_info) { const Node sampler_register = GetRegister(reg); const auto [base_sampler, cbuf_index, cbuf_offset] = TrackCbuf(sampler_register, global_code, static_cast<s64>(global_code.size())); ASSERT(base_sampler != nullptr); const auto cbuf_key = (static_cast<u64>(cbuf_index) << 32) | static_cast<u64>(cbuf_offset); + Tegra::Shader::TextureType type; + bool is_array; + bool is_shadow; + if (sampler_info) { + type = sampler_info->type; + is_array = sampler_info->is_array; + is_shadow = sampler_info->is_shadow; + } else if (auto sampler = locker.ObtainBindlessSampler(cbuf_index, cbuf_offset); sampler) { + type = sampler->texture_type.Value(); + is_array = sampler->is_array.Value() != 0; + is_shadow = sampler->is_shadow.Value() != 0; + } else { + type = Tegra::Shader::TextureType::Texture2D; + is_array = false; + is_shadow = false; + } // If this sampler has already been used, return the existing mapping. const auto itr = @@ -344,14 +374,14 @@ void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const } } -void ShaderIR::WriteTexsInstructionFloat(NodeBlock& bb, Instruction instr, - const Node4& components) { +void ShaderIR::WriteTexsInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components, + bool ignore_mask) { // TEXS has two destination registers and a swizzle. 
The first two elements in the swizzle // go into gpr0+0 and gpr0+1, and the rest goes into gpr28+0 and gpr28+1 u32 dest_elem = 0; for (u32 component = 0; component < 4; ++component) { - if (!instr.texs.IsComponentEnabled(component)) + if (!instr.texs.IsComponentEnabled(component) && !ignore_mask) continue; SetTemporary(bb, dest_elem++, components[component]); } @@ -411,9 +441,9 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type, (texture_type == TextureType::TextureCube && is_array && is_shadow), "This method is not supported."); - const auto& sampler = is_bindless - ? GetBindlessSampler(*bindless_reg, texture_type, is_array, is_shadow) - : GetSampler(instr.sampler, texture_type, is_array, is_shadow); + const auto& sampler = + is_bindless ? GetBindlessSampler(*bindless_reg, {{texture_type, is_array, is_shadow}}) + : GetSampler(instr.sampler, {{texture_type, is_array, is_shadow}}); const bool lod_needed = process_mode == TextureProcessMode::LZ || process_mode == TextureProcessMode::LL || @@ -577,7 +607,7 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de dc = GetRegister(parameter_register++); } - const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, depth_compare); + const auto& sampler = GetSampler(instr.sampler, {{texture_type, is_array, depth_compare}}); Node4 values; for (u32 element = 0; element < values.size(); ++element) { @@ -610,7 +640,7 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) { // const Node aoffi_register{is_aoffi ? GetRegister(gpr20_cursor++) : nullptr}; // const Node multisample{is_multisample ? GetRegister(gpr20_cursor++) : nullptr}; - const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false); + const auto& sampler = GetSampler(instr.sampler, {{texture_type, is_array, false}}); Node4 values; for (u32 element = 0; element < values.size(); ++element) { @@ -646,7 +676,7 @@ Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is // When lod is used always is in gpr20 const Node lod = lod_enabled ? GetRegister(instr.gpr20) : Immediate(0); - const auto& sampler = GetSampler(instr.sampler, texture_type, is_array, false); + const auto& sampler = GetSampler(instr.sampler, {{texture_type, is_array, false}}); Node4 values; for (u32 element = 0; element < values.size(); ++element) { diff --git a/src/video_core/shader/decode/video.cpp b/src/video_core/shader/decode/video.cpp index 97fc6f9b1..b047cf870 100644 --- a/src/video_core/shader/decode/video.cpp +++ b/src/video_core/shader/decode/video.cpp @@ -23,7 +23,7 @@ u32 ShaderIR::DecodeVideo(NodeBlock& bb, u32 pc) { const Node op_a = GetVideoOperand(GetRegister(instr.gpr8), instr.video.is_byte_chunk_a, instr.video.signed_a, instr.video.type_a, instr.video.byte_height_a); - const Node op_b = [&]() { + const Node op_b = [this, instr] { if (instr.video.use_register_b) { return GetVideoOperand(GetRegister(instr.gpr20), instr.video.is_byte_chunk_b, instr.video.signed_b, instr.video.type_b, diff --git a/src/video_core/shader/decode/warp.cpp b/src/video_core/shader/decode/warp.cpp index a8e481b3c..fa8a250cc 100644 --- a/src/video_core/shader/decode/warp.cpp +++ b/src/video_core/shader/decode/warp.cpp @@ -46,9 +46,10 @@ u32 ShaderIR::DecodeWarp(NodeBlock& bb, u32 pc) { break; } case OpCode::Id::SHFL: { - Node mask = instr.shfl.is_mask_imm ? 
Immediate(static_cast<u32>(instr.shfl.mask_imm)) - : GetRegister(instr.gpr39); - Node width = [&] { + Node width = [this, instr] { + Node mask = instr.shfl.is_mask_imm ? Immediate(static_cast<u32>(instr.shfl.mask_imm)) + : GetRegister(instr.gpr39); + // Convert the obscure SHFL mask back into GL_NV_shader_thread_shuffle's width. This has // been done reversing Nvidia's math. It won't work on all cases due to SHFL having // different parameters that don't properly map to GLSL's interface, but it should work diff --git a/src/video_core/shader/expr.h b/src/video_core/shader/expr.h index d3dcd00ec..4e8264367 100644 --- a/src/video_core/shader/expr.h +++ b/src/video_core/shader/expr.h @@ -17,13 +17,14 @@ using Tegra::Shader::Pred; class ExprAnd; class ExprBoolean; class ExprCondCode; +class ExprGprEqual; class ExprNot; class ExprOr; class ExprPredicate; class ExprVar; -using ExprData = - std::variant<ExprVar, ExprCondCode, ExprPredicate, ExprNot, ExprOr, ExprAnd, ExprBoolean>; +using ExprData = std::variant<ExprVar, ExprCondCode, ExprPredicate, ExprNot, ExprOr, ExprAnd, + ExprBoolean, ExprGprEqual>; using Expr = std::shared_ptr<ExprData>; class ExprAnd final { @@ -118,6 +119,22 @@ public: bool value; }; +class ExprGprEqual final { +public: + ExprGprEqual(u32 gpr, u32 value) : gpr{gpr}, value{value} {} + + bool operator==(const ExprGprEqual& b) const { + return gpr == b.gpr && value == b.value; + } + + bool operator!=(const ExprGprEqual& b) const { + return !operator==(b); + } + + u32 gpr; + u32 value; +}; + template <typename T, typename... Args> Expr MakeExpr(Args&&... args) { static_assert(std::is_convertible_v<T, ExprData>); diff --git a/src/video_core/shader/shader_ir.cpp b/src/video_core/shader/shader_ir.cpp index c1f2b88c8..1d9825c76 100644 --- a/src/video_core/shader/shader_ir.cpp +++ b/src/video_core/shader/shader_ir.cpp @@ -2,8 +2,9 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
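
expr.h above gains ExprGprEqual so a control-flow condition can test a register against an immediate, which is what the per-case gotos of a MultiBranch are built from. A sketch of how such a node is created and visited (PrintGprEqual is a hypothetical visitor; the real consumers are the GLSL and SPIR-V decompilers and the AST printer patched earlier):

```cpp
#include <string>
#include <variant>

#include "video_core/shader/expr.h"

using VideoCommon::Shader::Expr;
using VideoCommon::Shader::ExprGprEqual;
using VideoCommon::Shader::MakeExpr;

struct PrintGprEqual {
    // The alternative added by this patch.
    std::string operator()(const ExprGprEqual& expr) const {
        return "gpr_" + std::to_string(expr.gpr) + " == " + std::to_string(expr.value);
    }
    // Fallback for every other Expr alternative.
    template <typename T>
    std::string operator()(const T&) const {
        return "<other expr>";
    }
};

std::string Example() {
    // Condition "gpr_8 == 0x10", as InsertBranch produces for one MultiBranch case.
    const Expr condition = MakeExpr<ExprGprEqual>(8, 0x10);
    return std::visit(PrintGprEqual{}, *condition);
}
```
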
+#include <algorithm> +#include <array> #include <cmath> -#include <unordered_map> #include "common/assert.h" #include "common/common_types.h" @@ -22,10 +23,9 @@ using Tegra::Shader::PredCondition; using Tegra::Shader::PredOperation; using Tegra::Shader::Register; -ShaderIR::ShaderIR(const ProgramCode& program_code, u32 main_offset, const std::size_t size, - CompilerSettings settings) - : program_code{program_code}, main_offset{main_offset}, program_size{size}, basic_blocks{}, - program_manager{true, true}, settings{settings} { +ShaderIR::ShaderIR(const ProgramCode& program_code, u32 main_offset, CompilerSettings settings, + ConstBufferLocker& locker) + : program_code{program_code}, main_offset{main_offset}, settings{settings}, locker{locker} { Decode(); } @@ -271,21 +271,24 @@ Node ShaderIR::GetSaturatedHalfFloat(Node value, bool saturate) { } Node ShaderIR::GetPredicateComparisonFloat(PredCondition condition, Node op_a, Node op_b) { - const std::unordered_map<PredCondition, OperationCode> PredicateComparisonTable = { - {PredCondition::LessThan, OperationCode::LogicalFLessThan}, - {PredCondition::Equal, OperationCode::LogicalFEqual}, - {PredCondition::LessEqual, OperationCode::LogicalFLessEqual}, - {PredCondition::GreaterThan, OperationCode::LogicalFGreaterThan}, - {PredCondition::NotEqual, OperationCode::LogicalFNotEqual}, - {PredCondition::GreaterEqual, OperationCode::LogicalFGreaterEqual}, - {PredCondition::LessThanWithNan, OperationCode::LogicalFLessThan}, - {PredCondition::NotEqualWithNan, OperationCode::LogicalFNotEqual}, - {PredCondition::LessEqualWithNan, OperationCode::LogicalFLessEqual}, - {PredCondition::GreaterThanWithNan, OperationCode::LogicalFGreaterThan}, - {PredCondition::GreaterEqualWithNan, OperationCode::LogicalFGreaterEqual}}; - - const auto comparison{PredicateComparisonTable.find(condition)}; - UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonTable.end(), + static constexpr std::array comparison_table{ + std::pair{PredCondition::LessThan, OperationCode::LogicalFLessThan}, + std::pair{PredCondition::Equal, OperationCode::LogicalFEqual}, + std::pair{PredCondition::LessEqual, OperationCode::LogicalFLessEqual}, + std::pair{PredCondition::GreaterThan, OperationCode::LogicalFGreaterThan}, + std::pair{PredCondition::NotEqual, OperationCode::LogicalFNotEqual}, + std::pair{PredCondition::GreaterEqual, OperationCode::LogicalFGreaterEqual}, + std::pair{PredCondition::LessThanWithNan, OperationCode::LogicalFLessThan}, + std::pair{PredCondition::NotEqualWithNan, OperationCode::LogicalFNotEqual}, + std::pair{PredCondition::LessEqualWithNan, OperationCode::LogicalFLessEqual}, + std::pair{PredCondition::GreaterThanWithNan, OperationCode::LogicalFGreaterThan}, + std::pair{PredCondition::GreaterEqualWithNan, OperationCode::LogicalFGreaterEqual}, + }; + + const auto comparison = + std::find_if(comparison_table.cbegin(), comparison_table.cend(), + [condition](const auto entry) { return condition == entry.first; }); + UNIMPLEMENTED_IF_MSG(comparison == comparison_table.cend(), "Unknown predicate comparison operation"); Node predicate = Operation(comparison->second, NO_PRECISE, op_a, op_b); @@ -306,21 +309,24 @@ Node ShaderIR::GetPredicateComparisonFloat(PredCondition condition, Node op_a, N Node ShaderIR::GetPredicateComparisonInteger(PredCondition condition, bool is_signed, Node op_a, Node op_b) { - const std::unordered_map<PredCondition, OperationCode> PredicateComparisonTable = { - {PredCondition::LessThan, OperationCode::LogicalILessThan}, - {PredCondition::Equal, 
OperationCode::LogicalIEqual}, - {PredCondition::LessEqual, OperationCode::LogicalILessEqual}, - {PredCondition::GreaterThan, OperationCode::LogicalIGreaterThan}, - {PredCondition::NotEqual, OperationCode::LogicalINotEqual}, - {PredCondition::GreaterEqual, OperationCode::LogicalIGreaterEqual}, - {PredCondition::LessThanWithNan, OperationCode::LogicalILessThan}, - {PredCondition::NotEqualWithNan, OperationCode::LogicalINotEqual}, - {PredCondition::LessEqualWithNan, OperationCode::LogicalILessEqual}, - {PredCondition::GreaterThanWithNan, OperationCode::LogicalIGreaterThan}, - {PredCondition::GreaterEqualWithNan, OperationCode::LogicalIGreaterEqual}}; - - const auto comparison{PredicateComparisonTable.find(condition)}; - UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonTable.end(), + static constexpr std::array comparison_table{ + std::pair{PredCondition::LessThan, OperationCode::LogicalILessThan}, + std::pair{PredCondition::Equal, OperationCode::LogicalIEqual}, + std::pair{PredCondition::LessEqual, OperationCode::LogicalILessEqual}, + std::pair{PredCondition::GreaterThan, OperationCode::LogicalIGreaterThan}, + std::pair{PredCondition::NotEqual, OperationCode::LogicalINotEqual}, + std::pair{PredCondition::GreaterEqual, OperationCode::LogicalIGreaterEqual}, + std::pair{PredCondition::LessThanWithNan, OperationCode::LogicalILessThan}, + std::pair{PredCondition::NotEqualWithNan, OperationCode::LogicalINotEqual}, + std::pair{PredCondition::LessEqualWithNan, OperationCode::LogicalILessEqual}, + std::pair{PredCondition::GreaterThanWithNan, OperationCode::LogicalIGreaterThan}, + std::pair{PredCondition::GreaterEqualWithNan, OperationCode::LogicalIGreaterEqual}, + }; + + const auto comparison = + std::find_if(comparison_table.cbegin(), comparison_table.cend(), + [condition](const auto entry) { return condition == entry.first; }); + UNIMPLEMENTED_IF_MSG(comparison == comparison_table.cend(), "Unknown predicate comparison operation"); Node predicate = SignedOperation(comparison->second, is_signed, NO_PRECISE, std::move(op_a), @@ -337,36 +343,43 @@ Node ShaderIR::GetPredicateComparisonInteger(PredCondition condition, bool is_si Node ShaderIR::GetPredicateComparisonHalf(Tegra::Shader::PredCondition condition, Node op_a, Node op_b) { - const std::unordered_map<PredCondition, OperationCode> PredicateComparisonTable = { - {PredCondition::LessThan, OperationCode::Logical2HLessThan}, - {PredCondition::Equal, OperationCode::Logical2HEqual}, - {PredCondition::LessEqual, OperationCode::Logical2HLessEqual}, - {PredCondition::GreaterThan, OperationCode::Logical2HGreaterThan}, - {PredCondition::NotEqual, OperationCode::Logical2HNotEqual}, - {PredCondition::GreaterEqual, OperationCode::Logical2HGreaterEqual}, - {PredCondition::LessThanWithNan, OperationCode::Logical2HLessThanWithNan}, - {PredCondition::NotEqualWithNan, OperationCode::Logical2HNotEqualWithNan}, - {PredCondition::LessEqualWithNan, OperationCode::Logical2HLessEqualWithNan}, - {PredCondition::GreaterThanWithNan, OperationCode::Logical2HGreaterThanWithNan}, - {PredCondition::GreaterEqualWithNan, OperationCode::Logical2HGreaterEqualWithNan}}; - - const auto comparison{PredicateComparisonTable.find(condition)}; - UNIMPLEMENTED_IF_MSG(comparison == PredicateComparisonTable.end(), + static constexpr std::array comparison_table{ + std::pair{PredCondition::LessThan, OperationCode::Logical2HLessThan}, + std::pair{PredCondition::Equal, OperationCode::Logical2HEqual}, + std::pair{PredCondition::LessEqual, OperationCode::Logical2HLessEqual}, + 
std::pair{PredCondition::GreaterThan, OperationCode::Logical2HGreaterThan}, + std::pair{PredCondition::NotEqual, OperationCode::Logical2HNotEqual}, + std::pair{PredCondition::GreaterEqual, OperationCode::Logical2HGreaterEqual}, + std::pair{PredCondition::LessThanWithNan, OperationCode::Logical2HLessThanWithNan}, + std::pair{PredCondition::NotEqualWithNan, OperationCode::Logical2HNotEqualWithNan}, + std::pair{PredCondition::LessEqualWithNan, OperationCode::Logical2HLessEqualWithNan}, + std::pair{PredCondition::GreaterThanWithNan, OperationCode::Logical2HGreaterThanWithNan}, + std::pair{PredCondition::GreaterEqualWithNan, OperationCode::Logical2HGreaterEqualWithNan}, + }; + + const auto comparison = + std::find_if(comparison_table.cbegin(), comparison_table.cend(), + [condition](const auto entry) { return condition == entry.first; }); + UNIMPLEMENTED_IF_MSG(comparison == comparison_table.cend(), "Unknown predicate comparison operation"); return Operation(comparison->second, NO_PRECISE, std::move(op_a), std::move(op_b)); } OperationCode ShaderIR::GetPredicateCombiner(PredOperation operation) { - const std::unordered_map<PredOperation, OperationCode> PredicateOperationTable = { - {PredOperation::And, OperationCode::LogicalAnd}, - {PredOperation::Or, OperationCode::LogicalOr}, - {PredOperation::Xor, OperationCode::LogicalXor}, + static constexpr std::array operation_table{ + OperationCode::LogicalAnd, + OperationCode::LogicalOr, + OperationCode::LogicalXor, }; - const auto op = PredicateOperationTable.find(operation); - UNIMPLEMENTED_IF_MSG(op == PredicateOperationTable.end(), "Unknown predicate operation"); - return op->second; + const auto index = static_cast<std::size_t>(operation); + if (index >= operation_table.size()) { + UNIMPLEMENTED_MSG("Unknown predicate operation."); + return {}; + } + + return operation_table[index]; } Node ShaderIR::GetConditionCode(Tegra::Shader::ConditionCode cc) const { diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h index 91cd0a534..1fd44bde1 100644 --- a/src/video_core/shader/shader_ir.h +++ b/src/video_core/shader/shader_ir.h @@ -17,6 +17,7 @@ #include "video_core/engines/shader_header.h" #include "video_core/shader/ast.h" #include "video_core/shader/compiler_settings.h" +#include "video_core/shader/const_buffer_locker.h" #include "video_core/shader/node.h" namespace VideoCommon::Shader { @@ -66,8 +67,8 @@ struct GlobalMemoryUsage { class ShaderIR final { public: - explicit ShaderIR(const ProgramCode& program_code, u32 main_offset, std::size_t size, - CompilerSettings settings); + explicit ShaderIR(const ProgramCode& program_code, u32 main_offset, CompilerSettings settings, + ConstBufferLocker& locker); ~ShaderIR(); const std::map<u32, NodeBlock>& GetBasicBlocks() const { @@ -172,6 +173,13 @@ public: private: friend class ASTDecoder; + + struct SamplerInfo { + Tegra::Shader::TextureType type; + bool is_array; + bool is_shadow; + }; + void Decode(); NodeBlock DecodeRange(u32 begin, u32 end); @@ -296,12 +304,11 @@ private: /// Accesses a texture sampler const Sampler& GetSampler(const Tegra::Shader::Sampler& sampler, - Tegra::Shader::TextureType type, bool is_array, bool is_shadow); + std::optional<SamplerInfo> sampler_info); // Accesses a texture sampler for a bindless texture. const Sampler& GetBindlessSampler(const Tegra::Shader::Register& reg, - Tegra::Shader::TextureType type, bool is_array, - bool is_shadow); + std::optional<SamplerInfo> sampler_info); /// Accesses an image. 
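
With the SamplerInfo struct above, GetSampler and GetBindlessSampler take an optional descriptor instead of three loose flags, and fall back to the locker when an instruction does not encode its texture type (TXQ, TLD4S). Restating that resolution order as a standalone sketch (ResolveSamplerInfo and the local SamplerInfo copy are illustrative, not code from this patch; the real logic lives in decode/texture.cpp):

```cpp
#include <optional>

#include "common/common_types.h"
#include "video_core/shader/const_buffer_locker.h"

namespace {

struct SamplerInfo {
    Tegra::Shader::TextureType type;
    bool is_array;
    bool is_shadow;
};

SamplerInfo ResolveSamplerInfo(std::optional<SamplerInfo> explicit_info, u32 offset,
                               VideoCommon::Shader::ConstBufferLocker& locker) {
    // 1. The instruction itself encodes the texture type (TEX, TLD4, ...): trust it.
    if (explicit_info) {
        return *explicit_info;
    }
    // 2. Otherwise ask the locker for the bound sampler descriptor read from the engine.
    if (const auto sampler = locker.ObtainBoundSampler(offset)) {
        return {sampler->texture_type.Value(), sampler->is_array.Value() != 0,
                sampler->is_shadow.Value() != 0};
    }
    // 3. No engine information available: fall back to a plain 2D sampler.
    return {Tegra::Shader::TextureType::Texture2D, false, false};
}

} // namespace
```
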
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index 91cd0a534..1fd44bde1 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -17,6 +17,7 @@
 #include "video_core/engines/shader_header.h"
 #include "video_core/shader/ast.h"
 #include "video_core/shader/compiler_settings.h"
+#include "video_core/shader/const_buffer_locker.h"
 #include "video_core/shader/node.h"
 
 namespace VideoCommon::Shader {
@@ -66,8 +67,8 @@ struct GlobalMemoryUsage {
 
 class ShaderIR final {
 public:
-    explicit ShaderIR(const ProgramCode& program_code, u32 main_offset, std::size_t size,
-                      CompilerSettings settings);
+    explicit ShaderIR(const ProgramCode& program_code, u32 main_offset, CompilerSettings settings,
+                      ConstBufferLocker& locker);
     ~ShaderIR();
 
     const std::map<u32, NodeBlock>& GetBasicBlocks() const {
@@ -172,6 +173,13 @@
 private:
     friend class ASTDecoder;
+
+    struct SamplerInfo {
+        Tegra::Shader::TextureType type;
+        bool is_array;
+        bool is_shadow;
+    };
+
     void Decode();
 
     NodeBlock DecodeRange(u32 begin, u32 end);
@@ -296,12 +304,11 @@
     /// Accesses a texture sampler
     const Sampler& GetSampler(const Tegra::Shader::Sampler& sampler,
-                              Tegra::Shader::TextureType type, bool is_array, bool is_shadow);
+                              std::optional<SamplerInfo> sampler_info);
 
     // Accesses a texture sampler for a bindless texture.
     const Sampler& GetBindlessSampler(const Tegra::Shader::Register& reg,
-                                      Tegra::Shader::TextureType type, bool is_array,
-                                      bool is_shadow);
+                                      std::optional<SamplerInfo> sampler_info);
 
     /// Accesses an image.
     Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type);
@@ -322,7 +329,7 @@ private:
                                      const Node4& components);
 
     void WriteTexsInstructionFloat(NodeBlock& bb, Tegra::Shader::Instruction instr,
-                                   const Node4& components);
+                                   const Node4& components, bool ignore_mask = false);
 
     void WriteTexsInstructionHalfFloat(NodeBlock& bb, Tegra::Shader::Instruction instr,
                                        const Node4& components);
@@ -377,7 +384,9 @@ private:
     const ProgramCode& program_code;
     const u32 main_offset;
-    const std::size_t program_size;
+    const CompilerSettings settings;
+    ConstBufferLocker& locker;
+
     bool decompiled{};
     bool disable_flow_stack{};
@@ -386,8 +395,7 @@
     std::map<u32, NodeBlock> basic_blocks;
     NodeBlock global_code;
 
-    ASTManager program_manager;
-    CompilerSettings settings{};
+    ASTManager program_manager{true, true};
 
     std::set<u32> used_registers;
     std::set<Tegra::Shader::Pred> used_predicates;
diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp
index 9a3c05288..621136b6e 100644
--- a/src/video_core/surface.cpp
+++ b/src/video_core/surface.cpp
@@ -315,6 +315,14 @@ PixelFormat PixelFormatFromTextureFormat(Tegra::Texture::TextureFormat format,
             break;
         }
         break;
+    case Tegra::Texture::TextureFormat::E5B9G9R9_SHAREDEXP:
+        switch (component_type) {
+        case Tegra::Texture::ComponentType::FLOAT:
+            return PixelFormat::E5B9G9R9F;
+        default:
+            break;
+        }
+        break;
     case Tegra::Texture::TextureFormat::ZF32:
         return PixelFormat::Z32F;
     case Tegra::Texture::TextureFormat::Z16:
diff --git a/src/video_core/surface.h b/src/video_core/surface.h
index 97668f802..d3bcd38c5 100644
--- a/src/video_core/surface.h
+++ b/src/video_core/surface.h
@@ -86,19 +86,20 @@ enum class PixelFormat {
     ASTC_2D_8X6_SRGB = 68,
     ASTC_2D_6X5 = 69,
     ASTC_2D_6X5_SRGB = 70,
+    E5B9G9R9F = 71,
 
     MaxColorFormat,
 
     // Depth formats
-    Z32F = 71,
-    Z16 = 72,
+    Z32F = 72,
+    Z16 = 73,
 
     MaxDepthFormat,
 
     // DepthStencil formats
-    Z24S8 = 73,
-    S8Z24 = 74,
-    Z32FS8 = 75,
+    Z24S8 = 74,
+    S8Z24 = 75,
+    Z32FS8 = 76,
 
     MaxDepthStencilFormat,
 
@@ -207,6 +208,7 @@ constexpr std::array<u32, MaxPixelFormat> compression_factor_shift_table = {{
     2, // ASTC_2D_8X6_SRGB
     2, // ASTC_2D_6X5
     2, // ASTC_2D_6X5_SRGB
+    0, // E5B9G9R9F
     0, // Z32F
     0, // Z16
     0, // Z24S8
@@ -302,6 +304,7 @@ constexpr std::array<u32, MaxPixelFormat> block_width_table = {{
     8, // ASTC_2D_8X6_SRGB
     6, // ASTC_2D_6X5
     6, // ASTC_2D_6X5_SRGB
+    1, // E5B9G9R9F
     1, // Z32F
     1, // Z16
     1, // Z24S8
@@ -389,6 +392,7 @@ constexpr std::array<u32, MaxPixelFormat> block_height_table = {{
     6, // ASTC_2D_8X6_SRGB
     5, // ASTC_2D_6X5
     5, // ASTC_2D_6X5_SRGB
+    1, // E5B9G9R9F
     1, // Z32F
     1, // Z16
     1, // Z24S8
@@ -476,6 +480,7 @@ constexpr std::array<u32, MaxPixelFormat> bpp_table = {{
     128, // ASTC_2D_8X6_SRGB
     128, // ASTC_2D_6X5
     128, // ASTC_2D_6X5_SRGB
+    32,  // E5B9G9R9F
     32,  // Z32F
     16,  // Z16
     32,  // Z24S8
@@ -578,6 +583,7 @@ constexpr std::array<SurfaceCompression, MaxPixelFormat> compression_type_table
     SurfaceCompression::Converted, // ASTC_2D_8X6_SRGB
     SurfaceCompression::Converted, // ASTC_2D_6X5
     SurfaceCompression::Converted, // ASTC_2D_6X5_SRGB
+    SurfaceCompression::None,      // E5B9G9R9F
     SurfaceCompression::None,      // Z32F
     SurfaceCompression::None,      // Z16
     SurfaceCompression::None,      // Z24S8
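E5B9G9R9 (shared-exponent RGB9E5) is an uncompressed packed 32-bit colour format, which is why its new rows are a compression shift of 0, a 1x1 block, 32 bits per pixel and SurfaceCompression::None. Because every per-format table is indexed by the PixelFormat value, inserting a colour format before MaxColorFormat renumbers the depth and depth-stencil formats and requires a matching row in each table. A hedged sketch of that parallel-table pattern follows, with a deliberately tiny made-up format list rather than yuzu's real tables; the static_assert is not in the original and only illustrates one way to catch a missing row at compile time:

#include <array>
#include <cstddef>
#include <cstdint>

enum class PixelFormat : std::uint32_t {
    RGBA8 = 0,
    E5B9G9R9F = 1, // newly inserted colour format
    Z32F = 2,      // depth formats shift down by one after the insertion
    Max,
};

// One entry per PixelFormat, kept in declaration order.
constexpr std::array<std::uint32_t, static_cast<std::size_t>(PixelFormat::Max)> bpp_table{{
    32, // RGBA8
    32, // E5B9G9R9F
    32, // Z32F
}};

static_assert(bpp_table.size() == static_cast<std::size_t>(PixelFormat::Max),
              "bpp_table must stay in sync with PixelFormat");

constexpr std::uint32_t BytesPerPixel(PixelFormat format) {
    return bpp_table[static_cast<std::size_t>(format)] / 8;
}

static_assert(BytesPerPixel(PixelFormat::E5B9G9R9F) == 4);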
diff --git a/src/video_core/textures/astc.cpp b/src/video_core/textures/astc.cpp
index a9b8f69af..58b608a36 100644
--- a/src/video_core/textures/astc.cpp
+++ b/src/video_core/textures/astc.cpp
@@ -422,7 +422,7 @@ static TexelWeightParams DecodeBlockInfo(InputBitStream& strm) {
     TexelWeightParams params;
 
     // Read the entire block mode all at once
-    uint16_t modeBits = strm.ReadBits(11);
+    uint16_t modeBits = static_cast<uint16_t>(strm.ReadBits(11));
 
     // Does this match the void extent block mode?
     if ((modeBits & 0x01FF) == 0x1FC) {
@@ -625,10 +625,10 @@ static void FillVoidExtentLDR(InputBitStream& strm, uint32_t* const outBuf, uint
     }
 
     // Decode the RGBA components and renormalize them to the range [0, 255]
-    uint16_t r = strm.ReadBits(16);
-    uint16_t g = strm.ReadBits(16);
-    uint16_t b = strm.ReadBits(16);
-    uint16_t a = strm.ReadBits(16);
+    uint16_t r = static_cast<uint16_t>(strm.ReadBits(16));
+    uint16_t g = static_cast<uint16_t>(strm.ReadBits(16));
+    uint16_t b = static_cast<uint16_t>(strm.ReadBits(16));
+    uint16_t a = static_cast<uint16_t>(strm.ReadBits(16));
 
     uint32_t rgba = (r >> 8) | (g & 0xFF00) | (static_cast<uint32_t>(b) & 0xFF00) << 8 |
                     (static_cast<uint32_t>(a) & 0xFF00) << 16;
@@ -681,9 +681,10 @@ protected:
 
 public:
     Pixel() = default;
-    Pixel(ChannelType a, ChannelType r, ChannelType g, ChannelType b, unsigned bitDepth = 8)
+    Pixel(uint32_t a, uint32_t r, uint32_t g, uint32_t b, unsigned bitDepth = 8)
         : m_BitDepth{uint8_t(bitDepth), uint8_t(bitDepth), uint8_t(bitDepth), uint8_t(bitDepth)},
-          color{a, r, g, b} {}
+          color{static_cast<ChannelType>(a), static_cast<ChannelType>(r),
+                static_cast<ChannelType>(g), static_cast<ChannelType>(b)} {}
 
     // Changes the depth of each pixel. This scales the values to
     // the appropriate bit depth by either truncating the least
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index cd8180f8b..c5b9aa08f 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -66,10 +66,7 @@ std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList()
     };
 
     const auto& system = Core::System::GetInstance();
-    add_threads(system.Scheduler(0).GetThreadList());
-    add_threads(system.Scheduler(1).GetThreadList());
-    add_threads(system.Scheduler(2).GetThreadList());
-    add_threads(system.Scheduler(3).GetThreadList());
+    add_threads(system.GlobalScheduler().GetThreadList());
 
     return item_list;
 }
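The astc.cpp hunks do not change behaviour: the bit-stream reader appears to return a wider integer than the callers store, and the new static_cast calls make that narrowing explicit rather than leaving it to an implicit conversion and its warning. The final hunk swaps the debugger's four per-core Scheduler(n) queries for the single GlobalScheduler thread list. A small sketch of the narrowing idiom follows, with a hypothetical stand-in ReadBits rather than the real InputBitStream member:

#include <cstdint>

// Hypothetical stand-in for the ASTC bit-stream reader; assumed to hand back
// more bits of storage than the caller needs, as InputBitStream appears to.
std::uint32_t ReadBits(unsigned count) {
    return count >= 32 ? 0xFFFFFFFFu : (1u << count) - 1u; // dummy payload
}

int main() {
    // Explicit narrowing: only 16 bits were read, so the value fits in uint16_t;
    // the cast states that instead of triggering an implicit-conversion warning.
    const auto r = static_cast<std::uint16_t>(ReadBits(16));
    const auto g = static_cast<std::uint16_t>(ReadBits(16));

    // Widen explicitly before masking and shifting, mirroring the pattern used
    // in FillVoidExtentLDR when packing the channels into one 32-bit value.
    const std::uint32_t rg = (r >> 8) | ((static_cast<std::uint32_t>(g) & 0xFF00u) << 8);
    return rg == 0x00FF00FFu ? 0 : 1;
}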