Diffstat (limited to 'src/video_core')
22 files changed, 276 insertions, 154 deletions
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index ef1618990..c97eeb792 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h @@ -647,7 +647,7 @@ public: GetX() + GetWidth(), // right GetY() // bottom }; - }; + } f32 GetX() const { return std::max(0.0f, translate_x - std::fabs(scale_x)); diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index a2d3d7823..e88290754 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp @@ -94,7 +94,8 @@ void MaxwellDMA::CopyPitchToPitch() { } void MaxwellDMA::CopyBlockLinearToPitch() { - ASSERT(regs.src_params.block_size.depth == 0); + UNIMPLEMENTED_IF(regs.src_params.block_size.depth != 0); + UNIMPLEMENTED_IF(regs.src_params.layer != 0); // Optimized path for micro copies. const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count; @@ -123,17 +124,12 @@ void MaxwellDMA::CopyBlockLinearToPitch() { write_buffer.resize(dst_size); } - if (Settings::IsGPULevelExtreme()) { - memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size); - memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size); - } else { - memory_manager.ReadBlockUnsafe(regs.offset_in, read_buffer.data(), src_size); - memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size); - } + memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size); + memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size); UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, width, bytes_per_pixel, - read_buffer.data() + src_layer_size * src_params.layer, write_buffer.data(), - block_height, src_params.origin.x, src_params.origin.y); + block_height, src_params.origin.x, src_params.origin.y, write_buffer.data(), + read_buffer.data()); memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); } @@ -198,7 +194,6 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() { if (read_buffer.size() < src_size) { read_buffer.resize(src_size); } - if (write_buffer.size() < dst_size) { write_buffer.resize(dst_size); } @@ -212,8 +207,8 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() { } UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, regs.src_params.width, - bytes_per_pixel, read_buffer.data(), write_buffer.data(), - regs.src_params.block_size.height, pos_x, pos_y); + bytes_per_pixel, regs.src_params.block_size.height, pos_x, pos_y, + write_buffer.data(), read_buffer.data()); memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size); } diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index cb284db77..4af5824cd 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -177,15 +177,7 @@ RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWind } if (device.UseAsynchronousShaders()) { - // Max worker threads we should allow - constexpr u32 MAX_THREADS = 4; - // Deduce how many threads we can use - const u32 threads_used = std::thread::hardware_concurrency() / 4; - // Always allow at least 1 thread regardless of our settings - const auto max_worker_count = std::max(1U, threads_used); - // Don't use more than MAX_THREADS - const auto worker_count = std::min(max_worker_count, MAX_THREADS); - async_shaders.AllocateWorkers(worker_count); + async_shaders.AllocateWorkers(); } 
} diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp index 2dcc2b0eb..52fbab3c1 100644 --- a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp @@ -73,7 +73,7 @@ ShaderDiskCacheEntry::ShaderDiskCacheEntry() = default; ShaderDiskCacheEntry::~ShaderDiskCacheEntry() = default; -bool ShaderDiskCacheEntry::Load(FileUtil::IOFile& file) { +bool ShaderDiskCacheEntry::Load(Common::FS::IOFile& file) { if (file.ReadBytes(&type, sizeof(u32)) != sizeof(u32)) { return false; } @@ -144,7 +144,7 @@ bool ShaderDiskCacheEntry::Load(FileUtil::IOFile& file) { return true; } -bool ShaderDiskCacheEntry::Save(FileUtil::IOFile& file) const { +bool ShaderDiskCacheEntry::Save(Common::FS::IOFile& file) const { if (file.WriteObject(static_cast<u32>(type)) != 1 || file.WriteObject(static_cast<u32>(code.size())) != 1 || file.WriteObject(static_cast<u32>(code_b.size())) != 1) { @@ -217,7 +217,7 @@ std::optional<std::vector<ShaderDiskCacheEntry>> ShaderDiskCacheOpenGL::LoadTran return {}; } - FileUtil::IOFile file(GetTransferablePath(), "rb"); + Common::FS::IOFile file(GetTransferablePath(), "rb"); if (!file.IsOpen()) { LOG_INFO(Render_OpenGL, "No transferable shader cache found"); is_usable = true; @@ -262,7 +262,7 @@ std::vector<ShaderDiskCachePrecompiled> ShaderDiskCacheOpenGL::LoadPrecompiled() return {}; } - FileUtil::IOFile file(GetPrecompiledPath(), "rb"); + Common::FS::IOFile file(GetPrecompiledPath(), "rb"); if (!file.IsOpen()) { LOG_INFO(Render_OpenGL, "No precompiled shader cache found"); return {}; @@ -279,7 +279,7 @@ std::vector<ShaderDiskCachePrecompiled> ShaderDiskCacheOpenGL::LoadPrecompiled() } std::optional<std::vector<ShaderDiskCachePrecompiled>> ShaderDiskCacheOpenGL::LoadPrecompiledFile( - FileUtil::IOFile& file) { + Common::FS::IOFile& file) { // Read compressed file from disk and decompress to virtual precompiled cache file std::vector<u8> compressed(file.GetSize()); file.ReadBytes(compressed.data(), compressed.size()); @@ -317,7 +317,7 @@ std::optional<std::vector<ShaderDiskCachePrecompiled>> ShaderDiskCacheOpenGL::Lo } void ShaderDiskCacheOpenGL::InvalidateTransferable() { - if (!FileUtil::Delete(GetTransferablePath())) { + if (!Common::FS::Delete(GetTransferablePath())) { LOG_ERROR(Render_OpenGL, "Failed to invalidate transferable file={}", GetTransferablePath()); } @@ -328,7 +328,7 @@ void ShaderDiskCacheOpenGL::InvalidatePrecompiled() { // Clear virtaul precompiled cache file precompiled_cache_virtual_file.Resize(0); - if (!FileUtil::Delete(GetPrecompiledPath())) { + if (!Common::FS::Delete(GetPrecompiledPath())) { LOG_ERROR(Render_OpenGL, "Failed to invalidate precompiled file={}", GetPrecompiledPath()); } } @@ -344,7 +344,7 @@ void ShaderDiskCacheOpenGL::SaveEntry(const ShaderDiskCacheEntry& entry) { return; } - FileUtil::IOFile file = AppendTransferableFile(); + Common::FS::IOFile file = AppendTransferableFile(); if (!file.IsOpen()) { return; } @@ -386,15 +386,15 @@ void ShaderDiskCacheOpenGL::SavePrecompiled(u64 unique_identifier, GLuint progra } } -FileUtil::IOFile ShaderDiskCacheOpenGL::AppendTransferableFile() const { +Common::FS::IOFile ShaderDiskCacheOpenGL::AppendTransferableFile() const { if (!EnsureDirectories()) { return {}; } const auto transferable_path{GetTransferablePath()}; - const bool existed = FileUtil::Exists(transferable_path); + const bool existed = Common::FS::Exists(transferable_path); - FileUtil::IOFile file(transferable_path, 
"ab"); + Common::FS::IOFile file(transferable_path, "ab"); if (!file.IsOpen()) { LOG_ERROR(Render_OpenGL, "Failed to open transferable cache in path={}", transferable_path); return {}; @@ -426,7 +426,7 @@ void ShaderDiskCacheOpenGL::SaveVirtualPrecompiledFile() { Common::Compression::CompressDataZSTDDefault(uncompressed.data(), uncompressed.size()); const auto precompiled_path{GetPrecompiledPath()}; - FileUtil::IOFile file(precompiled_path, "wb"); + Common::FS::IOFile file(precompiled_path, "wb"); if (!file.IsOpen()) { LOG_ERROR(Render_OpenGL, "Failed to open precompiled cache in path={}", precompiled_path); @@ -440,24 +440,24 @@ void ShaderDiskCacheOpenGL::SaveVirtualPrecompiledFile() { bool ShaderDiskCacheOpenGL::EnsureDirectories() const { const auto CreateDir = [](const std::string& dir) { - if (!FileUtil::CreateDir(dir)) { + if (!Common::FS::CreateDir(dir)) { LOG_ERROR(Render_OpenGL, "Failed to create directory={}", dir); return false; } return true; }; - return CreateDir(FileUtil::GetUserPath(FileUtil::UserPath::ShaderDir)) && + return CreateDir(Common::FS::GetUserPath(Common::FS::UserPath::ShaderDir)) && CreateDir(GetBaseDir()) && CreateDir(GetTransferableDir()) && CreateDir(GetPrecompiledDir()); } std::string ShaderDiskCacheOpenGL::GetTransferablePath() const { - return FileUtil::SanitizePath(GetTransferableDir() + DIR_SEP_CHR + GetTitleID() + ".bin"); + return Common::FS::SanitizePath(GetTransferableDir() + DIR_SEP_CHR + GetTitleID() + ".bin"); } std::string ShaderDiskCacheOpenGL::GetPrecompiledPath() const { - return FileUtil::SanitizePath(GetPrecompiledDir() + DIR_SEP_CHR + GetTitleID() + ".bin"); + return Common::FS::SanitizePath(GetPrecompiledDir() + DIR_SEP_CHR + GetTitleID() + ".bin"); } std::string ShaderDiskCacheOpenGL::GetTransferableDir() const { @@ -469,7 +469,7 @@ std::string ShaderDiskCacheOpenGL::GetPrecompiledDir() const { } std::string ShaderDiskCacheOpenGL::GetBaseDir() const { - return FileUtil::GetUserPath(FileUtil::UserPath::ShaderDir) + DIR_SEP "opengl"; + return Common::FS::GetUserPath(Common::FS::UserPath::ShaderDir) + DIR_SEP "opengl"; } std::string ShaderDiskCacheOpenGL::GetTitleID() const { diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.h b/src/video_core/renderer_opengl/gl_shader_disk_cache.h index a79cef0e9..db2bb73bc 100644 --- a/src/video_core/renderer_opengl/gl_shader_disk_cache.h +++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.h @@ -25,7 +25,7 @@ namespace Core { class System; } -namespace FileUtil { +namespace Common::FS { class IOFile; } @@ -38,9 +38,9 @@ struct ShaderDiskCacheEntry { ShaderDiskCacheEntry(); ~ShaderDiskCacheEntry(); - bool Load(FileUtil::IOFile& file); + bool Load(Common::FS::IOFile& file); - bool Save(FileUtil::IOFile& file) const; + bool Save(Common::FS::IOFile& file) const; bool HasProgramA() const { return !code.empty() && !code_b.empty(); @@ -97,10 +97,10 @@ public: private: /// Loads the transferable cache. Returns empty on failure. 
std::optional<std::vector<ShaderDiskCachePrecompiled>> LoadPrecompiledFile( - FileUtil::IOFile& file); + Common::FS::IOFile& file); /// Opens current game's transferable file and write it's header if it doesn't exist - FileUtil::IOFile AppendTransferableFile() const; + Common::FS::IOFile AppendTransferableFile() const; /// Save precompiled header to precompiled_cache_in_memory void SavePrecompiledHeaderToVirtualPrecompiledCache(); diff --git a/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp index 435c8c1b8..5b01020ec 100644 --- a/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp +++ b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp @@ -65,10 +65,10 @@ bool NsightAftermathTracker::Initialize() { return false; } - dump_dir = FileUtil::GetUserPath(FileUtil::UserPath::LogDir) + "gpucrash"; + dump_dir = Common::FS::GetUserPath(Common::FS::UserPath::LogDir) + "gpucrash"; - (void)FileUtil::DeleteDirRecursively(dump_dir); - if (!FileUtil::CreateDir(dump_dir)) { + (void)Common::FS::DeleteDirRecursively(dump_dir); + if (!Common::FS::CreateDir(dump_dir)) { LOG_ERROR(Render_Vulkan, "Failed to create Nsight Aftermath dump directory"); return false; } @@ -106,7 +106,7 @@ void NsightAftermathTracker::SaveShader(const std::vector<u32>& spirv) const { return; } - FileUtil::IOFile file(fmt::format("{}/source_{:016x}.spv", dump_dir, hash.hash), "wb"); + Common::FS::IOFile file(fmt::format("{}/source_{:016x}.spv", dump_dir, hash.hash), "wb"); if (!file.IsOpen()) { LOG_ERROR(Render_Vulkan, "Failed to dump SPIR-V module with hash={:016x}", hash.hash); return; @@ -156,12 +156,12 @@ void NsightAftermathTracker::OnGpuCrashDumpCallback(const void* gpu_crash_dump, }(); std::string_view dump_view(static_cast<const char*>(gpu_crash_dump), gpu_crash_dump_size); - if (FileUtil::WriteStringToFile(false, base_name, dump_view) != gpu_crash_dump_size) { + if (Common::FS::WriteStringToFile(false, base_name, dump_view) != gpu_crash_dump_size) { LOG_ERROR(Render_Vulkan, "Failed to write dump file"); return; } const std::string_view json_view(json.data(), json.size()); - if (FileUtil::WriteStringToFile(true, base_name + ".json", json_view) != json.size()) { + if (Common::FS::WriteStringToFile(true, base_name + ".json", json_view) != json.size()) { LOG_ERROR(Render_Vulkan, "Failed to write JSON"); return; } @@ -180,7 +180,7 @@ void NsightAftermathTracker::OnShaderDebugInfoCallback(const void* shader_debug_ const std::string path = fmt::format("{}/shader_{:016x}{:016x}.nvdbg", dump_dir, identifier.id[0], identifier.id[1]); - FileUtil::IOFile file(path, "wb"); + Common::FS::IOFile file(path, "wb"); if (!file.IsOpen()) { LOG_ERROR(Render_Vulkan, "Failed to create file {}", path); return; diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index 2258479f5..0c62c8061 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp @@ -78,7 +78,7 @@ Common::DynamicLibrary OpenVulkanLibrary() { if (!libvulkan_env || !library.Open(libvulkan_env)) { // Use the libvulkan.dylib from the application bundle. 
const std::string filename = - FileUtil::GetBundleDirectory() + "/Contents/Frameworks/libvulkan.dylib"; + Common::FS::GetBundleDirectory() + "/Contents/Frameworks/libvulkan.dylib"; library.Open(filename.c_str()); } #else diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp index 0c03e4d83..ebcfaa0e3 100644 --- a/src/video_core/renderer_vulkan/vk_device.cpp +++ b/src/video_core/renderer_vulkan/vk_device.cpp @@ -382,6 +382,8 @@ bool VKDevice::Create() { graphics_queue = logical.GetQueue(graphics_family); present_queue = logical.GetQueue(present_family); + + use_asynchronous_shaders = Settings::values.use_asynchronous_shaders.GetValue(); return true; } diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h index 529744f2d..26a233db1 100644 --- a/src/video_core/renderer_vulkan/vk_device.h +++ b/src/video_core/renderer_vulkan/vk_device.h @@ -202,6 +202,11 @@ public: return reported_extensions; } + /// Returns true if the setting for async shader compilation is enabled. + bool UseAsynchronousShaders() const { + return use_asynchronous_shaders; + } + /// Checks if the physical device is suitable. static bool IsSuitable(vk::PhysicalDevice physical, VkSurfaceKHR surface); @@ -252,6 +257,9 @@ private: bool ext_extended_dynamic_state{}; ///< Support for VK_EXT_extended_dynamic_state. bool nv_device_diagnostics_config{}; ///< Support for VK_NV_device_diagnostics_config. + // Asynchronous Graphics Pipeline setting + bool use_asynchronous_shaders{}; ///< Setting to use asynchronous shaders/graphics pipeline + // Telemetry parameters std::string vendor_name; ///< Device's driver name. std::vector<std::string> reported_extensions; ///< Reported Vulkan extensions. diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp index a02be5487..d7f65d435 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp +++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp @@ -29,7 +29,7 @@ void InnerFence::Queue() { } ASSERT(!event); - event = device.GetLogical().CreateEvent(); + event = device.GetLogical().CreateNewEvent(); ticks = scheduler.Ticks(); scheduler.RequestOutsideRenderPassOperationContext(); diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp index aaf930b90..2e46c6278 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp @@ -78,15 +78,14 @@ VKGraphicsPipeline::VKGraphicsPipeline(const VKDevice& device, VKScheduler& sche const GraphicsPipelineCacheKey& key, vk::Span<VkDescriptorSetLayoutBinding> bindings, const SPIRVProgram& program) - : device{device}, scheduler{scheduler}, fixed_state{key.fixed_state}, hash{key.Hash()}, + : device{device}, scheduler{scheduler}, cache_key{key}, hash{cache_key.Hash()}, descriptor_set_layout{CreateDescriptorSetLayout(bindings)}, descriptor_allocator{descriptor_pool, *descriptor_set_layout}, update_descriptor_queue{update_descriptor_queue}, layout{CreatePipelineLayout()}, descriptor_template{CreateDescriptorUpdateTemplate(program)}, modules{CreateShaderModules( program)}, - renderpass{renderpass_cache.GetRenderPass(key.renderpass_params)}, pipeline{CreatePipeline( - key.renderpass_params, - program)} {} + renderpass{renderpass_cache.GetRenderPass(cache_key.renderpass_params)}, + pipeline{CreatePipeline(cache_key.renderpass_params, program)} {} 
VKGraphicsPipeline::~VKGraphicsPipeline() = default; @@ -181,7 +180,7 @@ std::vector<vk::ShaderModule> VKGraphicsPipeline::CreateShaderModules( vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpass_params, const SPIRVProgram& program) const { - const auto& state = fixed_state; + const auto& state = cache_key.fixed_state; const auto& viewport_swizzles = state.viewport_swizzles; FixedPipelineState::DynamicState dynamic; diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h index a1d699a6c..58aa35efd 100644 --- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h +++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h @@ -19,7 +19,27 @@ namespace Vulkan { using Maxwell = Tegra::Engines::Maxwell3D::Regs; -struct GraphicsPipelineCacheKey; +struct GraphicsPipelineCacheKey { + RenderPassParams renderpass_params; + u32 padding; + std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders; + FixedPipelineState fixed_state; + + std::size_t Hash() const noexcept; + + bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept; + + bool operator!=(const GraphicsPipelineCacheKey& rhs) const noexcept { + return !operator==(rhs); + } + + std::size_t Size() const noexcept { + return sizeof(renderpass_params) + sizeof(padding) + sizeof(shaders) + fixed_state.Size(); + } +}; +static_assert(std::has_unique_object_representations_v<GraphicsPipelineCacheKey>); +static_assert(std::is_trivially_copyable_v<GraphicsPipelineCacheKey>); +static_assert(std::is_trivially_constructible_v<GraphicsPipelineCacheKey>); class VKDescriptorPool; class VKDevice; @@ -54,6 +74,10 @@ public: return renderpass; } + GraphicsPipelineCacheKey GetCacheKey() const { + return cache_key; + } + private: vk::DescriptorSetLayout CreateDescriptorSetLayout( vk::Span<VkDescriptorSetLayoutBinding> bindings) const; @@ -70,7 +94,7 @@ private: const VKDevice& device; VKScheduler& scheduler; - const FixedPipelineState fixed_state; + const GraphicsPipelineCacheKey cache_key; const u64 hash; vk::DescriptorSetLayout descriptor_set_layout; diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp index 418c62bc4..cfdcdd6ab 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp @@ -28,6 +28,7 @@ #include "video_core/shader/compiler_settings.h" #include "video_core/shader/memory_util.h" #include "video_core/shader_cache.h" +#include "video_core/shader_notify.h" namespace Vulkan { @@ -205,24 +206,43 @@ std::array<Shader*, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() { return last_shaders = shaders; } -VKGraphicsPipeline& VKPipelineCache::GetGraphicsPipeline(const GraphicsPipelineCacheKey& key) { +VKGraphicsPipeline* VKPipelineCache::GetGraphicsPipeline( + const GraphicsPipelineCacheKey& key, VideoCommon::Shader::AsyncShaders& async_shaders) { MICROPROFILE_SCOPE(Vulkan_PipelineCache); if (last_graphics_pipeline && last_graphics_key == key) { - return *last_graphics_pipeline; + return last_graphics_pipeline; } last_graphics_key = key; + if (device.UseAsynchronousShaders() && async_shaders.IsShaderAsync(system.GPU())) { + std::unique_lock lock{pipeline_cache}; + const auto [pair, is_cache_miss] = graphics_cache.try_emplace(key); + if (is_cache_miss) { + system.GPU().ShaderNotify().MarkSharderBuilding(); + LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash()); + const auto [program, bindings] = 
DecompileShaders(key.fixed_state); + async_shaders.QueueVulkanShader(this, device, scheduler, descriptor_pool, + update_descriptor_queue, renderpass_cache, bindings, + program, key); + } + last_graphics_pipeline = pair->second.get(); + return last_graphics_pipeline; + } + const auto [pair, is_cache_miss] = graphics_cache.try_emplace(key); auto& entry = pair->second; if (is_cache_miss) { + system.GPU().ShaderNotify().MarkSharderBuilding(); LOG_INFO(Render_Vulkan, "Compile 0x{:016X}", key.Hash()); - const auto [program, bindings] = DecompileShaders(key); + const auto [program, bindings] = DecompileShaders(key.fixed_state); entry = std::make_unique<VKGraphicsPipeline>(device, scheduler, descriptor_pool, update_descriptor_queue, renderpass_cache, key, bindings, program); + system.GPU().ShaderNotify().MarkShaderComplete(); } - return *(last_graphics_pipeline = entry.get()); + last_graphics_pipeline = entry.get(); + return last_graphics_pipeline; } VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCacheKey& key) { @@ -277,6 +297,12 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach return *entry; } +void VKPipelineCache::EmplacePipeline(std::unique_ptr<VKGraphicsPipeline> pipeline) { + system.GPU().ShaderNotify().MarkShaderComplete(); + std::unique_lock lock{pipeline_cache}; + graphics_cache.at(pipeline->GetCacheKey()) = std::move(pipeline); +} + void VKPipelineCache::OnShaderRemoval(Shader* shader) { bool finished = false; const auto Finish = [&] { @@ -312,8 +338,7 @@ void VKPipelineCache::OnShaderRemoval(Shader* shader) { } std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>> -VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) { - const auto& fixed_state = key.fixed_state; +VKPipelineCache::DecompileShaders(const FixedPipelineState& fixed_state) { auto& memory_manager = system.GPU().MemoryManager(); const auto& gpu = system.GPU().Maxwell3D(); diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h index 0a3fe65fb..c04829e77 100644 --- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h +++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h @@ -22,6 +22,7 @@ #include "video_core/renderer_vulkan/vk_renderpass_cache.h" #include "video_core/renderer_vulkan/vk_shader_decompiler.h" #include "video_core/renderer_vulkan/wrapper.h" +#include "video_core/shader/async_shaders.h" #include "video_core/shader/memory_util.h" #include "video_core/shader/registry.h" #include "video_core/shader/shader_ir.h" @@ -43,28 +44,6 @@ class VKUpdateDescriptorQueue; using Maxwell = Tegra::Engines::Maxwell3D::Regs; -struct GraphicsPipelineCacheKey { - RenderPassParams renderpass_params; - u32 padding; - std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders; - FixedPipelineState fixed_state; - - std::size_t Hash() const noexcept; - - bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept; - - bool operator!=(const GraphicsPipelineCacheKey& rhs) const noexcept { - return !operator==(rhs); - } - - std::size_t Size() const noexcept { - return sizeof(renderpass_params) + sizeof(padding) + sizeof(shaders) + fixed_state.Size(); - } -}; -static_assert(std::has_unique_object_representations_v<GraphicsPipelineCacheKey>); -static_assert(std::is_trivially_copyable_v<GraphicsPipelineCacheKey>); -static_assert(std::is_trivially_constructible_v<GraphicsPipelineCacheKey>); - struct ComputePipelineCacheKey { GPUVAddr shader; u32 shared_memory_size; @@ -152,16 +131,19 @@ 
public: std::array<Shader*, Maxwell::MaxShaderProgram> GetShaders(); - VKGraphicsPipeline& GetGraphicsPipeline(const GraphicsPipelineCacheKey& key); + VKGraphicsPipeline* GetGraphicsPipeline(const GraphicsPipelineCacheKey& key, + VideoCommon::Shader::AsyncShaders& async_shaders); VKComputePipeline& GetComputePipeline(const ComputePipelineCacheKey& key); + void EmplacePipeline(std::unique_ptr<VKGraphicsPipeline> pipeline); + protected: void OnShaderRemoval(Shader* shader) final; private: std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>> DecompileShaders( - const GraphicsPipelineCacheKey& key); + const FixedPipelineState& fixed_state); Core::System& system; const VKDevice& device; @@ -178,6 +160,7 @@ private: GraphicsPipelineCacheKey last_graphics_key; VKGraphicsPipeline* last_graphics_pipeline = nullptr; + std::mutex pipeline_cache; std::unordered_map<GraphicsPipelineCacheKey, std::unique_ptr<VKGraphicsPipeline>> graphics_cache; std::unordered_map<ComputePipelineCacheKey, std::unique_ptr<VKComputePipeline>> compute_cache; diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 7500e8244..936f76195 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -14,6 +14,7 @@ #include "common/assert.h" #include "common/logging/log.h" #include "common/microprofile.h" +#include "common/scope_exit.h" #include "core/core.h" #include "core/settings.h" #include "video_core/engines/kepler_compute.h" @@ -400,8 +401,12 @@ RasterizerVulkan::RasterizerVulkan(Core::System& system, Core::Frontend::EmuWind buffer_cache(*this, system, device, memory_manager, scheduler, staging_pool), sampler_cache(device), fence_manager(system, *this, device, scheduler, texture_cache, buffer_cache, query_cache), - query_cache(system, *this, device, scheduler), wfi_event{device.GetLogical().CreateEvent()} { + query_cache(system, *this, device, scheduler), + wfi_event{device.GetLogical().CreateNewEvent()}, async_shaders{renderer} { scheduler.SetQueryCache(query_cache); + if (device.UseAsynchronousShaders()) { + async_shaders.AllocateWorkers(); + } } RasterizerVulkan::~RasterizerVulkan() = default; @@ -413,6 +418,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { query_cache.UpdateCounters(); + SCOPE_EXIT({ system.GPU().TickWork(); }); + const auto& gpu = system.GPU().Maxwell3D(); GraphicsPipelineCacheKey key; key.fixed_state.Fill(gpu.regs, device.IsExtExtendedDynamicStateSupported()); @@ -439,10 +446,15 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { key.renderpass_params = GetRenderPassParams(texceptions); key.padding = 0; - auto& pipeline = pipeline_cache.GetGraphicsPipeline(key); - scheduler.BindGraphicsPipeline(pipeline.GetHandle()); + auto* pipeline = pipeline_cache.GetGraphicsPipeline(key, async_shaders); + if (pipeline == nullptr || pipeline->GetHandle() == VK_NULL_HANDLE) { + // Async graphics pipeline was not ready. 
+ return; + } + + scheduler.BindGraphicsPipeline(pipeline->GetHandle()); - const auto renderpass = pipeline.GetRenderPass(); + const auto renderpass = pipeline->GetRenderPass(); const auto [framebuffer, render_area] = ConfigureFramebuffers(renderpass); scheduler.RequestRenderpass(renderpass, framebuffer, render_area); @@ -452,8 +464,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { BeginTransformFeedback(); - const auto pipeline_layout = pipeline.GetLayout(); - const auto descriptor_set = pipeline.CommitDescriptorSet(); + const auto pipeline_layout = pipeline->GetLayout(); + const auto descriptor_set = pipeline->CommitDescriptorSet(); scheduler.Record([pipeline_layout, descriptor_set, draw_params](vk::CommandBuffer cmdbuf) { if (descriptor_set) { cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, @@ -463,8 +475,6 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) { }); EndTransformFeedback(); - - system.GPU().TickWork(); } void RasterizerVulkan::Clear() { diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index 923178b0b..f640ba649 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h @@ -32,6 +32,7 @@ #include "video_core/renderer_vulkan/vk_texture_cache.h" #include "video_core/renderer_vulkan/vk_update_descriptor.h" #include "video_core/renderer_vulkan/wrapper.h" +#include "video_core/shader/async_shaders.h" namespace Core { class System; @@ -136,6 +137,14 @@ public: u32 pixel_stride) override; void SetupDirtyFlags() override; + VideoCommon::Shader::AsyncShaders& GetAsyncShaders() { + return async_shaders; + } + + const VideoCommon::Shader::AsyncShaders& GetAsyncShaders() const { + return async_shaders; + } + /// Maximum supported size that a constbuffer can have in bytes. 
static constexpr std::size_t MaxConstbufferSize = 0x10000; static_assert(MaxConstbufferSize % (4 * sizeof(float)) == 0, @@ -297,6 +306,7 @@ private: vk::Buffer default_buffer; VKMemoryCommit default_buffer_commit; vk::Event wfi_event; + VideoCommon::Shader::AsyncShaders async_shaders; std::array<View, Maxwell::NumRenderTargets> color_attachments; View zeta_attachment; diff --git a/src/video_core/renderer_vulkan/wrapper.cpp b/src/video_core/renderer_vulkan/wrapper.cpp index 43ace243b..013865aa4 100644 --- a/src/video_core/renderer_vulkan/wrapper.cpp +++ b/src/video_core/renderer_vulkan/wrapper.cpp @@ -644,7 +644,7 @@ ShaderModule Device::CreateShaderModule(const VkShaderModuleCreateInfo& ci) cons return ShaderModule(object, handle, *dld); } -Event Device::CreateEvent() const { +Event Device::CreateNewEvent() const { static constexpr VkEventCreateInfo ci{ .sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, .pNext = nullptr, diff --git a/src/video_core/renderer_vulkan/wrapper.h b/src/video_core/renderer_vulkan/wrapper.h index 31885ef42..b9d3fedc1 100644 --- a/src/video_core/renderer_vulkan/wrapper.h +++ b/src/video_core/renderer_vulkan/wrapper.h @@ -721,7 +721,7 @@ public: ShaderModule CreateShaderModule(const VkShaderModuleCreateInfo& ci) const; - Event CreateEvent() const; + Event CreateNewEvent() const; SwapchainKHR CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const; diff --git a/src/video_core/shader/async_shaders.cpp b/src/video_core/shader/async_shaders.cpp index b7f66d7ee..f815584f7 100644 --- a/src/video_core/shader/async_shaders.cpp +++ b/src/video_core/shader/async_shaders.cpp @@ -2,7 +2,6 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -#include <chrono> #include <condition_variable> #include <mutex> #include <thread> @@ -20,9 +19,18 @@ AsyncShaders::~AsyncShaders() { KillWorkers(); } -void AsyncShaders::AllocateWorkers(std::size_t num_workers) { - // If we're already have workers queued or don't want to queue workers, ignore - if (num_workers == worker_threads.size() || num_workers == 0) { +void AsyncShaders::AllocateWorkers() { + // Max worker threads we should allow + constexpr u32 MAX_THREADS = 4; + // Deduce how many threads we can use + const u32 threads_used = std::thread::hardware_concurrency() / 4; + // Always allow at least 1 thread regardless of our settings + const auto max_worker_count = std::max(1U, threads_used); + // Don't use more than MAX_THREADS + const auto num_workers = std::min(max_worker_count, MAX_THREADS); + + // If we already have workers queued, ignore + if (num_workers == worker_threads.size()) { return; } @@ -34,8 +42,8 @@ void AsyncShaders::AllocateWorkers(std::size_t num_workers) { // Create workers for (std::size_t i = 0; i < num_workers; i++) { context_list.push_back(emu_window.CreateSharedContext()); - worker_threads.push_back(std::move( - std::thread(&AsyncShaders::ShaderCompilerThread, this, context_list[i].get()))); + worker_threads.push_back( + std::thread(&AsyncShaders::ShaderCompilerThread, this, context_list[i].get())); } } @@ -111,24 +119,50 @@ void AsyncShaders::QueueOpenGLShader(const OpenGL::Device& device, VideoCommon::Shader::CompilerSettings compiler_settings, const VideoCommon::Shader::Registry& registry, VAddr cpu_addr) { - WorkerParams params{device.UseAssemblyShaders() ? 
AsyncShaders::Backend::GLASM - : AsyncShaders::Backend::OpenGL, - device, - shader_type, - uid, - std::move(code), - std::move(code_b), - main_offset, - compiler_settings, - registry, - cpu_addr}; + WorkerParams params{ + .backend = device.UseAssemblyShaders() ? Backend::GLASM : Backend::OpenGL, + .device = &device, + .shader_type = shader_type, + .uid = uid, + .code = std::move(code), + .code_b = std::move(code_b), + .main_offset = main_offset, + .compiler_settings = compiler_settings, + .registry = registry, + .cpu_address = cpu_addr, + }; std::unique_lock lock(queue_mutex); - pending_queue.push_back(std::move(params)); + pending_queue.push(std::move(params)); + cv.notify_one(); +} + +void AsyncShaders::QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, + const Vulkan::VKDevice& device, Vulkan::VKScheduler& scheduler, + Vulkan::VKDescriptorPool& descriptor_pool, + Vulkan::VKUpdateDescriptorQueue& update_descriptor_queue, + Vulkan::VKRenderPassCache& renderpass_cache, + std::vector<VkDescriptorSetLayoutBinding> bindings, + Vulkan::SPIRVProgram program, + Vulkan::GraphicsPipelineCacheKey key) { + WorkerParams params{ + .backend = Backend::Vulkan, + .pp_cache = pp_cache, + .vk_device = &device, + .scheduler = &scheduler, + .descriptor_pool = &descriptor_pool, + .update_descriptor_queue = &update_descriptor_queue, + .renderpass_cache = &renderpass_cache, + .bindings = bindings, + .program = program, + .key = key, + }; + + std::unique_lock lock(queue_mutex); + pending_queue.push(std::move(params)); cv.notify_one(); } void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context) { - using namespace std::chrono_literals; while (!is_thread_exiting.load(std::memory_order_relaxed)) { std::unique_lock lock{queue_mutex}; cv.wait(lock, [this] { return HasWorkQueued() || is_thread_exiting; }); @@ -144,18 +178,17 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context if (pending_queue.empty()) { continue; } + // Pull work from queue WorkerParams work = std::move(pending_queue.front()); - pending_queue.pop_front(); - + pending_queue.pop(); lock.unlock(); - if (work.backend == AsyncShaders::Backend::OpenGL || - work.backend == AsyncShaders::Backend::GLASM) { - const ShaderIR ir(work.code, work.main_offset, work.compiler_settings, work.registry); + if (work.backend == Backend::OpenGL || work.backend == Backend::GLASM) { + const ShaderIR ir(work.code, work.main_offset, work.compiler_settings, *work.registry); const auto scope = context->Acquire(); auto program = - OpenGL::BuildShader(work.device, work.shader_type, work.uid, ir, work.registry); + OpenGL::BuildShader(*work.device, work.shader_type, work.uid, ir, *work.registry); Result result{}; result.backend = work.backend; result.cpu_address = work.cpu_address; @@ -164,9 +197,9 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context result.code_b = std::move(work.code_b); result.shader_type = work.shader_type; - if (work.backend == AsyncShaders::Backend::OpenGL) { + if (work.backend == Backend::OpenGL) { result.program.opengl = std::move(program->source_program); - } else if (work.backend == AsyncShaders::Backend::GLASM) { + } else if (work.backend == Backend::GLASM) { result.program.glasm = std::move(program->assembly_program); } @@ -174,6 +207,13 @@ void AsyncShaders::ShaderCompilerThread(Core::Frontend::GraphicsContext* context std::unique_lock complete_lock(completed_mutex); finished_work.push_back(std::move(result)); } + } else if (work.backend == Backend::Vulkan) { + auto 
pipeline = std::make_unique<Vulkan::VKGraphicsPipeline>( + *work.vk_device, *work.scheduler, *work.descriptor_pool, + *work.update_descriptor_queue, *work.renderpass_cache, work.key, work.bindings, + work.program); + + work.pp_cache->EmplacePipeline(std::move(pipeline)); } } } diff --git a/src/video_core/shader/async_shaders.h b/src/video_core/shader/async_shaders.h index 2f5ee94ad..d5ae814d5 100644 --- a/src/video_core/shader/async_shaders.h +++ b/src/video_core/shader/async_shaders.h @@ -14,6 +14,10 @@ #include "video_core/renderer_opengl/gl_device.h" #include "video_core/renderer_opengl/gl_resource_manager.h" #include "video_core/renderer_opengl/gl_shader_decompiler.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/renderer_vulkan/vk_pipeline_cache.h" +#include "video_core/renderer_vulkan/vk_scheduler.h" +#include "video_core/renderer_vulkan/vk_update_descriptor.h" namespace Core::Frontend { class EmuWindow; @@ -24,6 +28,10 @@ namespace Tegra { class GPU; } +namespace Vulkan { +class VKPipelineCache; +} + namespace VideoCommon::Shader { class AsyncShaders { @@ -31,6 +39,7 @@ public: enum class Backend { OpenGL, GLASM, + Vulkan, }; struct ResultPrograms { @@ -52,7 +61,7 @@ public: ~AsyncShaders(); /// Start up shader worker threads - void AllocateWorkers(std::size_t num_workers); + void AllocateWorkers(); /// Clear the shader queue and kill all worker threads void FreeWorkers(); @@ -76,6 +85,14 @@ public: VideoCommon::Shader::CompilerSettings compiler_settings, const VideoCommon::Shader::Registry& registry, VAddr cpu_addr); + void QueueVulkanShader(Vulkan::VKPipelineCache* pp_cache, const Vulkan::VKDevice& device, + Vulkan::VKScheduler& scheduler, + Vulkan::VKDescriptorPool& descriptor_pool, + Vulkan::VKUpdateDescriptorQueue& update_descriptor_queue, + Vulkan::VKRenderPassCache& renderpass_cache, + std::vector<VkDescriptorSetLayoutBinding> bindings, + Vulkan::SPIRVProgram program, Vulkan::GraphicsPipelineCacheKey key); + private: void ShaderCompilerThread(Core::Frontend::GraphicsContext* context); @@ -83,16 +100,28 @@ private: bool HasWorkQueued(); struct WorkerParams { - AsyncShaders::Backend backend; - OpenGL::Device device; + Backend backend; + // For OGL + const OpenGL::Device* device; Tegra::Engines::ShaderType shader_type; u64 uid; std::vector<u64> code; std::vector<u64> code_b; u32 main_offset; VideoCommon::Shader::CompilerSettings compiler_settings; - VideoCommon::Shader::Registry registry; + std::optional<VideoCommon::Shader::Registry> registry; VAddr cpu_address; + + // For Vulkan + Vulkan::VKPipelineCache* pp_cache; + const Vulkan::VKDevice* vk_device; + Vulkan::VKScheduler* scheduler; + Vulkan::VKDescriptorPool* descriptor_pool; + Vulkan::VKUpdateDescriptorQueue* update_descriptor_queue; + Vulkan::VKRenderPassCache* renderpass_cache; + std::vector<VkDescriptorSetLayoutBinding> bindings; + Vulkan::SPIRVProgram program; + Vulkan::GraphicsPipelineCacheKey key; }; std::condition_variable cv; @@ -101,7 +130,7 @@ private: std::atomic<bool> is_thread_exiting{}; std::vector<std::unique_ptr<Core::Frontend::GraphicsContext>> context_list; std::vector<std::thread> worker_threads; - std::deque<WorkerParams> pending_queue; + std::queue<WorkerParams> pending_queue; std::vector<AsyncShaders::Result> finished_work; Core::Frontend::EmuWindow& emu_window; }; diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp index 474ae620a..16d46a018 100644 --- a/src/video_core/textures/decoders.cpp +++ b/src/video_core/textures/decoders.cpp @@ 
-228,24 +228,30 @@ void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 } } -void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width, - u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, - u32 block_height_bit, u32 offset_x, u32 offset_y) { - const u32 block_height = 1U << block_height_bit; - for (u32 line = 0; line < subrect_height; ++line) { - const u32 y2 = line + offset_y; - const u32 gob_address_y = (y2 / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height + - ((y2 % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE; - const auto& table = LEGACY_SWIZZLE_TABLE[y2 % GOB_SIZE_Y]; - for (u32 x = 0; x < subrect_width; ++x) { - const u32 x2 = (x + offset_x) * bytes_per_pixel; - const u32 gob_address = gob_address_y + (x2 / GOB_SIZE_X) * GOB_SIZE * block_height; - const u32 swizzled_offset = gob_address + table[x2 % GOB_SIZE_X]; - const u32 unswizzled_offset = line * dest_pitch + x * bytes_per_pixel; - u8* dest_line = unswizzled_data + unswizzled_offset; - u8* source_addr = swizzled_data + swizzled_offset; +void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel, + u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input) { + const u32 stride = width * bytes_per_pixel; + const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X; + const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height); + + const u32 block_height_mask = (1U << block_height) - 1; + const u32 x_shift = static_cast<u32>(GOB_SIZE_SHIFT) + block_height; + + for (u32 line = 0; line < line_count; ++line) { + const u32 src_y = line + origin_y; + const auto& table = LEGACY_SWIZZLE_TABLE[src_y % GOB_SIZE_Y]; + + const u32 block_y = src_y >> GOB_SIZE_Y_SHIFT; + const u32 src_offset_y = (block_y >> block_height) * block_size + + ((block_y & block_height_mask) << GOB_SIZE_SHIFT); + for (u32 column = 0; column < line_length_in; ++column) { + const u32 src_x = (column + origin_x) * bytes_per_pixel; + const u32 src_offset_x = (src_x >> GOB_SIZE_X_SHIFT) << x_shift; + + const u32 swizzled_offset = src_offset_y + src_offset_x + table[src_x % GOB_SIZE_X]; + const u32 unswizzled_offset = line * pitch + column * bytes_per_pixel; - std::memcpy(dest_line, source_addr, bytes_per_pixel); + std::memcpy(output + unswizzled_offset, input + swizzled_offset, bytes_per_pixel); } } } @@ -261,7 +267,7 @@ void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 widt const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth); const u32 block_height_mask = (1U << block_height) - 1; - const u32 x_shift = Common::CountTrailingZeroes32(GOB_SIZE << (block_height + block_depth)); + const u32 x_shift = static_cast<u32>(GOB_SIZE_SHIFT) + block_height + block_depth; for (u32 line = 0; line < line_count; ++line) { const auto& table = LEGACY_SWIZZLE_TABLE[line % GOB_SIZE_Y]; diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h index d6fe35d37..01e156bc8 100644 --- a/src/video_core/textures/decoders.h +++ b/src/video_core/textures/decoders.h @@ -48,9 +48,8 @@ void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 u32 block_height_bit, u32 offset_x, u32 offset_y); /// Copies a tiled subrectangle into a linear surface. 
-void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32 swizzled_width, - u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height, - u32 offset_x, u32 offset_y); +void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel, + u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input); /// @brief Swizzles a 2D array of pixels into a 3D texture /// @param line_length_in Number of pixels per line |
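The rewritten UnswizzleSubrect computes block-linear addresses directly from GOB coordinates, matching the style of SwizzleSliceToVoxel, and moves the output/input pointers to the end of the parameter list. A sketch of the per-texel addressing follows; the GOB constants are the usual Tegra values (64-byte-wide, 8-row, 512-byte GOBs) assumed here rather than copied from decoders.h, and GobBaseOffset is an illustrative name, not a function in the tree.

    #include <cstdint>

    // Assumed Tegra GOB geometry (see video_core/textures/decoders.h):
    constexpr std::uint32_t GOB_SIZE_X = 64;       // bytes per GOB row
    constexpr std::uint32_t GOB_SIZE_Y = 8;        // rows per GOB
    constexpr std::uint32_t GOB_SIZE_SHIFT = 9;    // log2 of the 512-byte GOB
    constexpr std::uint32_t GOB_SIZE_Y_SHIFT = 3;  // log2(GOB_SIZE_Y)
    constexpr std::uint32_t GOB_SIZE_X_SHIFT = 6;  // log2(GOB_SIZE_X)

    // GOB base address of the texel at (x_bytes, y) in a block-linear surface,
    // following the new UnswizzleSubrect() math. The real function adds the
    // intra-GOB swizzle table entry for x_bytes % GOB_SIZE_X on top of this base.
    std::uint32_t GobBaseOffset(std::uint32_t x_bytes, std::uint32_t y,
                                std::uint32_t stride_bytes, std::uint32_t block_height_log2) {
        // One "block" is a column of (1 << block_height_log2) GOBs for every
        // GOB-wide span of the stride.
        const std::uint32_t gobs_in_x = (stride_bytes + GOB_SIZE_X - 1) / GOB_SIZE_X;
        const std::uint32_t block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height_log2);
        const std::uint32_t block_height_mask = (1U << block_height_log2) - 1;
        const std::uint32_t x_shift = GOB_SIZE_SHIFT + block_height_log2;

        const std::uint32_t gob_y = y >> GOB_SIZE_Y_SHIFT;
        const std::uint32_t offset_y = (gob_y >> block_height_log2) * block_size +
                                       ((gob_y & block_height_mask) << GOB_SIZE_SHIFT);
        const std::uint32_t offset_x = (x_bytes >> GOB_SIZE_X_SHIFT) << x_shift;
        return offset_y + offset_x;
    }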
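AllocateWorkers() no longer takes a worker count: the heuristic that previously lived in RasterizerOpenGL's constructor (a quarter of the hardware threads, clamped to between 1 and 4) moves into AsyncShaders so the new Vulkan call site gets the same policy. Reduced to a standalone helper, with an illustrative function name:

    #include <algorithm>
    #include <cstdint>
    #include <thread>

    // Mirrors the worker-count policy now inside AsyncShaders::AllocateWorkers().
    std::uint32_t DeduceWorkerCount() {
        constexpr std::uint32_t MAX_THREADS = 4;                              // hard cap
        const std::uint32_t threads_used = std::thread::hardware_concurrency() / 4;
        return std::min(std::max(1U, threads_used), MAX_THREADS);             // at least 1, at most 4
    }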
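QueueVulkanShader and the reworked QueueOpenGLShader push WorkerParams (now built with designated initializers and holding the device and registry by pointer/std::optional so the struct stays cheap to move) onto a single std::queue guarded by queue_mutex, and ShaderCompilerThread drops its <chrono> dependency and blocks purely on the condition variable. The hand-off, stripped of the backend-specific fields:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>
    #include <queue>

    struct WorkerParams { int backend; /* backend-specific fields trimmed */ };

    // Simplified producer/consumer core of AsyncShaders.
    class AsyncQueue {
    public:
        void Push(WorkerParams params) {
            std::unique_lock lock{mutex};
            pending.push(std::move(params));
            cv.notify_one();
        }

        void WorkerLoop() {
            while (!exiting.load(std::memory_order_relaxed)) {
                std::unique_lock lock{mutex};
                cv.wait(lock, [this] { return !pending.empty() || exiting; });
                if (exiting || pending.empty()) {
                    continue;
                }
                WorkerParams work = std::move(pending.front());
                pending.pop();
                lock.unlock();
                Compile(work); // build the shader/pipeline outside the lock
            }
        }

        void Stop() {
            exiting = true;
            cv.notify_all();
        }

    private:
        void Compile(const WorkerParams&) {}

        std::mutex mutex;
        std::condition_variable cv;
        std::queue<WorkerParams> pending;
        std::atomic<bool> exiting{};
    };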
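GraphicsPipelineCacheKey moves from vk_pipeline_cache.h into vk_graphics_pipeline.h so a pipeline can carry its own key (GetCacheKey()) for re-insertion by the async workers. The static_asserts it brings along (trivially copyable, unique object representations, with an explicit u32 padding member keeping the layout free of unnamed padding) are what allow Hash() and operator== to treat the first Size() bytes as a raw blob. A minimal sketch of that idea, using FNV-1a in place of whatever hash the project actually uses:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Byte-wise comparison/hash over a trivially copyable key of `size` meaningful bytes.
    // Sketch only; the real Hash() implementation may use a different hash function.
    template <typename Key>
    bool KeyEqual(const Key& lhs, const Key& rhs, std::size_t size) {
        return std::memcmp(&lhs, &rhs, size) == 0;
    }

    template <typename Key>
    std::size_t KeyHash(const Key& key, std::size_t size) {
        const auto* bytes = reinterpret_cast<const unsigned char*>(&key);
        std::uint64_t hash = 0xcbf29ce484222325ULL; // FNV-1a offset basis
        for (std::size_t i = 0; i < size; ++i) {
            hash = (hash ^ bytes[i]) * 0x100000001b3ULL; // FNV-1a prime
        }
        return static_cast<std::size_t>(hash);
    }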
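GetGraphicsPipeline now returns a pointer rather than a reference because, with asynchronous shaders enabled, a cache miss only reserves the map slot and queues the compile; the caller sees nullptr (or a pipeline whose handle is still VK_NULL_HANDLE) until the worker hands the result back through EmplacePipeline under the new pipeline_cache mutex. The hand-off pattern, with the key abbreviated to its 64-bit hash so the sketch stays self-contained:

    #include <cstdint>
    #include <memory>
    #include <mutex>
    #include <unordered_map>

    struct Pipeline {}; // stand-in for VKGraphicsPipeline

    // Nullable-lookup pattern used by VKPipelineCache when async shaders are on.
    class PipelineCache {
    public:
        // Returns nullptr while the pipeline is still being compiled on a worker.
        Pipeline* Get(std::uint64_t key_hash) {
            std::unique_lock lock{mutex};
            const auto [it, is_miss] = cache.try_emplace(key_hash);
            if (is_miss) {
                QueueBuild(key_hash); // hand the key to AsyncShaders (not shown)
            }
            return it->second.get();
        }

        // Called from a worker thread once compilation has finished.
        void Emplace(std::uint64_t key_hash, std::unique_ptr<Pipeline> pipeline) {
            std::unique_lock lock{mutex};
            cache.at(key_hash) = std::move(pipeline);
        }

    private:
        void QueueBuild(std::uint64_t) {}

        std::mutex mutex;
        std::unordered_map<std::uint64_t, std::unique_ptr<Pipeline>> cache;
    };

RasterizerVulkan::Draw simply skips the draw for that frame when the pipeline is not ready yet, which is why the trailing TickWork call moves into a SCOPE_EXIT.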
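Because Draw() can now return before recording anything, the final system.GPU().TickWork() call is replaced by a SCOPE_EXIT declared at the top of the function so it runs on every exit path. A scope guard of that shape looks roughly like this; it is illustrative, not the definition in common/scope_exit.h:

    #include <utility>

    template <typename Fn>
    class ScopeExit {
    public:
        explicit ScopeExit(Fn&& fn_) : fn{std::forward<Fn>(fn_)} {}
        ~ScopeExit() { fn(); }
        ScopeExit(const ScopeExit&) = delete;
        ScopeExit& operator=(const ScopeExit&) = delete;

    private:
        Fn fn;
    };

    void Draw(bool pipeline_ready) {
        ScopeExit tick_work{[] { /* system.GPU().TickWork(); */ }};
        if (!pipeline_ready) {
            return; // async pipeline not ready; TickWork still runs via the guard
        }
        // ... bind the pipeline and record the draw ...
    }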
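The MarkSharderBuilding/MarkShaderComplete calls (the spelling matches the declaration in video_core/shader_notify.h, otherwise these calls would not compile) bracket both the synchronous and asynchronous compile paths so the frontend can report how many shaders are still building. A hypothetical stand-in for that counter, not the actual class:

    #include <atomic>
    #include <cstdint>

    // Sketch of a thread-safe "shaders currently building" counter in the spirit
    // of ShaderNotify; the real interface and naming may differ.
    class ShaderNotify {
    public:
        void MarkShaderBuilding() { building.fetch_add(1, std::memory_order_relaxed); }
        void MarkShaderComplete() { building.fetch_sub(1, std::memory_order_relaxed); }
        std::uint32_t ShadersBuilding() const { return building.load(std::memory_order_relaxed); }

    private:
        std::atomic<std::uint32_t> building{};
    };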