Diffstat (limited to 'src/video_core')
26 files changed, 1543 insertions, 167 deletions
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 1db0d031d..6036d6ed3 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -101,6 +101,22 @@ add_library(video_core STATIC video_core.h ) +if (ENABLE_VULKAN) + target_sources(video_core PRIVATE + renderer_vulkan/declarations.h + renderer_vulkan/vk_device.cpp + renderer_vulkan/vk_device.h + renderer_vulkan/vk_memory_manager.cpp + renderer_vulkan/vk_memory_manager.h + renderer_vulkan/vk_resource_manager.cpp + renderer_vulkan/vk_resource_manager.h + renderer_vulkan/vk_scheduler.cpp + renderer_vulkan/vk_scheduler.h) + + target_include_directories(video_core PRIVATE ../../externals/Vulkan-Headers/include) + target_compile_definitions(video_core PRIVATE HAS_VULKAN) +endif() + create_target_directory_groups(video_core) target_link_libraries(video_core PUBLIC common core) diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp index eb9bf1878..669541b4b 100644 --- a/src/video_core/dma_pusher.cpp +++ b/src/video_core/dma_pusher.cpp @@ -33,18 +33,36 @@ void DmaPusher::DispatchCalls() { } bool DmaPusher::Step() { - if (dma_get != dma_put) { - // Push buffer non-empty, read a word - const auto address = gpu.MemoryManager().GpuToCpuAddress(dma_get); - ASSERT_MSG(address, "Invalid GPU address"); + if (!ib_enable || dma_pushbuffer.empty()) { + // pushbuffer empty and IB empty or nonexistent - nothing to do + return false; + } - const CommandHeader command_header{Memory::Read32(*address)}; + const CommandList& command_list{dma_pushbuffer.front()}; + const CommandListHeader& command_list_header{command_list[dma_pushbuffer_subindex++]}; + GPUVAddr dma_get = command_list_header.addr; + GPUVAddr dma_put = dma_get + command_list_header.size * sizeof(u32); + bool non_main = command_list_header.is_non_main; - dma_get += sizeof(u32); + if (dma_pushbuffer_subindex >= command_list.size()) { + // We've gone through the current list, remove it from the queue + dma_pushbuffer.pop(); + dma_pushbuffer_subindex = 0; + } - if (!non_main) { - dma_mget = dma_get; - } + if (command_list_header.size == 0) { + return true; + } + + // Push buffer non-empty, read a word + const auto address = gpu.MemoryManager().GpuToCpuAddress(dma_get); + ASSERT_MSG(address, "Invalid GPU address"); + + command_headers.resize(command_list_header.size); + + Memory::ReadBlock(*address, command_headers.data(), command_list_header.size * sizeof(u32)); + + for (const CommandHeader& command_header : command_headers) { // now, see if we're in the middle of a command if (dma_state.length_pending) { @@ -91,22 +109,11 @@ bool DmaPusher::Step() { break; } } - } else if (ib_enable && !dma_pushbuffer.empty()) { - // Current pushbuffer empty, but we have more IB entries to read - const CommandList& command_list{dma_pushbuffer.front()}; - const CommandListHeader& command_list_header{command_list[dma_pushbuffer_subindex++]}; - dma_get = command_list_header.addr; - dma_put = dma_get + command_list_header.size * sizeof(u32); - non_main = command_list_header.is_non_main; - - if (dma_pushbuffer_subindex >= command_list.size()) { - // We've gone through the current list, remove it from the queue - dma_pushbuffer.pop(); - dma_pushbuffer_subindex = 0; - } - } else { - // Otherwise, pushbuffer empty and IB empty or nonexistent - nothing to do - return {}; + } + + if (!non_main) { + // TODO (degasus): This is dead code, as dma_mget is never read. 
+ dma_mget = dma_put; } return true; diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h index 1097e5c49..27a36348c 100644 --- a/src/video_core/dma_pusher.h +++ b/src/video_core/dma_pusher.h @@ -75,6 +75,8 @@ private: GPU& gpu; + std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once + std::queue<CommandList> dma_pushbuffer; ///< Queue of command lists to be processed std::size_t dma_pushbuffer_subindex{}; ///< Index within a command list within the pushbuffer @@ -89,11 +91,8 @@ private: DmaState dma_state{}; bool dma_increment_once{}; - GPUVAddr dma_put{}; ///< pushbuffer current end address - GPUVAddr dma_get{}; ///< pushbuffer current read address GPUVAddr dma_mget{}; ///< main pushbuffer last read address bool ib_enable{true}; ///< IB mode enabled - bool non_main{}; ///< non-main pushbuffer active }; } // namespace Tegra diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp index 5c1029ddf..4f6126116 100644 --- a/src/video_core/engines/kepler_memory.cpp +++ b/src/video_core/engines/kepler_memory.cpp @@ -2,6 +2,7 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. +#include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" #include "core/memory.h" @@ -11,9 +12,9 @@ namespace Tegra::Engines { -KeplerMemory::KeplerMemory(VideoCore::RasterizerInterface& rasterizer, +KeplerMemory::KeplerMemory(Core::System& system, VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager) - : memory_manager(memory_manager), rasterizer{rasterizer} {} + : system{system}, memory_manager(memory_manager), rasterizer{rasterizer} {} KeplerMemory::~KeplerMemory() = default; @@ -50,7 +51,7 @@ void KeplerMemory::ProcessData(u32 data) { rasterizer.InvalidateRegion(*dest_address, sizeof(u32)); Memory::Write32(*dest_address, data); - Core::System::GetInstance().GPU().Maxwell3D().dirty_flags.OnMemoryWrite(); + system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite(); state.write_offset++; } diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h index fe9ebc5b9..f680c2ad9 100644 --- a/src/video_core/engines/kepler_memory.h +++ b/src/video_core/engines/kepler_memory.h @@ -5,13 +5,16 @@ #pragma once #include <array> -#include "common/assert.h" #include "common/bit_field.h" #include "common/common_funcs.h" #include "common/common_types.h" #include "video_core/gpu.h" #include "video_core/memory_manager.h" +namespace Core { +class System; +} + namespace VideoCore { class RasterizerInterface; } @@ -23,7 +26,8 @@ namespace Tegra::Engines { class KeplerMemory final { public: - KeplerMemory(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager); + KeplerMemory(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + MemoryManager& memory_manager); ~KeplerMemory(); /// Write the value to the register identified by method. @@ -76,6 +80,7 @@ public: } state{}; private: + Core::System& system; MemoryManager& memory_manager; VideoCore::RasterizerInterface& rasterizer; diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 19b6b14b2..2d2136067 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -19,8 +19,10 @@ namespace Tegra::Engines { /// First register id that is actually a Macro call. 
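The DmaPusher::Step() rework above stops reading the pushbuffer one word at a time through Memory::Read32 and instead fetches each command-list segment with a single Memory::ReadBlock into a reusable command_headers vector, decoding from the local copy. A minimal standalone sketch of that batching pattern — every type and name here is an illustrative stand-in, not yuzu's real API:

```cpp
#include <cstdint>
#include <cstring>
#include <queue>
#include <vector>

// Illustrative stand-in for Tegra::CommandListHeader.
struct CommandListHeader {
    std::uint64_t addr; // In this sketch: word offset into the flat guest memory
    std::uint32_t size; // Length of the segment in 32-bit words
};

// One Step(): pop the next segment, copy all of its words in one call, then
// decode from the local copy instead of issuing per-word guest reads.
bool StepOnce(std::queue<CommandListHeader>& pushbuffer,
              const std::vector<std::uint32_t>& guest_memory,
              std::vector<std::uint32_t>& command_headers) {
    if (pushbuffer.empty()) {
        return false; // Nothing to do
    }
    const CommandListHeader header = pushbuffer.front();
    pushbuffer.pop();
    if (header.size == 0) {
        return true; // Empty segment, keep pumping
    }
    command_headers.resize(header.size); // Reused across calls, like the member
    std::memcpy(command_headers.data(), guest_memory.data() + header.addr,
                header.size * sizeof(std::uint32_t));
    for (const std::uint32_t word : command_headers) {
        (void)word; // Command decoding happens here in the real Step()
    }
    return true;
}

int main() {
    std::queue<CommandListHeader> pushbuffer;
    pushbuffer.push({0, 4});
    const std::vector<std::uint32_t> guest_memory(16, 0);
    std::vector<std::uint32_t> command_headers;
    while (StepOnce(pushbuffer, guest_memory, command_headers)) {
    }
}
```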
constexpr u32 MacroRegistersStart = 0xE00; -Maxwell3D::Maxwell3D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager) - : memory_manager(memory_manager), rasterizer{rasterizer}, macro_interpreter(*this) { +Maxwell3D::Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + MemoryManager& memory_manager) + : memory_manager(memory_manager), system{system}, rasterizer{rasterizer}, + macro_interpreter(*this) { InitializeRegisterDefaults(); } @@ -103,7 +105,7 @@ void Maxwell3D::CallMacroMethod(u32 method, std::vector<u32> parameters) { } void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) { - auto debug_context = Core::System::GetInstance().GetGPUDebugContext(); + auto debug_context = system.GetGPUDebugContext(); // It is an error to write to a register other than the current macro's ARG register before it // has finished execution. @@ -317,7 +319,7 @@ void Maxwell3D::ProcessQueryGet() { LongQueryResult query_result{}; query_result.value = result; // TODO(Subv): Generate a real GPU timestamp and write it here instead of CoreTiming - query_result.timestamp = Core::Timing::GetTicks(); + query_result.timestamp = system.CoreTiming().GetTicks(); Memory::WriteBlock(*address, &query_result, sizeof(query_result)); } dirty_flags.OnMemoryWrite(); @@ -334,7 +336,7 @@ void Maxwell3D::DrawArrays() { regs.vertex_buffer.count); ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?"); - auto debug_context = Core::System::GetInstance().GetGPUDebugContext(); + auto debug_context = system.GetGPUDebugContext(); if (debug_context) { debug_context->OnEvent(Tegra::DebugContext::Event::IncomingPrimitiveBatch, nullptr); diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index 1f76aa670..0e3873ffd 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h @@ -17,6 +17,10 @@ #include "video_core/memory_manager.h" #include "video_core/textures/texture.h" +namespace Core { +class System; +} + namespace VideoCore { class RasterizerInterface; } @@ -28,7 +32,8 @@ namespace Tegra::Engines { class Maxwell3D final { public: - explicit Maxwell3D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager); + explicit Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + MemoryManager& memory_manager); ~Maxwell3D() = default; /// Register structure of the Maxwell3D engine. @@ -1131,6 +1136,8 @@ public: private: void InitializeRegisterDefaults(); + Core::System& system; + VideoCore::RasterizerInterface& rasterizer; /// Start offsets of each macro in macro_memory diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index d6c41a5ae..529a14ec7 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp @@ -2,6 +2,7 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
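Maxwell3D, KeplerMemory and MaxwellDMA now receive Core::System by reference at construction instead of calling the Core::System::GetInstance() singleton inside hot paths. A small sketch of the injection pattern, with placeholder members standing in for the real System:

```cpp
#include <iostream>

namespace Core {
// Stand-in for yuzu's Core::System; the real class owns timing, memory, GPU, etc.
class System {
public:
    long GetTicks() const { return ticks; }
    void Advance() { ++ticks; }
private:
    long ticks = 0;
};
} // namespace Core

// The engine keeps the injected reference, making the dependency explicit
// (and mockable in tests) instead of reaching for a global.
class Engine {
public:
    explicit Engine(Core::System& system) : system{system} {}
    void Tick() const { std::cout << "ticks=" << system.GetTicks() << '\n'; }
private:
    Core::System& system;
};

int main() {
    Core::System system;
    Engine engine{system};
    system.Advance();
    engine.Tick(); // prints ticks=1
}
```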
+#include "common/assert.h" #include "core/core.h" #include "core/memory.h" #include "video_core/engines/maxwell_3d.h" @@ -11,8 +12,9 @@ namespace Tegra::Engines { -MaxwellDMA::MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager) - : memory_manager(memory_manager), rasterizer{rasterizer} {} +MaxwellDMA::MaxwellDMA(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + MemoryManager& memory_manager) + : memory_manager(memory_manager), system{system}, rasterizer{rasterizer} {} void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) { ASSERT_MSG(method_call.method < Regs::NUM_REGS, @@ -59,7 +61,7 @@ void MaxwellDMA::HandleCopy() { } // All copies here update the main memory, so mark all rasterizer states as invalid. - Core::System::GetInstance().GPU().Maxwell3D().dirty_flags.OnMemoryWrite(); + system.GPU().Maxwell3D().dirty_flags.OnMemoryWrite(); if (regs.exec.is_dst_linear && regs.exec.is_src_linear) { // When the enable_2d bit is disabled, the copy is performed as if we were copying a 1D diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h index 1f8cd65d2..cf75aeb12 100644 --- a/src/video_core/engines/maxwell_dma.h +++ b/src/video_core/engines/maxwell_dma.h @@ -5,13 +5,16 @@ #pragma once #include <array> -#include "common/assert.h" #include "common/bit_field.h" #include "common/common_funcs.h" #include "common/common_types.h" #include "video_core/gpu.h" #include "video_core/memory_manager.h" +namespace Core { +class System; +} + namespace VideoCore { class RasterizerInterface; } @@ -20,7 +23,8 @@ namespace Tegra::Engines { class MaxwellDMA final { public: - explicit MaxwellDMA(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager); + explicit MaxwellDMA(Core::System& system, VideoCore::RasterizerInterface& rasterizer, + MemoryManager& memory_manager); ~MaxwellDMA() = default; /// Write the value to the register identified by method. @@ -137,6 +141,8 @@ public: MemoryManager& memory_manager; private: + Core::System& system; + VideoCore::RasterizerInterface& rasterizer; /// Performs the copy from the source buffer to the destination buffer as configured in the diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index 3d00c308b..ac30d1a89 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -3,6 +3,7 @@ // Refer to the license.txt file included. 
#include "common/assert.h" +#include "core/core.h" #include "core/core_timing.h" #include "core/memory.h" #include "video_core/engines/fermi_2d.h" @@ -27,14 +28,14 @@ u32 FramebufferConfig::BytesPerPixel(PixelFormat format) { UNREACHABLE(); } -GPU::GPU(VideoCore::RasterizerInterface& rasterizer) { +GPU::GPU(Core::System& system, VideoCore::RasterizerInterface& rasterizer) { memory_manager = std::make_unique<Tegra::MemoryManager>(); dma_pusher = std::make_unique<Tegra::DmaPusher>(*this); - maxwell_3d = std::make_unique<Engines::Maxwell3D>(rasterizer, *memory_manager); + maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, rasterizer, *memory_manager); fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer, *memory_manager); kepler_compute = std::make_unique<Engines::KeplerCompute>(*memory_manager); - maxwell_dma = std::make_unique<Engines::MaxwellDMA>(rasterizer, *memory_manager); - kepler_memory = std::make_unique<Engines::KeplerMemory>(rasterizer, *memory_manager); + maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, rasterizer, *memory_manager); + kepler_memory = std::make_unique<Engines::KeplerMemory>(system, rasterizer, *memory_manager); } GPU::~GPU() = default; @@ -283,7 +284,7 @@ void GPU::ProcessSemaphoreTriggerMethod() { block.sequence = regs.semaphore_sequence; // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of // CoreTiming - block.timestamp = Core::Timing::GetTicks(); + block.timestamp = Core::System::GetInstance().CoreTiming().GetTicks(); Memory::WriteBlock(*address, &block, sizeof(block)); } else { const auto address = diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h index a482196ea..0f5bfdcbf 100644 --- a/src/video_core/gpu.h +++ b/src/video_core/gpu.h @@ -6,12 +6,15 @@ #include <array> #include <memory> -#include <vector> #include "common/common_types.h" #include "core/hle/service/nvflinger/buffer_queue.h" #include "video_core/dma_pusher.h" #include "video_core/memory_manager.h" +namespace Core { +class System; +} + namespace VideoCore { class RasterizerInterface; } @@ -118,7 +121,7 @@ enum class EngineID { class GPU final { public: - explicit GPU(VideoCore::RasterizerInterface& rasterizer); + explicit GPU(Core::System& system, VideoCore::RasterizerInterface& rasterizer); ~GPU(); struct MethodCall { diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 59f671048..e6d47ce41 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp @@ -423,7 +423,7 @@ void SwizzleFunc(const MortonSwizzleMode& mode, const SurfaceParams& params, for (u32 i = 0; i < params.depth; i++) { MortonSwizzle(mode, params.pixel_format, params.MipWidth(mip_level), params.MipBlockHeight(mip_level), params.MipHeight(mip_level), - params.MipBlockDepth(mip_level), params.tile_width_spacing, 1, + params.MipBlockDepth(mip_level), 1, params.tile_width_spacing, gl_buffer.data() + offset_gl, gl_size, params.addr + offset); offset += layer_size; offset_gl += gl_size; @@ -1257,7 +1257,11 @@ Surface RasterizerCacheOpenGL::RecreateSurface(const Surface& old_surface, case SurfaceTarget::TextureCubemap: case SurfaceTarget::Texture2DArray: case SurfaceTarget::TextureCubeArray: - FastLayeredCopySurface(old_surface, new_surface); + if (old_params.pixel_format == new_params.pixel_format) + FastLayeredCopySurface(old_surface, new_surface); + else { + AccurateCopySurface(old_surface, new_surface); + } break; default: 
LOG_CRITICAL(Render_OpenGL, "Unimplemented surface target={}", diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h index b81882d04..89d733c50 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h @@ -36,7 +36,6 @@ using PixelFormat = VideoCore::Surface::PixelFormat; using ComponentType = VideoCore::Surface::ComponentType; struct SurfaceParams { - enum class SurfaceClass { Uploaded, RenderTarget, @@ -169,20 +168,27 @@ struct SurfaceParams { } u32 MipBlockDepth(u32 mip_level) const { - if (mip_level == 0) + if (mip_level == 0) { return block_depth; - if (is_layered) + } + + if (is_layered) { return 1; - u32 depth = MipDepth(mip_level); + } + + const u32 mip_depth = MipDepth(mip_level); u32 bd = 32; - while (bd > 1 && depth * 2 <= bd) { + while (bd > 1 && mip_depth * 2 <= bd) { bd >>= 1; } + if (bd == 32) { - u32 bh = MipBlockHeight(mip_level); - if (bh >= 4) + const u32 bh = MipBlockHeight(mip_level); + if (bh >= 4) { return 16; + } } + return bd; } diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index f4140624e..72ff6ac6a 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp @@ -606,17 +606,8 @@ private: std::string VisitOperand(Operation operation, std::size_t operand_index, Type type) { std::string value = VisitOperand(operation, operand_index); - switch (type) { - case Type::Bool: - case Type::Bool2: - case Type::Float: - return value; - case Type::Int: - return "ftoi(" + value + ')'; - case Type::Uint: - return "ftou(" + value + ')'; - case Type::HalfFloat: + case Type::HalfFloat: { const auto half_meta = std::get_if<MetaHalfArithmetic>(&operation.GetMeta()); if (!half_meta) { value = "toHalf2(" + value + ')'; @@ -633,6 +624,26 @@ private: return "vec2(toHalf2(" + value + ")[1])"; } } + default: + return CastOperand(value, type); + } + } + + std::string CastOperand(const std::string& value, Type type) const { + switch (type) { + case Type::Bool: + case Type::Bool2: + case Type::Float: + return value; + case Type::Int: + return "ftoi(" + value + ')'; + case Type::Uint: + return "ftou(" + value + ')'; + case Type::HalfFloat: + // Can't be handled as a stand-alone value + UNREACHABLE(); + return value; + } UNREACHABLE(); return value; } @@ -640,6 +651,7 @@ private: std::string BitwiseCastResult(std::string value, Type type, bool needs_parenthesis = false) { switch (type) { case Type::Bool: + case Type::Bool2: case Type::Float: if (needs_parenthesis) { return '(' + value + ')'; @@ -711,7 +723,7 @@ private: const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); ASSERT(meta); - const auto count = static_cast<u32>(operation.GetOperandsCount()); + const std::size_t count = operation.GetOperandsCount(); const bool has_array = meta->sampler.IsArray(); const bool has_shadow = meta->sampler.IsShadow(); @@ -722,10 +734,10 @@ private: expr += coord_constructors.at(count + (has_array ? 1 : 0) + (has_shadow ? 
1 : 0) - 1); expr += '('; - for (u32 i = 0; i < count; ++i) { + for (std::size_t i = 0; i < count; ++i) { expr += Visit(operation[i]); - const u32 next = i + 1; + const std::size_t next = i + 1; if (next < count || has_array || has_shadow) expr += ", "; } @@ -1196,25 +1208,26 @@ private: const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); ASSERT(meta); UNIMPLEMENTED_IF(meta->sampler.IsArray()); - UNIMPLEMENTED_IF(!meta->extras.empty()); - - const auto count = static_cast<u32>(operation.GetOperandsCount()); + const std::size_t count = operation.GetOperandsCount(); std::string expr = "texelFetch("; expr += GetSampler(meta->sampler); expr += ", "; - expr += constructors.at(count - 1); + expr += constructors.at(operation.GetOperandsCount() - 1); expr += '('; - for (u32 i = 0; i < count; ++i) { + for (std::size_t i = 0; i < count; ++i) { expr += VisitOperand(operation, i, Type::Int); - - const u32 next = i + 1; + const std::size_t next = i + 1; if (next == count) expr += ')'; - if (next < count) + else if (next < count) expr += ", "; } + for (std::size_t i = 0; i < meta->extras.size(); ++i) { + expr += ", "; + expr += CastOperand(Visit(meta->extras.at(i)), Type::Int); + } expr += ')'; return expr + GetSwizzle(meta->element); diff --git a/src/video_core/renderer_opengl/gl_state.cpp b/src/video_core/renderer_opengl/gl_state.cpp index 81af803bc..219f08053 100644 --- a/src/video_core/renderer_opengl/gl_state.cpp +++ b/src/video_core/renderer_opengl/gl_state.cpp @@ -11,7 +11,9 @@ namespace OpenGL { OpenGLState OpenGLState::cur_state; + bool OpenGLState::s_rgb_used; + OpenGLState::OpenGLState() { // These all match default OpenGL values geometry_shaders.enabled = false; @@ -112,7 +114,6 @@ void OpenGLState::ApplyDefaultState() { } void OpenGLState::ApplySRgb() const { - // sRGB if (framebuffer_srgb.enabled != cur_state.framebuffer_srgb.enabled) { if (framebuffer_srgb.enabled) { // Track if sRGB is used @@ -125,23 +126,20 @@ void OpenGLState::ApplySRgb() const { } void OpenGLState::ApplyCulling() const { - // Culling - const bool cull_changed = cull.enabled != cur_state.cull.enabled; - if (cull_changed) { + if (cull.enabled != cur_state.cull.enabled) { if (cull.enabled) { glEnable(GL_CULL_FACE); } else { glDisable(GL_CULL_FACE); } } - if (cull.enabled) { - if (cull_changed || cull.mode != cur_state.cull.mode) { - glCullFace(cull.mode); - } - if (cull_changed || cull.front_face != cur_state.cull.front_face) { - glFrontFace(cull.front_face); - } + if (cull.mode != cur_state.cull.mode) { + glCullFace(cull.mode); + } + + if (cull.front_face != cur_state.cull.front_face) { + glFrontFace(cull.front_face); } } @@ -172,72 +170,63 @@ void OpenGLState::ApplyColorMask() const { } void OpenGLState::ApplyDepth() const { - // Depth test - const bool depth_test_changed = depth.test_enabled != cur_state.depth.test_enabled; - if (depth_test_changed) { + if (depth.test_enabled != cur_state.depth.test_enabled) { if (depth.test_enabled) { glEnable(GL_DEPTH_TEST); } else { glDisable(GL_DEPTH_TEST); } } - if (depth.test_enabled && - (depth_test_changed || depth.test_func != cur_state.depth.test_func)) { + + if (depth.test_func != cur_state.depth.test_func) { glDepthFunc(depth.test_func); } - // Depth mask + if (depth.write_mask != cur_state.depth.write_mask) { glDepthMask(depth.write_mask); } } void OpenGLState::ApplyPrimitiveRestart() const { - const bool primitive_restart_changed = - primitive_restart.enabled != cur_state.primitive_restart.enabled; - if (primitive_restart_changed) { + if 
(primitive_restart.enabled != cur_state.primitive_restart.enabled) { if (primitive_restart.enabled) { glEnable(GL_PRIMITIVE_RESTART); } else { glDisable(GL_PRIMITIVE_RESTART); } } - if (primitive_restart_changed || - (primitive_restart.enabled && - primitive_restart.index != cur_state.primitive_restart.index)) { + + if (primitive_restart.index != cur_state.primitive_restart.index) { glPrimitiveRestartIndex(primitive_restart.index); } } void OpenGLState::ApplyStencilTest() const { - const bool stencil_test_changed = stencil.test_enabled != cur_state.stencil.test_enabled; - if (stencil_test_changed) { + if (stencil.test_enabled != cur_state.stencil.test_enabled) { if (stencil.test_enabled) { glEnable(GL_STENCIL_TEST); } else { glDisable(GL_STENCIL_TEST); } } - if (stencil.test_enabled) { - auto config_stencil = [stencil_test_changed](GLenum face, const auto& config, - const auto& prev_config) { - if (stencil_test_changed || config.test_func != prev_config.test_func || - config.test_ref != prev_config.test_ref || - config.test_mask != prev_config.test_mask) { - glStencilFuncSeparate(face, config.test_func, config.test_ref, config.test_mask); - } - if (stencil_test_changed || config.action_depth_fail != prev_config.action_depth_fail || - config.action_depth_pass != prev_config.action_depth_pass || - config.action_stencil_fail != prev_config.action_stencil_fail) { - glStencilOpSeparate(face, config.action_stencil_fail, config.action_depth_fail, - config.action_depth_pass); - } - if (config.write_mask != prev_config.write_mask) { - glStencilMaskSeparate(face, config.write_mask); - } - }; - config_stencil(GL_FRONT, stencil.front, cur_state.stencil.front); - config_stencil(GL_BACK, stencil.back, cur_state.stencil.back); - } + + const auto ConfigStencil = [](GLenum face, const auto& config, const auto& prev_config) { + if (config.test_func != prev_config.test_func || config.test_ref != prev_config.test_ref || + config.test_mask != prev_config.test_mask) { + glStencilFuncSeparate(face, config.test_func, config.test_ref, config.test_mask); + } + if (config.action_depth_fail != prev_config.action_depth_fail || + config.action_depth_pass != prev_config.action_depth_pass || + config.action_stencil_fail != prev_config.action_stencil_fail) { + glStencilOpSeparate(face, config.action_stencil_fail, config.action_depth_fail, + config.action_depth_pass); + } + if (config.write_mask != prev_config.write_mask) { + glStencilMaskSeparate(face, config.write_mask); + } + }; + ConfigStencil(GL_FRONT, stencil.front, cur_state.stencil.front); + ConfigStencil(GL_BACK, stencil.back, cur_state.stencil.back); } // Viewport does not affects glClearBuffer so emulate viewport using scissor test void OpenGLState::EmulateViewportWithScissor() { @@ -278,19 +267,18 @@ void OpenGLState::ApplyViewport() const { updated.depth_range_far != current.depth_range_far) { glDepthRangeIndexed(i, updated.depth_range_near, updated.depth_range_far); } - const bool scissor_changed = updated.scissor.enabled != current.scissor.enabled; - if (scissor_changed) { + + if (updated.scissor.enabled != current.scissor.enabled) { if (updated.scissor.enabled) { glEnablei(GL_SCISSOR_TEST, i); } else { glDisablei(GL_SCISSOR_TEST, i); } } - if (updated.scissor.enabled && - (scissor_changed || updated.scissor.x != current.scissor.x || - updated.scissor.y != current.scissor.y || - updated.scissor.width != current.scissor.width || - updated.scissor.height != current.scissor.height)) { + + if (updated.scissor.x != current.scissor.x || updated.scissor.y != 
current.scissor.y || + updated.scissor.width != current.scissor.width || + updated.scissor.height != current.scissor.height) { glScissorIndexed(i, updated.scissor.x, updated.scissor.y, updated.scissor.width, updated.scissor.height); } @@ -302,22 +290,23 @@ void OpenGLState::ApplyViewport() const { updated.height != current.height) { glViewport(updated.x, updated.y, updated.width, updated.height); } + if (updated.depth_range_near != current.depth_range_near || updated.depth_range_far != current.depth_range_far) { glDepthRange(updated.depth_range_near, updated.depth_range_far); } - const bool scissor_changed = updated.scissor.enabled != current.scissor.enabled; - if (scissor_changed) { + + if (updated.scissor.enabled != current.scissor.enabled) { if (updated.scissor.enabled) { glEnable(GL_SCISSOR_TEST); } else { glDisable(GL_SCISSOR_TEST); } } - if (updated.scissor.enabled && (scissor_changed || updated.scissor.x != current.scissor.x || - updated.scissor.y != current.scissor.y || - updated.scissor.width != current.scissor.width || - updated.scissor.height != current.scissor.height)) { + + if (updated.scissor.x != current.scissor.x || updated.scissor.y != current.scissor.y || + updated.scissor.width != current.scissor.width || + updated.scissor.height != current.scissor.height) { glScissor(updated.scissor.x, updated.scissor.y, updated.scissor.width, updated.scissor.height); } @@ -327,8 +316,7 @@ void OpenGLState::ApplyViewport() const { void OpenGLState::ApplyGlobalBlending() const { const Blend& current = cur_state.blend[0]; const Blend& updated = blend[0]; - const bool blend_changed = updated.enabled != current.enabled; - if (blend_changed) { + if (updated.enabled != current.enabled) { if (updated.enabled) { glEnable(GL_BLEND); } else { @@ -338,15 +326,14 @@ void OpenGLState::ApplyGlobalBlending() const { if (!updated.enabled) { return; } - if (blend_changed || updated.src_rgb_func != current.src_rgb_func || + if (updated.src_rgb_func != current.src_rgb_func || updated.dst_rgb_func != current.dst_rgb_func || updated.src_a_func != current.src_a_func || updated.dst_a_func != current.dst_a_func) { glBlendFuncSeparate(updated.src_rgb_func, updated.dst_rgb_func, updated.src_a_func, updated.dst_a_func); } - if (blend_changed || updated.rgb_equation != current.rgb_equation || - updated.a_equation != current.a_equation) { + if (updated.rgb_equation != current.rgb_equation || updated.a_equation != current.a_equation) { glBlendEquationSeparate(updated.rgb_equation, updated.a_equation); } } @@ -354,26 +341,22 @@ void OpenGLState::ApplyGlobalBlending() const { void OpenGLState::ApplyTargetBlending(std::size_t target, bool force) const { const Blend& updated = blend[target]; const Blend& current = cur_state.blend[target]; - const bool blend_changed = updated.enabled != current.enabled || force; - if (blend_changed) { + if (updated.enabled != current.enabled || force) { if (updated.enabled) { glEnablei(GL_BLEND, static_cast<GLuint>(target)); } else { glDisablei(GL_BLEND, static_cast<GLuint>(target)); } } - if (!updated.enabled) { - return; - } - if (blend_changed || updated.src_rgb_func != current.src_rgb_func || + + if (updated.src_rgb_func != current.src_rgb_func || updated.dst_rgb_func != current.dst_rgb_func || updated.src_a_func != current.src_a_func || updated.dst_a_func != current.dst_a_func) { glBlendFuncSeparatei(static_cast<GLuint>(target), updated.src_rgb_func, updated.dst_rgb_func, updated.src_a_func, updated.dst_a_func); } - if (blend_changed || updated.rgb_equation != current.rgb_equation 
|| - updated.a_equation != current.a_equation) { + if (updated.rgb_equation != current.rgb_equation || updated.a_equation != current.a_equation) { glBlendEquationSeparatei(static_cast<GLuint>(target), updated.rgb_equation, updated.a_equation); } @@ -397,8 +380,7 @@ void OpenGLState::ApplyBlending() const { } void OpenGLState::ApplyLogicOp() const { - const bool logic_op_changed = logic_op.enabled != cur_state.logic_op.enabled; - if (logic_op_changed) { + if (logic_op.enabled != cur_state.logic_op.enabled) { if (logic_op.enabled) { glEnable(GL_COLOR_LOGIC_OP); } else { @@ -406,14 +388,12 @@ void OpenGLState::ApplyLogicOp() const { } } - if (logic_op.enabled && - (logic_op_changed || logic_op.operation != cur_state.logic_op.operation)) { + if (logic_op.operation != cur_state.logic_op.operation) { glLogicOp(logic_op.operation); } } void OpenGLState::ApplyPolygonOffset() const { - const bool fill_enable_changed = polygon_offset.fill_enable != cur_state.polygon_offset.fill_enable; const bool line_enable_changed = @@ -448,9 +428,7 @@ void OpenGLState::ApplyPolygonOffset() const { } } - if ((polygon_offset.fill_enable || polygon_offset.line_enable || polygon_offset.point_enable) && - (factor_changed || units_changed || clamp_changed)) { - + if (factor_changed || units_changed || clamp_changed) { if (GLAD_GL_EXT_polygon_offset_clamp && polygon_offset.clamp != 0) { glPolygonOffsetClamp(polygon_offset.factor, polygon_offset.units, polygon_offset.clamp); } else { @@ -528,9 +506,9 @@ void OpenGLState::ApplyDepthClamp() const { depth_clamp.near_plane == cur_state.depth_clamp.near_plane) { return; } - if (depth_clamp.far_plane != depth_clamp.near_plane) { - UNIMPLEMENTED_MSG("Unimplemented Depth Clamp Separation!"); - } + UNIMPLEMENTED_IF_MSG(depth_clamp.far_plane != depth_clamp.near_plane, + "Unimplemented Depth Clamp Separation!"); + if (depth_clamp.far_plane || depth_clamp.near_plane) { glEnable(GL_DEPTH_CLAMP); } else { diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index cca2ed708..272fc2e8e 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp @@ -137,7 +137,7 @@ void RendererOpenGL::SwapBuffers( render_window.PollEvents(); - system.FrameLimiter().DoFrameLimiting(Core::Timing::GetGlobalTimeUs()); + system.FrameLimiter().DoFrameLimiting(system.CoreTiming().GetGlobalTimeUs()); system.GetPerfStats().BeginSystemFrame(); // Restore the rasterizer state @@ -380,7 +380,8 @@ void RendererOpenGL::CaptureScreenshot() { GLuint renderbuffer; glGenRenderbuffers(1, &renderbuffer); glBindRenderbuffer(GL_RENDERBUFFER, renderbuffer); - glRenderbufferStorage(GL_RENDERBUFFER, GL_RGB8, layout.width, layout.height); + glRenderbufferStorage(GL_RENDERBUFFER, state.GetsRGBUsed() ? GL_SRGB8 : GL_RGB8, layout.width, + layout.height); glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderbuffer); DrawScreen(layout); diff --git a/src/video_core/renderer_vulkan/declarations.h b/src/video_core/renderer_vulkan/declarations.h new file mode 100644 index 000000000..ba25b5bc7 --- /dev/null +++ b/src/video_core/renderer_vulkan/declarations.h @@ -0,0 +1,45 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
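The gl_state.cpp rewrite above drops the *_changed bookkeeping: each Apply* helper now compares its fields directly against the cached cur_state and issues a GL call only when a value differs, even while the owning feature is disabled — a harmless redundant state set traded for much simpler tracking. A toy sketch of that redundant-call filter, with a hypothetical two-field state and prints standing in for real GL calls:

```cpp
#include <iostream>

// Two toy fields standing in for OpenGLState's cull block.
struct CullState {
    bool enabled = false;
    int mode = 0; // Would be GL_BACK / GL_FRONT in real code
};

struct ToyState {
    CullState cull;
    static ToyState cur_state; // Mirror of what the driver last saw

    void ApplyCulling() const {
        // Only talk to the driver when the cached value differs.
        if (cull.enabled != cur_state.cull.enabled) {
            std::cout << (cull.enabled ? "glEnable(GL_CULL_FACE)\n"
                                       : "glDisable(GL_CULL_FACE)\n");
        }
        if (cull.mode != cur_state.cull.mode) {
            std::cout << "glCullFace(" << cull.mode << ")\n";
        }
    }

    void Apply() {
        ApplyCulling();
        cur_state = *this; // Keep the cache in sync after applying
    }
};

ToyState ToyState::cur_state;

int main() {
    ToyState state;
    state.cull.enabled = true;
    state.Apply(); // Emits glEnable(GL_CULL_FACE)
    state.Apply(); // Emits nothing: every field matches the cache
}
```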
+ +#pragma once + +#include <vulkan/vulkan.hpp> + +namespace Vulkan { + +// vulkan.hpp unique handlers use DispatchLoaderStatic +template <typename T> +using UniqueHandle = vk::UniqueHandle<T, vk::DispatchLoaderDynamic>; + +using UniqueAccelerationStructureNV = UniqueHandle<vk::AccelerationStructureNV>; +using UniqueBuffer = UniqueHandle<vk::Buffer>; +using UniqueBufferView = UniqueHandle<vk::BufferView>; +using UniqueCommandBuffer = UniqueHandle<vk::CommandBuffer>; +using UniqueCommandPool = UniqueHandle<vk::CommandPool>; +using UniqueDescriptorPool = UniqueHandle<vk::DescriptorPool>; +using UniqueDescriptorSet = UniqueHandle<vk::DescriptorSet>; +using UniqueDescriptorSetLayout = UniqueHandle<vk::DescriptorSetLayout>; +using UniqueDescriptorUpdateTemplate = UniqueHandle<vk::DescriptorUpdateTemplate>; +using UniqueDevice = UniqueHandle<vk::Device>; +using UniqueDeviceMemory = UniqueHandle<vk::DeviceMemory>; +using UniqueEvent = UniqueHandle<vk::Event>; +using UniqueFence = UniqueHandle<vk::Fence>; +using UniqueFramebuffer = UniqueHandle<vk::Framebuffer>; +using UniqueImage = UniqueHandle<vk::Image>; +using UniqueImageView = UniqueHandle<vk::ImageView>; +using UniqueIndirectCommandsLayoutNVX = UniqueHandle<vk::IndirectCommandsLayoutNVX>; +using UniqueObjectTableNVX = UniqueHandle<vk::ObjectTableNVX>; +using UniquePipeline = UniqueHandle<vk::Pipeline>; +using UniquePipelineCache = UniqueHandle<vk::PipelineCache>; +using UniquePipelineLayout = UniqueHandle<vk::PipelineLayout>; +using UniqueQueryPool = UniqueHandle<vk::QueryPool>; +using UniqueRenderPass = UniqueHandle<vk::RenderPass>; +using UniqueSampler = UniqueHandle<vk::Sampler>; +using UniqueSamplerYcbcrConversion = UniqueHandle<vk::SamplerYcbcrConversion>; +using UniqueSemaphore = UniqueHandle<vk::Semaphore>; +using UniqueShaderModule = UniqueHandle<vk::ShaderModule>; +using UniqueSwapchainKHR = UniqueHandle<vk::SwapchainKHR>; +using UniqueValidationCacheEXT = UniqueHandle<vk::ValidationCacheEXT>; + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp new file mode 100644 index 000000000..78a4e5f0e --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_device.cpp @@ -0,0 +1,231 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
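declarations.h re-aliases vulkan.hpp's unique handles over vk::DispatchLoaderDynamic because the library defaults them to DispatchLoaderStatic. A fragment showing how such an alias is constructed, assuming the Vulkan-Hpp headers and an already-created device and loader — MakeFence is a hypothetical helper mirroring the UniqueDevice construction in vk_device.cpp, not code from the patch:

```cpp
#include <vulkan/vulkan.hpp>

namespace Vulkan {

// vulkan.hpp's vk::UniqueFence et al. default to DispatchLoaderStatic; these
// aliases route destruction through runtime-loaded function pointers instead.
template <typename T>
using UniqueHandle = vk::UniqueHandle<T, vk::DispatchLoaderDynamic>;
using UniqueFence = UniqueHandle<vk::Fence>;

// Hypothetical helper: create a fence and wrap it so its destructor calls
// vkDestroyFence through the dynamic loader.
UniqueFence MakeFence(vk::Device device, const vk::DispatchLoaderDynamic& dld) {
    const vk::Fence fence = device.createFence(vk::FenceCreateInfo{}, nullptr, dld);
    return UniqueFence(
        fence, vk::ObjectDestroy<vk::Device, vk::DispatchLoaderDynamic>(device, nullptr, dld));
}

} // namespace Vulkan
```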
+ +#include <map> +#include <optional> +#include <set> +#include <vector> +#include "common/assert.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_device.h" + +namespace Vulkan { + +namespace Alternatives { + +constexpr std::array<vk::Format, 3> Depth24UnormS8Uint = { + vk::Format::eD32SfloatS8Uint, vk::Format::eD16UnormS8Uint, {}}; +constexpr std::array<vk::Format, 3> Depth16UnormS8Uint = { + vk::Format::eD24UnormS8Uint, vk::Format::eD32SfloatS8Uint, {}}; + +} // namespace Alternatives + +constexpr const vk::Format* GetFormatAlternatives(vk::Format format) { + switch (format) { + case vk::Format::eD24UnormS8Uint: + return Alternatives::Depth24UnormS8Uint.data(); + case vk::Format::eD16UnormS8Uint: + return Alternatives::Depth16UnormS8Uint.data(); + default: + return nullptr; + } +} + +constexpr vk::FormatFeatureFlags GetFormatFeatures(vk::FormatProperties properties, + FormatType format_type) { + switch (format_type) { + case FormatType::Linear: + return properties.linearTilingFeatures; + case FormatType::Optimal: + return properties.optimalTilingFeatures; + case FormatType::Buffer: + return properties.bufferFeatures; + default: + return {}; + } +} + +VKDevice::VKDevice(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical, + vk::SurfaceKHR surface) + : physical{physical}, format_properties{GetFormatProperties(dldi, physical)} { + SetupFamilies(dldi, surface); + SetupProperties(dldi); +} + +VKDevice::~VKDevice() = default; + +bool VKDevice::Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instance) { + const auto queue_cis = GetDeviceQueueCreateInfos(); + vk::PhysicalDeviceFeatures device_features{}; + + const std::vector<const char*> extensions = {VK_KHR_SWAPCHAIN_EXTENSION_NAME}; + const vk::DeviceCreateInfo device_ci({}, static_cast<u32>(queue_cis.size()), queue_cis.data(), + 0, nullptr, static_cast<u32>(extensions.size()), + extensions.data(), &device_features); + vk::Device dummy_logical; + if (physical.createDevice(&device_ci, nullptr, &dummy_logical, dldi) != vk::Result::eSuccess) { + LOG_CRITICAL(Render_Vulkan, "Logical device failed to be created!"); + return false; + } + + dld.init(instance, dldi.vkGetInstanceProcAddr, dummy_logical, dldi.vkGetDeviceProcAddr); + logical = UniqueDevice( + dummy_logical, vk::ObjectDestroy<vk::NoParent, vk::DispatchLoaderDynamic>(nullptr, dld)); + + graphics_queue = logical->getQueue(graphics_family, 0, dld); + present_queue = logical->getQueue(present_family, 0, dld); + return true; +} + +vk::Format VKDevice::GetSupportedFormat(vk::Format wanted_format, + vk::FormatFeatureFlags wanted_usage, + FormatType format_type) const { + if (IsFormatSupported(wanted_format, wanted_usage, format_type)) { + return wanted_format; + } + // The wanted format is not supported by hardware, search for alternatives + const vk::Format* alternatives = GetFormatAlternatives(wanted_format); + if (alternatives == nullptr) { + LOG_CRITICAL(Render_Vulkan, + "Format={} with usage={} and type={} has no defined alternatives and host " + "hardware does not support it", + static_cast<u32>(wanted_format), static_cast<u32>(wanted_usage), + static_cast<u32>(format_type)); + UNREACHABLE(); + return wanted_format; + } + + std::size_t i = 0; + for (vk::Format alternative = alternatives[0]; alternative != vk::Format{}; + alternative = alternatives[++i]) { + if (!IsFormatSupported(alternative, wanted_usage, format_type)) + continue; + LOG_WARNING(Render_Vulkan, + "Emulating format={} with alternative format={} with 
usage={} and type={}", + static_cast<u32>(wanted_format), static_cast<u32>(alternative), + static_cast<u32>(wanted_usage), static_cast<u32>(format_type)); + return alternative; + } + + // No alternatives found, panic + LOG_CRITICAL(Render_Vulkan, + "Format={} with usage={} and type={} is not supported by the host hardware and " + "doesn't support any of the alternatives", + static_cast<u32>(wanted_format), static_cast<u32>(wanted_usage), + static_cast<u32>(format_type)); + UNREACHABLE(); + return wanted_format; +} + +bool VKDevice::IsFormatSupported(vk::Format wanted_format, vk::FormatFeatureFlags wanted_usage, + FormatType format_type) const { + const auto it = format_properties.find(wanted_format); + if (it == format_properties.end()) { + LOG_CRITICAL(Render_Vulkan, "Unimplemented format query={}", + static_cast<u32>(wanted_format)); + UNREACHABLE(); + return true; + } + const vk::FormatFeatureFlags supported_usage = GetFormatFeatures(it->second, format_type); + return (supported_usage & wanted_usage) == wanted_usage; +} + +bool VKDevice::IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical, + vk::SurfaceKHR surface) { + const std::string swapchain_extension = VK_KHR_SWAPCHAIN_EXTENSION_NAME; + + bool has_swapchain{}; + for (const auto& prop : physical.enumerateDeviceExtensionProperties(nullptr, dldi)) { + has_swapchain |= prop.extensionName == swapchain_extension; + } + if (!has_swapchain) { + // The device doesn't support creating swapchains. + return false; + } + + bool has_graphics{}, has_present{}; + const auto queue_family_properties = physical.getQueueFamilyProperties(dldi); + for (u32 i = 0; i < static_cast<u32>(queue_family_properties.size()); ++i) { + const auto& family = queue_family_properties[i]; + if (family.queueCount == 0) + continue; + + has_graphics |= + (family.queueFlags & vk::QueueFlagBits::eGraphics) != static_cast<vk::QueueFlagBits>(0); + has_present |= physical.getSurfaceSupportKHR(i, surface, dldi) != 0; + } + if (!has_graphics || !has_present) { + // The device doesn't have a graphics and present queue. + return false; + } + + // TODO(Rodrigo): Check if the device matches all requeriments. + const vk::PhysicalDeviceProperties props = physical.getProperties(dldi); + if (props.limits.maxUniformBufferRange < 65536) { + return false; + } + + // Device is suitable. 
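VKDevice::GetSupportedFormat walks a zero-terminated array of fallback formats when the ideal one lacks the wanted features, asserting if no alternative is defined or supported. A simplified standalone version of that walk, with an invented Format enum and a stubbed support query in place of vk::FormatProperties:

```cpp
#include <array>
#include <cstddef>

enum class Format { Undefined, D24S8, D32S8, D16S8 };

// Zero-terminated fallback chain, mirroring the Alternatives:: arrays.
constexpr std::array<Format, 3> Depth24Alternatives{Format::D32S8, Format::D16S8,
                                                    Format::Undefined};

constexpr const Format* GetFormatAlternatives(Format format) {
    return format == Format::D24S8 ? Depth24Alternatives.data() : nullptr;
}

// Stubbed support query; the real code checks vk::FormatProperties features.
bool IsFormatSupported(Format format) {
    return format == Format::D32S8;
}

Format GetSupportedFormat(Format wanted) {
    if (IsFormatSupported(wanted)) {
        return wanted;
    }
    const Format* alternatives = GetFormatAlternatives(wanted);
    if (alternatives == nullptr) {
        return wanted; // The real code logs and asserts here
    }
    for (std::size_t i = 0; alternatives[i] != Format::Undefined; ++i) {
        if (IsFormatSupported(alternatives[i])) {
            return alternatives[i]; // Emulate `wanted` with this format
        }
    }
    return wanted; // No usable alternative; the real code asserts
}

int main() {
    return GetSupportedFormat(Format::D24S8) == Format::D32S8 ? 0 : 1;
}
```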
+ return true; +} + +void VKDevice::SetupFamilies(const vk::DispatchLoaderDynamic& dldi, vk::SurfaceKHR surface) { + std::optional<u32> graphics_family_, present_family_; + + const auto queue_family_properties = physical.getQueueFamilyProperties(dldi); + for (u32 i = 0; i < static_cast<u32>(queue_family_properties.size()); ++i) { + if (graphics_family_ && present_family_) + break; + + const auto& queue_family = queue_family_properties[i]; + if (queue_family.queueCount == 0) + continue; + + if (queue_family.queueFlags & vk::QueueFlagBits::eGraphics) + graphics_family_ = i; + if (physical.getSurfaceSupportKHR(i, surface, dldi)) + present_family_ = i; + } + ASSERT(graphics_family_ && present_family_); + + graphics_family = *graphics_family_; + present_family = *present_family_; +} + +void VKDevice::SetupProperties(const vk::DispatchLoaderDynamic& dldi) { + const vk::PhysicalDeviceProperties props = physical.getProperties(dldi); + device_type = props.deviceType; + uniform_buffer_alignment = static_cast<u64>(props.limits.minUniformBufferOffsetAlignment); +} + +std::vector<vk::DeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() const { + static const float QUEUE_PRIORITY = 1.f; + + std::set<u32> unique_queue_families = {graphics_family, present_family}; + std::vector<vk::DeviceQueueCreateInfo> queue_cis; + + for (u32 queue_family : unique_queue_families) + queue_cis.push_back({{}, queue_family, 1, &QUEUE_PRIORITY}); + + return queue_cis; +} + +std::map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperties( + const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical) { + std::map<vk::Format, vk::FormatProperties> format_properties; + + const auto AddFormatQuery = [&format_properties, &dldi, physical](vk::Format format) { + format_properties.emplace(format, physical.getFormatProperties(format, dldi)); + }; + AddFormatQuery(vk::Format::eA8B8G8R8UnormPack32); + AddFormatQuery(vk::Format::eR5G6B5UnormPack16); + AddFormatQuery(vk::Format::eD32Sfloat); + AddFormatQuery(vk::Format::eD16UnormS8Uint); + AddFormatQuery(vk::Format::eD24UnormS8Uint); + AddFormatQuery(vk::Format::eD32SfloatS8Uint); + + return format_properties; +} + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h new file mode 100644 index 000000000..e87c7a508 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_device.h @@ -0,0 +1,116 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <map> +#include <vector> +#include "common/common_types.h" +#include "video_core/renderer_vulkan/declarations.h" + +namespace Vulkan { + +/// Format usage descriptor +enum class FormatType { Linear, Optimal, Buffer }; + +/// Handles data specific to a physical device. +class VKDevice final { +public: + explicit VKDevice(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical, + vk::SurfaceKHR surface); + ~VKDevice(); + + /// Initializes the device. Returns true on success. + bool Create(const vk::DispatchLoaderDynamic& dldi, vk::Instance instance); + + /** + * Returns a format supported by the device for the passed requeriments. + * @param wanted_format The ideal format to be returned. It may not be the returned format. + * @param wanted_usage The usage that must be fulfilled even if the format is not supported. + * @param format_type Format type usage. + * @returns A format supported by the device. 
+ */ + vk::Format GetSupportedFormat(vk::Format wanted_format, vk::FormatFeatureFlags wanted_usage, + FormatType format_type) const; + + /// Returns the dispatch loader with direct function pointers of the device + const vk::DispatchLoaderDynamic& GetDispatchLoader() const { + return dld; + } + + /// Returns the logical device + vk::Device GetLogical() const { + return logical.get(); + } + + /// Returns the physical device. + vk::PhysicalDevice GetPhysical() const { + return physical; + } + + /// Returns the main graphics queue. + vk::Queue GetGraphicsQueue() const { + return graphics_queue; + } + + /// Returns the main present queue. + vk::Queue GetPresentQueue() const { + return present_queue; + } + + /// Returns main graphics queue family index. + u32 GetGraphicsFamily() const { + return graphics_family; + } + + /// Returns main present queue family index. + u32 GetPresentFamily() const { + return present_family; + } + + /// Returns if the device is integrated with the host CPU + bool IsIntegrated() const { + return device_type == vk::PhysicalDeviceType::eIntegratedGpu; + } + + /// Returns uniform buffer alignment requeriment + u64 GetUniformBufferAlignment() const { + return uniform_buffer_alignment; + } + + /// Checks if the physical device is suitable. + static bool IsSuitable(const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical, + vk::SurfaceKHR surface); + +private: + /// Sets up queue families. + void SetupFamilies(const vk::DispatchLoaderDynamic& dldi, vk::SurfaceKHR surface); + + /// Sets up device properties. + void SetupProperties(const vk::DispatchLoaderDynamic& dldi); + + /// Returns a list of queue initialization descriptors. + std::vector<vk::DeviceQueueCreateInfo> GetDeviceQueueCreateInfos() const; + + /// Returns true if a format is supported. + bool IsFormatSupported(vk::Format wanted_format, vk::FormatFeatureFlags wanted_usage, + FormatType format_type) const; + + /// Returns the device properties for Vulkan formats. + static std::map<vk::Format, vk::FormatProperties> GetFormatProperties( + const vk::DispatchLoaderDynamic& dldi, vk::PhysicalDevice physical); + + const vk::PhysicalDevice physical; ///< Physical device + vk::DispatchLoaderDynamic dld; ///< Device function pointers + UniqueDevice logical; ///< Logical device + vk::Queue graphics_queue; ///< Main graphics queue + vk::Queue present_queue; ///< Main present queue + u32 graphics_family{}; ///< Main graphics queue family index + u32 present_family{}; ///< Main present queue family index + vk::PhysicalDeviceType device_type; ///< Physical device type + u64 uniform_buffer_alignment{}; ///< Uniform buffer alignment requeriment + std::map<vk::Format, vk::FormatProperties> format_properties; ///< Format properties dictionary +}; + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp new file mode 100644 index 000000000..17ee93b91 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_memory_manager.cpp @@ -0,0 +1,252 @@ +// Copyright 2018 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
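SetupFamilies and IsSuitable both scan the physical device's queue families, skipping empty ones and recording a graphics-capable family plus one that can present to the surface. The same scan, condensed over plain data — QueueFamily is an invented stand-in for vk::QueueFamilyProperties combined with the getSurfaceSupportKHR query:

```cpp
#include <cstdint>
#include <optional>
#include <vector>

// Invented stand-in for vk::QueueFamilyProperties plus the surface query.
struct QueueFamily {
    std::uint32_t queue_count;
    bool graphics;    // vk::QueueFlagBits::eGraphics set?
    bool can_present; // getSurfaceSupportKHR(...) != 0?
};

struct Families {
    std::uint32_t graphics;
    std::uint32_t present;
};

std::optional<Families> PickFamilies(const std::vector<QueueFamily>& families) {
    std::optional<std::uint32_t> graphics, present;
    for (std::uint32_t i = 0; i < static_cast<std::uint32_t>(families.size()); ++i) {
        const QueueFamily& family = families[i];
        if (family.queue_count == 0) {
            continue; // Empty families are skipped, as in the diff
        }
        if (family.graphics) {
            graphics = i;
        }
        if (family.can_present) {
            present = i;
        }
        if (graphics && present) {
            break; // Both roles filled; stop scanning
        }
    }
    if (!graphics || !present) {
        return std::nullopt; // IsSuitable() rejects such a device
    }
    return Families{*graphics, *present};
}

int main() {
    const auto families = PickFamilies({{1, true, false}, {1, false, true}});
    return families && families->graphics == 0 && families->present == 1 ? 0 : 1;
}
```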
+ +#include <algorithm> +#include <optional> +#include <tuple> +#include <vector> +#include "common/alignment.h" +#include "common/assert.h" +#include "common/common_types.h" +#include "common/logging/log.h" +#include "video_core/renderer_vulkan/declarations.h" +#include "video_core/renderer_vulkan/vk_device.h" +#include "video_core/renderer_vulkan/vk_memory_manager.h" + +namespace Vulkan { + +// TODO(Rodrigo): Fine tune this number +constexpr u64 ALLOC_CHUNK_SIZE = 64 * 1024 * 1024; + +class VKMemoryAllocation final { +public: + explicit VKMemoryAllocation(const VKDevice& device, vk::DeviceMemory memory, + vk::MemoryPropertyFlags properties, u64 alloc_size, u32 type) + : device{device}, memory{memory}, properties{properties}, alloc_size{alloc_size}, + shifted_type{ShiftType(type)}, is_mappable{properties & + vk::MemoryPropertyFlagBits::eHostVisible} { + if (is_mappable) { + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + base_address = static_cast<u8*>(dev.mapMemory(memory, 0, alloc_size, {}, dld)); + } + } + + ~VKMemoryAllocation() { + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + if (is_mappable) + dev.unmapMemory(memory, dld); + dev.free(memory, nullptr, dld); + } + + VKMemoryCommit Commit(vk::DeviceSize commit_size, vk::DeviceSize alignment) { + auto found = TryFindFreeSection(free_iterator, alloc_size, static_cast<u64>(commit_size), + static_cast<u64>(alignment)); + if (!found) { + found = TryFindFreeSection(0, free_iterator, static_cast<u64>(commit_size), + static_cast<u64>(alignment)); + if (!found) { + // Signal out of memory, it'll try to do more allocations. + return nullptr; + } + } + u8* address = is_mappable ? base_address + *found : nullptr; + auto commit = std::make_unique<VKMemoryCommitImpl>(this, memory, address, *found, + *found + commit_size); + commits.push_back(commit.get()); + + // Last commit's address is highly probable to be free. + free_iterator = *found + commit_size; + + return commit; + } + + void Free(const VKMemoryCommitImpl* commit) { + ASSERT(commit); + const auto it = + std::find_if(commits.begin(), commits.end(), + [&](const auto& stored_commit) { return stored_commit == commit; }); + if (it == commits.end()) { + LOG_CRITICAL(Render_Vulkan, "Freeing unallocated commit!"); + UNREACHABLE(); + return; + } + commits.erase(it); + } + + /// Returns whether this allocation is compatible with the arguments. + bool IsCompatible(vk::MemoryPropertyFlags wanted_properties, u32 type_mask) const { + return (wanted_properties & properties) != vk::MemoryPropertyFlagBits(0) && + (type_mask & shifted_type) != 0; + } + +private: + static constexpr u32 ShiftType(u32 type) { + return 1U << type; + } + + /// A memory allocator, it may return a free region between "start" and "end" with the solicited + /// requeriments. + std::optional<u64> TryFindFreeSection(u64 start, u64 end, u64 size, u64 alignment) const { + u64 iterator = start; + while (iterator + size < end) { + const u64 try_left = Common::AlignUp(iterator, alignment); + const u64 try_right = try_left + size; + + bool overlap = false; + for (const auto& commit : commits) { + const auto [commit_left, commit_right] = commit->interval; + if (try_left < commit_right && commit_left < try_right) { + // There's an overlap, continue the search where the overlapping commit ends. + iterator = commit_right; + overlap = true; + break; + } + } + if (!overlap) { + // A free address has been found. 
+ return try_left; + } + } + // No free regions where found, return an empty optional. + return std::nullopt; + } + + const VKDevice& device; ///< Vulkan device. + const vk::DeviceMemory memory; ///< Vulkan memory allocation handler. + const vk::MemoryPropertyFlags properties; ///< Vulkan properties. + const u64 alloc_size; ///< Size of this allocation. + const u32 shifted_type; ///< Stored Vulkan type of this allocation, shifted. + const bool is_mappable; ///< Whether the allocation is mappable. + + /// Base address of the mapped pointer. + u8* base_address{}; + + /// Hints where the next free region is likely going to be. + u64 free_iterator{}; + + /// Stores all commits done from this allocation. + std::vector<const VKMemoryCommitImpl*> commits; +}; + +VKMemoryManager::VKMemoryManager(const VKDevice& device) + : device{device}, props{device.GetPhysical().getMemoryProperties(device.GetDispatchLoader())}, + is_memory_unified{GetMemoryUnified(props)} {} + +VKMemoryManager::~VKMemoryManager() = default; + +VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& reqs, bool host_visible) { + ASSERT(reqs.size < ALLOC_CHUNK_SIZE); + + // When a host visible commit is asked, search for host visible and coherent, otherwise search + // for a fast device local type. + const vk::MemoryPropertyFlags wanted_properties = + host_visible + ? vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent + : vk::MemoryPropertyFlagBits::eDeviceLocal; + + const auto TryCommit = [&]() -> VKMemoryCommit { + for (auto& alloc : allocs) { + if (!alloc->IsCompatible(wanted_properties, reqs.memoryTypeBits)) + continue; + + if (auto commit = alloc->Commit(reqs.size, reqs.alignment); commit) { + return commit; + } + } + return {}; + }; + + if (auto commit = TryCommit(); commit) { + return commit; + } + + // Commit has failed, allocate more memory. + if (!AllocMemory(wanted_properties, reqs.memoryTypeBits, ALLOC_CHUNK_SIZE)) { + // TODO(Rodrigo): Try to use host memory. + LOG_CRITICAL(Render_Vulkan, "Ran out of memory!"); + UNREACHABLE(); + } + + // Commit again, this time it won't fail since there's a fresh allocation above. If it does, + // there's a bug. + auto commit = TryCommit(); + ASSERT(commit); + return commit; +} + +VKMemoryCommit VKMemoryManager::Commit(vk::Buffer buffer, bool host_visible) { + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + const auto requeriments = dev.getBufferMemoryRequirements(buffer, dld); + auto commit = Commit(requeriments, host_visible); + dev.bindBufferMemory(buffer, commit->GetMemory(), commit->GetOffset(), dld); + return commit; +} + +VKMemoryCommit VKMemoryManager::Commit(vk::Image image, bool host_visible) { + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + const auto requeriments = dev.getImageMemoryRequirements(image, dld); + auto commit = Commit(requeriments, host_visible); + dev.bindImageMemory(image, commit->GetMemory(), commit->GetOffset(), dld); + return commit; +} + +bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask, + u64 size) { + const u32 type = [&]() { + for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) { + const auto flags = props.memoryTypes[type_index].propertyFlags; + if ((type_mask & (1U << type_index)) && (flags & wanted_properties)) { + // The type matches in type and in the wanted properties. 
+ return type_index; + } + } + LOG_CRITICAL(Render_Vulkan, "Couldn't find a compatible memory type!"); + UNREACHABLE(); + return 0u; + }(); + + const auto dev = device.GetLogical(); + const auto& dld = device.GetDispatchLoader(); + + // Try to allocate found type. + const vk::MemoryAllocateInfo memory_ai(size, type); + vk::DeviceMemory memory; + if (const vk::Result res = dev.allocateMemory(&memory_ai, nullptr, &memory, dld); + res != vk::Result::eSuccess) { + LOG_CRITICAL(Render_Vulkan, "Device allocation failed with code {}!", vk::to_string(res)); + return false; + } + allocs.push_back( + std::make_unique<VKMemoryAllocation>(device, memory, wanted_properties, size, type)); + return true; +} + +/*static*/ bool VKMemoryManager::GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& props) { + for (u32 heap_index = 0; heap_index < props.memoryHeapCount; ++heap_index) { + if (!(props.memoryHeaps[heap_index].flags & vk::MemoryHeapFlagBits::eDeviceLocal)) { + // Memory is considered unified when heaps are device local only. + return false; + } + } + return true; +} + +VKMemoryCommitImpl::VKMemoryCommitImpl(VKMemoryAllocation* allocation, vk::DeviceMemory memory, + u8* data, u64 begin, u64 end) + : allocation{allocation}, memory{memory}, data{data}, interval(std::make_pair(begin, end)) {} + +VKMemoryCommitImpl::~VKMemoryCommitImpl() { + allocation->Free(this); +} + +u8* VKMemoryCommitImpl::GetData() const { + ASSERT_MSG(data != nullptr, "Trying to access an unmapped commit."); + return data; +} + +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h new file mode 100644 index 000000000..073597b35 --- /dev/null +++ b/src/video_core/renderer_vulkan/vk_memory_manager.h @@ -0,0 +1,87 @@ +// Copyright 2019 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <memory> +#include <utility> +#include <vector> +#include "common/common_types.h" +#include "video_core/renderer_vulkan/declarations.h" + +namespace Vulkan { + +class VKDevice; +class VKMemoryAllocation; +class VKMemoryCommitImpl; + +using VKMemoryCommit = std::unique_ptr<VKMemoryCommitImpl>; + +class VKMemoryManager final { +public: + explicit VKMemoryManager(const VKDevice& device); + ~VKMemoryManager(); + + /** + * Commits a memory with the specified requeriments. + * @param reqs Requeriments returned from a Vulkan call. + * @param host_visible Signals the allocator that it *must* use host visible and coherent + * memory. When passing false, it will try to allocate device local memory. + * @returns A memory commit. + */ + VKMemoryCommit Commit(const vk::MemoryRequirements& reqs, bool host_visible); + + /// Commits memory required by the buffer and binds it. + VKMemoryCommit Commit(vk::Buffer buffer, bool host_visible); + + /// Commits memory required by the image and binds it. + VKMemoryCommit Commit(vk::Image image, bool host_visible); + + /// Returns true if the memory allocations are done always in host visible and coherent memory. + bool IsMemoryUnified() const { + return is_memory_unified; + } + +private: + /// Allocates a chunk of memory. + bool AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask, u64 size); + + /// Returns true if the device uses an unified memory model. + static bool GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& props); + + const VKDevice& device; ///< Device handler. 
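VKMemoryAllocation sub-allocates commits out of the 64 MiB ALLOC_CHUNK_SIZE chunks with a first-fit search: align a candidate offset, reject it if it overlaps any live commit, and resume past the blocking commit. This sketch isolates TryFindFreeSection's logic; in the diff, Commit() additionally searches [free_iterator, end) first and wraps around to [0, free_iterator) before reporting the chunk as full:

```cpp
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

using Interval = std::pair<std::uint64_t, std::uint64_t>; // [left, right)

constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
    return (value + align - 1) / align * align;
}

// First-fit: return an aligned offset in [start, end) that overlaps no live
// commit, or nullopt to signal "this chunk is full, allocate another one".
std::optional<std::uint64_t> TryFindFreeSection(std::uint64_t start, std::uint64_t end,
                                                std::uint64_t size, std::uint64_t alignment,
                                                const std::vector<Interval>& commits) {
    std::uint64_t iterator = start;
    while (iterator + size < end) {
        const std::uint64_t try_left = AlignUp(iterator, alignment);
        const std::uint64_t try_right = try_left + size;
        bool overlap = false;
        for (const auto& [commit_left, commit_right] : commits) {
            if (try_left < commit_right && commit_left < try_right) {
                iterator = commit_right; // Resume where the blocker ends
                overlap = true;
                break;
            }
        }
        if (!overlap) {
            return try_left; // Free, aligned region found
        }
    }
    return std::nullopt;
}

int main() {
    // One live commit at [0, 256): the next 64-byte, 64-aligned slot is 256.
    const auto offset = TryFindFreeSection(0, 4096, 64, 64, {{0, 256}});
    return offset.value_or(0) == 256 ? 0 : 1;
}
```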
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.cpp b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
new file mode 100644
index 000000000..1678463c7
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
@@ -0,0 +1,285 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <limits>
+#include <optional>
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_resource_manager.h"
+
+namespace Vulkan {
+
+// TODO(Rodrigo): Fine tune these numbers.
+constexpr std::size_t COMMAND_BUFFER_POOL_SIZE = 0x1000;
+constexpr std::size_t FENCES_GROW_STEP = 0x40;
+
+class CommandBufferPool final : public VKFencedPool {
+public:
+    CommandBufferPool(const VKDevice& device)
+        : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {}
+
+    void Allocate(std::size_t begin, std::size_t end) {
+        const auto dev = device.GetLogical();
+        const auto& dld = device.GetDispatchLoader();
+        const u32 graphics_family = device.GetGraphicsFamily();
+
+        auto pool = std::make_unique<Pool>();
+
+        // Command buffers are going to be committed, recorded and executed every usage cycle.
+        // They are also going to be reset when committed.
+        const auto pool_flags = vk::CommandPoolCreateFlagBits::eTransient |
+                                vk::CommandPoolCreateFlagBits::eResetCommandBuffer;
+        const vk::CommandPoolCreateInfo cmdbuf_pool_ci(pool_flags, graphics_family);
+        pool->handle = dev.createCommandPoolUnique(cmdbuf_pool_ci, nullptr, dld);
+
+        const vk::CommandBufferAllocateInfo cmdbuf_ai(*pool->handle,
+                                                      vk::CommandBufferLevel::ePrimary,
+                                                      static_cast<u32>(COMMAND_BUFFER_POOL_SIZE));
+        pool->cmdbufs =
+            dev.allocateCommandBuffersUnique<std::allocator<UniqueCommandBuffer>>(cmdbuf_ai, dld);
+
+        pools.push_back(std::move(pool));
+    }
+
+    vk::CommandBuffer Commit(VKFence& fence) {
+        const std::size_t index = CommitResource(fence);
+        const auto pool_index = index / COMMAND_BUFFER_POOL_SIZE;
+        const auto sub_index = index % COMMAND_BUFFER_POOL_SIZE;
+        return *pools[pool_index]->cmdbufs[sub_index];
+    }
+
+private:
+    struct Pool {
+        UniqueCommandPool handle;
+        std::vector<UniqueCommandBuffer> cmdbufs;
+    };
+
+    const VKDevice& device;
+
+    std::vector<std::unique_ptr<Pool>> pools;
+};
+
+VKResource::VKResource() = default;
+
+VKResource::~VKResource() = default;
+
+VKFence::VKFence(const VKDevice& device, UniqueFence handle)
+    : device{device}, handle{std::move(handle)} {}
+
+VKFence::~VKFence() = default;
+
+void VKFence::Wait() {
+    const auto dev = device.GetLogical();
+    const auto& dld = device.GetDispatchLoader();
+    dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld);
+}
+
+void VKFence::Release() {
+    is_owned = false;
+}
+
+void VKFence::Commit() {
+    is_owned = true;
+    is_used = true;
+}
+
+bool VKFence::Tick(bool gpu_wait, bool owner_wait) {
+    if (!is_used) {
+        // If a fence is not used it's always free.
+        return true;
+    }
+    if (is_owned && !owner_wait) {
+        // The fence is still owned (Release has not been called) and an ownership wait has not
+        // been requested.
+        return false;
+    }
+
+    const auto dev = device.GetLogical();
+    const auto& dld = device.GetDispatchLoader();
+    if (gpu_wait) {
+        // Wait for the fence if it has been requested.
+        dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld);
+    } else {
+        if (dev.getFenceStatus(*handle, dld) != vk::Result::eSuccess) {
+            // The Vulkan fence is not ready; there is not much that can be done here.
+            return false;
+        }
+    }
+
+    // Broadcast to the protected resources that they are free.
+    for (auto* resource : protected_resources) {
+        resource->OnFenceRemoval(this);
+    }
+    protected_resources.clear();
+
+    // Prepare the fence for reuse.
+    dev.resetFences({*handle}, dld);
+    is_used = false;
+    return true;
+}
+
+void VKFence::Protect(VKResource* resource) {
+    protected_resources.push_back(resource);
+}
+
+void VKFence::Unprotect(const VKResource* resource) {
+    const auto it = std::find(protected_resources.begin(), protected_resources.end(), resource);
+    if (it != protected_resources.end()) {
+        protected_resources.erase(it);
+    }
+}
+
+VKFenceWatch::VKFenceWatch() = default;
+
+VKFenceWatch::~VKFenceWatch() {
+    if (fence) {
+        fence->Unprotect(this);
+    }
+}
+
+void VKFenceWatch::Wait() {
+    if (!fence) {
+        return;
+    }
+    fence->Wait();
+    fence->Unprotect(this);
+    fence = nullptr;
+}
+
+void VKFenceWatch::Watch(VKFence& new_fence) {
+    Wait();
+    fence = &new_fence;
+    fence->Protect(this);
+}
+
+bool VKFenceWatch::TryWatch(VKFence& new_fence) {
+    if (fence) {
+        return false;
+    }
+    fence = &new_fence;
+    fence->Protect(this);
+    return true;
+}
+
+void VKFenceWatch::OnFenceRemoval(VKFence* signaling_fence) {
+    ASSERT_MSG(signaling_fence == fence, "Removing the wrong fence");
+    fence = nullptr;
+}
+
+VKFencedPool::VKFencedPool(std::size_t grow_step) : grow_step{grow_step} {}
+
+VKFencedPool::~VKFencedPool() = default;
+
+std::size_t VKFencedPool::CommitResource(VKFence& fence) {
+    const auto Search = [&](std::size_t begin, std::size_t end) -> std::optional<std::size_t> {
+        for (std::size_t iterator = begin; iterator < end; ++iterator) {
+            if (watches[iterator]->TryWatch(fence)) {
+                // The resource is now being watched; a free resource was successfully found.
+                return iterator;
+            }
+        }
+        return {};
+    };
+    // Try to find a free resource from the hinted position to the end.
+    auto found = Search(free_iterator, watches.size());
+    if (!found) {
+        // Search from the beginning to the hinted position.
+        found = Search(0, free_iterator);
+        if (!found) {
+            // Both searches failed; the pool is full, so handle the overflow.
+            const std::size_t free_resource = ManageOverflow();
+
+            // Watch will wait for the resource to be free.
+            watches[free_resource]->Watch(fence);
+            found = free_resource;
+        }
+    }
+    // The free iterator is hinted to the resource after the one that has just been committed.
+    free_iterator = (*found + 1) % watches.size();
+    return *found;
+}
+
+std::size_t VKFencedPool::ManageOverflow() {
+    const std::size_t old_capacity = watches.size();
+    Grow();
+
+    // The last entry is guaranteed to be free, since it's the first element of the freshly
+    // allocated resources.
+    return old_capacity;
+}
+
+void VKFencedPool::Grow() {
+    const std::size_t old_capacity = watches.size();
+    watches.resize(old_capacity + grow_step);
+    std::generate(watches.begin() + old_capacity, watches.end(),
+                  []() { return std::make_unique<VKFenceWatch>(); });
+    Allocate(old_capacity, old_capacity + grow_step);
+}
+
+VKResourceManager::VKResourceManager(const VKDevice& device) : device{device} {
+    GrowFences(FENCES_GROW_STEP);
+    command_buffer_pool = std::make_unique<CommandBufferPool>(device);
+}
+
+VKResourceManager::~VKResourceManager() = default;
+
+VKFence& VKResourceManager::CommitFence() {
+    const auto StepFences = [&](bool gpu_wait, bool owner_wait) -> VKFence* {
+        const auto Tick = [=](auto& fence) { return fence->Tick(gpu_wait, owner_wait); };
+        const auto hinted = fences.begin() + fences_iterator;
+
+        auto it = std::find_if(hinted, fences.end(), Tick);
+        if (it == fences.end()) {
+            it = std::find_if(fences.begin(), hinted, Tick);
+            if (it == hinted) {
+                return nullptr;
+            }
+        }
+        fences_iterator = std::distance(fences.begin(), it) + 1;
+        if (fences_iterator >= fences.size())
+            fences_iterator = 0;
+
+        auto& fence = *it;
+        fence->Commit();
+        return fence.get();
+    };
+
+    VKFence* found_fence = StepFences(false, false);
+    if (!found_fence) {
+        // Try again, this time waiting.
+        found_fence = StepFences(true, false);
+
+        if (!found_fence) {
+            // Allocate new fences and try again.
+            LOG_INFO(Render_Vulkan, "Allocating new fences {} -> {}", fences.size(),
+                     fences.size() + FENCES_GROW_STEP);
+
+            GrowFences(FENCES_GROW_STEP);
+            found_fence = StepFences(true, false);
+            ASSERT(found_fence != nullptr);
+        }
+    }
+    return *found_fence;
+}
+
+vk::CommandBuffer VKResourceManager::CommitCommandBuffer(VKFence& fence) {
+    return command_buffer_pool->Commit(fence);
+}
+
+void VKResourceManager::GrowFences(std::size_t new_fences_count) {
+    const auto dev = device.GetLogical();
+    const auto& dld = device.GetDispatchLoader();
+    const vk::FenceCreateInfo fence_ci;
+
+    const std::size_t previous_size = fences.size();
+    fences.resize(previous_size + new_fences_count);
+
+    std::generate(fences.begin() + previous_size, fences.end(), [&]() {
+        return std::make_unique<VKFence>(device, dev.createFenceUnique(fence_ci, nullptr, dld));
+    });
+}
+
+} // namespace Vulkan
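The fence life cycle the code above implements, written as a hypothetical caller (the `queue`, `submit_info`, `dld` and `watched_resource` names are assumptions; the flow itself follows the VKFence documentation in the header below):

    VKFence& fence = resource_manager.CommitFence(); // Take ownership of a free fence.
    watched_resource.Watch(fence);                   // Protect a resource with it.
    queue.submit({submit_info}, fence, dld);         // Send it to an execution queue.
    fence.Release();                                 // Give up ownership after submission.
    // A later CommitFence() call Ticks this fence; once the driver signals it, every
    // protected resource receives OnFenceRemoval() and the fence is reset for reuse.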
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.h b/src/video_core/renderer_vulkan/vk_resource_manager.h
new file mode 100644
index 000000000..5018dfa44
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.h
@@ -0,0 +1,180 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <cstddef>
+#include <memory>
+#include <vector>
+#include "video_core/renderer_vulkan/declarations.h"
+
+namespace Vulkan {
+
+class VKDevice;
+class VKFence;
+class VKResourceManager;
+
+class CommandBufferPool;
+
+/// Interface for a Vulkan resource
+class VKResource {
+public:
+    explicit VKResource();
+    virtual ~VKResource();
+
+    /**
+     * Signals the object that an owning fence has been signaled.
+     * @param signaling_fence Fence that signals its usage end.
+     */
+    virtual void OnFenceRemoval(VKFence* signaling_fence) = 0;
+};
+
+/**
+ * Fences take ownership of objects, protecting them from GPU-side or driver-side concurrent
+ * access. They must be committed from the resource manager. Their usage flow is: commit the fence
+ * from the resource manager, protect resources with it and use them, send the fence to an
+ * execution queue, Wait for it if needed, and then call Release. Used resources will
+ * automatically be signaled when they are free to be reused.
+ * @brief Protects resources from concurrent usage and signals their release.
+ */
+class VKFence {
+    friend class VKResourceManager;
+
+public:
+    explicit VKFence(const VKDevice& device, UniqueFence handle);
+    ~VKFence();
+
+    /**
+     * Waits for the fence to be signaled.
+     * @warning You must own the fence and have previously sent it to a queue before calling
+     * this function.
+     */
+    void Wait();
+
+    /**
+     * Releases ownership of the fence. Call it after the fence has been sent to an execution
+     * queue. Unmanaged usage of the fence after this call results in undefined behavior, since
+     * it may already be in use for something else.
+     */
+    void Release();
+
+    /// Protects a resource with this fence.
+    void Protect(VKResource* resource);
+
+    /// Removes protection for a resource.
+    void Unprotect(const VKResource* resource);
+
+    /// Retrieves the fence.
+    operator vk::Fence() const {
+        return *handle;
+    }
+
+private:
+    /// Takes ownership of the fence.
+    void Commit();
+
+    /**
+     * Updates the fence status.
+     * @warning Waiting for the owner might soft lock the execution.
+     * @param gpu_wait Wait for the fence to be signaled by the driver.
+     * @param owner_wait Wait for the owner to signal its freedom.
+     * @returns True if the fence is free. Waiting for the GPU and the owner will always return
+     * true.
+     */
+    bool Tick(bool gpu_wait, bool owner_wait);
+
+    const VKDevice& device;                       ///< Device handler
+    UniqueFence handle;                           ///< Vulkan fence
+    std::vector<VKResource*> protected_resources; ///< List of resources protected by this fence
+    bool is_owned = false; ///< The fence has been committed but not released yet.
+    bool is_used = false;  ///< The fence has been committed but not yet checked to be free.
+};
+
+/**
+ * A fence watch is used to keep track of the usage of a fence and protect a resource or set of
+ * resources without having to inherit VKResource from their handlers.
+ */
+class VKFenceWatch final : public VKResource {
+public:
+    explicit VKFenceWatch();
+    ~VKFenceWatch();
+
+    /// Waits for the fence to be released.
+    void Wait();
+
+    /**
+     * Waits for a previous fence and watches a new one.
+     * @param new_fence New fence to watch.
+     */
+    void Watch(VKFence& new_fence);
+
+    /**
+     * Watches a new fence if the watch is currently free.
+     * @returns True if the watch started; false if another fence is already being watched.
+     */
+    bool TryWatch(VKFence& new_fence);
+
+    void OnFenceRemoval(VKFence* signaling_fence) override;
+
+private:
+    VKFence* fence{}; ///< Fence watching this resource. nullptr when the watch is free.
+};
+
+/**
+ * Handles a pool of resources protected by fences. Manages resource overflow by allocating more
+ * resources.
+ */
+class VKFencedPool {
+public:
+    explicit VKFencedPool(std::size_t grow_step);
+    virtual ~VKFencedPool();
+
+protected:
+    /**
+     * Commits a free resource and protects it with a fence. It may allocate new resources.
+     * @param fence Fence that protects the committed resource.
+     * @returns Index of the committed resource.
+     */
+    std::size_t CommitResource(VKFence& fence);
+
+    /// Called when a chunk of resources has to be allocated.
+    virtual void Allocate(std::size_t begin, std::size_t end) = 0;
+
+private:
+    /// Manages pool overflow by allocating new resources.
+    std::size_t ManageOverflow();
+
+    /// Allocates a new page of resources.
+    void Grow();
+
+    std::size_t grow_step = 0;     ///< Number of new resources created after an overflow
+    std::size_t free_iterator = 0; ///< Hint to where the next free resource is likely to be found
+    std::vector<std::unique_ptr<VKFenceWatch>> watches; ///< Set of watched resources
+};
+
+/**
+ * The resource manager handles all resources that can be protected with a fence, avoiding
+ * driver-side or GPU-side concurrent usage. Usage is documented in VKFence.
+ */
+class VKResourceManager final {
+public:
+    explicit VKResourceManager(const VKDevice& device);
+    ~VKResourceManager();
+
+    /// Commits a fence. It has to be sent to a queue and released.
+    VKFence& CommitFence();
+
+    /// Commits an unused command buffer and protects it with a fence.
+    vk::CommandBuffer CommitCommandBuffer(VKFence& fence);
+
+private:
+    /// Allocates new fences.
+    void GrowFences(std::size_t new_fences_count);
+
+    const VKDevice& device;          ///< Device handler.
+    std::size_t fences_iterator = 0; ///< Index where a free fence is likely to be found.
+    std::vector<std::unique_ptr<VKFence>> fences;           ///< Pool of fences.
+    std::unique_ptr<CommandBufferPool> command_buffer_pool; ///< Pool of command buffers.
+};
+
+} // namespace Vulkan
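CommandBufferPool in vk_resource_manager.cpp is the only concrete VKFencedPool in this change, but any fenced pool follows the same shape. A skeletal sketch, with `HypotheticalObject` standing in for the pooled resource type:

    class HypotheticalPool final : public VKFencedPool {
    public:
        HypotheticalPool() : VKFencedPool(0x100) {} // The grow step is illustrative.

        HypotheticalObject& Commit(VKFence& fence) {
            // CommitResource finds a free slot, or grows the pool and waits on overflow.
            return objects[CommitResource(fence)];
        }

    protected:
        void Allocate(std::size_t begin, std::size_t end) override {
            // Create backing resources for the freshly added page [begin, end).
            objects.resize(end);
        }

    private:
        std::vector<HypotheticalObject> objects;
    };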
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
new file mode 100644
index 000000000..f1fea1871
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -0,0 +1,60 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
+
+namespace Vulkan {
+
+VKScheduler::VKScheduler(const VKDevice& device, VKResourceManager& resource_manager)
+    : device{device}, resource_manager{resource_manager} {
+    next_fence = &resource_manager.CommitFence();
+    AllocateNewContext();
+}
+
+VKScheduler::~VKScheduler() = default;
+
+VKExecutionContext VKScheduler::GetExecutionContext() const {
+    return VKExecutionContext(current_fence, current_cmdbuf);
+}
+
+VKExecutionContext VKScheduler::Flush(vk::Semaphore semaphore) {
+    SubmitExecution(semaphore);
+    current_fence->Release();
+    AllocateNewContext();
+    return GetExecutionContext();
+}
+
+VKExecutionContext VKScheduler::Finish(vk::Semaphore semaphore) {
+    SubmitExecution(semaphore);
+    current_fence->Wait();
+    current_fence->Release();
+    AllocateNewContext();
+    return GetExecutionContext();
+}
+
+void VKScheduler::SubmitExecution(vk::Semaphore semaphore) {
+    const auto& dld = device.GetDispatchLoader();
+    current_cmdbuf.end(dld);
+
+    const auto queue = device.GetGraphicsQueue();
+    const vk::SubmitInfo submit_info(0, nullptr, nullptr, 1, &current_cmdbuf,
+                                     semaphore ? 1u : 0u, &semaphore);
+    queue.submit({submit_info}, *current_fence, dld);
+}
+
+void VKScheduler::AllocateNewContext() {
+    current_fence = next_fence;
+    current_cmdbuf = resource_manager.CommitCommandBuffer(*current_fence);
+    next_fence = &resource_manager.CommitFence();
+
+    const auto& dld = device.GetDispatchLoader();
+    current_cmdbuf.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit}, dld);
+}
+
+} // namespace Vulkan
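Putting the scheduler together with the resource manager, the OpenGL-like flow looks roughly like this (a sketch only; `device`, `resource_manager` and `dld` are assumed to exist, and the draw stands in for any recorded work, which in a real frame would need pipeline and render pass state bound first):

    VKScheduler scheduler(device, resource_manager);
    auto exctx = scheduler.GetExecutionContext();
    exctx.GetCommandBuffer().draw(3, 1, 0, 0, dld); // Record work into the current buffer.
    exctx = scheduler.Flush();  // Submit and keep recording on a fresh context.
    exctx = scheduler.Finish(); // Submit and block until the GPU has finished.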
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
new file mode 100644
index 000000000..cfaf5376f
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -0,0 +1,69 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "video_core/renderer_vulkan/declarations.h"
+
+namespace Vulkan {
+
+class VKDevice;
+class VKExecutionContext;
+class VKFence;
+class VKResourceManager;
+
+/// The scheduler abstracts command buffer and fence management with an interface that's able to
+/// do OpenGL-like operations on Vulkan command buffers.
+class VKScheduler {
+public:
+    explicit VKScheduler(const VKDevice& device, VKResourceManager& resource_manager);
+    ~VKScheduler();
+
+    /// Gets the current execution context.
+    [[nodiscard]] VKExecutionContext GetExecutionContext() const;
+
+    /// Sends the current execution context to the GPU. It invalidates the current execution
+    /// context and returns a new one.
+    VKExecutionContext Flush(vk::Semaphore semaphore = nullptr);
+
+    /// Sends the current execution context to the GPU and waits for it to complete. It
+    /// invalidates the current execution context and returns a new one.
+    VKExecutionContext Finish(vk::Semaphore semaphore = nullptr);
+
+private:
+    void SubmitExecution(vk::Semaphore semaphore);
+
+    void AllocateNewContext();
+
+    const VKDevice& device;
+    VKResourceManager& resource_manager;
+    vk::CommandBuffer current_cmdbuf;
+    VKFence* current_fence = nullptr;
+    VKFence* next_fence = nullptr;
+};
+
+class VKExecutionContext {
+    friend class VKScheduler;
+
+public:
+    VKExecutionContext() = default;
+
+    VKFence& GetFence() const {
+        return *fence;
+    }
+
+    vk::CommandBuffer GetCommandBuffer() const {
+        return cmdbuf;
+    }
+
+private:
+    explicit VKExecutionContext(VKFence* fence, vk::CommandBuffer cmdbuf)
+        : fence{fence}, cmdbuf{cmdbuf} {}
+
+    VKFence* fence{};
+    vk::CommandBuffer cmdbuf;
+};
+
+} // namespace Vulkan
diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp
index 9a1d1de94..38f01ca50 100644
--- a/src/video_core/shader/decode/memory.cpp
+++ b/src/video_core/shader/decode/memory.cpp
@@ -429,7 +429,7 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) {
     UNIMPLEMENTED_IF_MSG(instr.tlds.UsesMiscMode(TextureMiscMode::MZ), "MZ is not implemented");
 
     if (instr.tlds.UsesMiscMode(TextureMiscMode::NODEP)) {
-        LOG_WARNING(HW_GPU, "TMML.NODEP implementation is incomplete");
+        LOG_WARNING(HW_GPU, "TLDS.NODEP implementation is incomplete");
     }
 
     WriteTexsInstructionFloat(bb, instr, GetTldsCode(instr, texture_type, is_array));