Diffstat (limited to 'src/video_core')
71 files changed, 4149 insertions, 562 deletions
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index 9b13ccbab..cf9266d54 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -95,6 +95,12 @@ add_library(video_core STATIC memory_manager.h precompiled_headers.h pte_kind.h + query_cache/bank_base.h + query_cache/query_base.h + query_cache/query_cache_base.h + query_cache/query_cache.h + query_cache/query_stream.h + query_cache/types.h query_cache.h rasterizer_accelerated.cpp rasterizer_accelerated.h diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index 8be7bd594..9e90c587c 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -272,13 +272,19 @@ std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainBuffer(GPUVAddr gpu_ad if (!cpu_addr) { return {&slot_buffers[NULL_BUFFER_ID], 0}; } - const BufferId buffer_id = FindBuffer(*cpu_addr, size); + return ObtainCPUBuffer(*cpu_addr, size, sync_info, post_op); +} + +template <class P> +std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainCPUBuffer( + VAddr cpu_addr, u32 size, ObtainBufferSynchronize sync_info, ObtainBufferOperation post_op) { + const BufferId buffer_id = FindBuffer(cpu_addr, size); Buffer& buffer = slot_buffers[buffer_id]; // synchronize op switch (sync_info) { case ObtainBufferSynchronize::FullSynchronize: - SynchronizeBuffer(buffer, *cpu_addr, size); + SynchronizeBuffer(buffer, cpu_addr, size); break; default: break; @@ -286,11 +292,11 @@ std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainBuffer(GPUVAddr gpu_ad switch (post_op) { case ObtainBufferOperation::MarkAsWritten: - MarkWrittenBuffer(buffer_id, *cpu_addr, size); + MarkWrittenBuffer(buffer_id, cpu_addr, size); break; case ObtainBufferOperation::DiscardWrite: { - VAddr cpu_addr_start = Common::AlignDown(*cpu_addr, 64); - VAddr cpu_addr_end = Common::AlignUp(*cpu_addr + size, 64); + VAddr cpu_addr_start = Common::AlignDown(cpu_addr, 64); + VAddr cpu_addr_end = Common::AlignUp(cpu_addr + size, 64); IntervalType interval{cpu_addr_start, cpu_addr_end}; ClearDownload(interval); common_ranges.subtract(interval); @@ -300,7 +306,7 @@ std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainBuffer(GPUVAddr gpu_ad break; } - return {&buffer, buffer.Offset(*cpu_addr)}; + return {&buffer, buffer.Offset(cpu_addr)}; } template <class P> diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h index 0b7135d49..c4f6e8d12 100644 --- a/src/video_core/buffer_cache/buffer_cache_base.h +++ b/src/video_core/buffer_cache/buffer_cache_base.h @@ -295,6 +295,10 @@ public: [[nodiscard]] std::pair<Buffer*, u32> ObtainBuffer(GPUVAddr gpu_addr, u32 size, ObtainBufferSynchronize sync_info, ObtainBufferOperation post_op); + + [[nodiscard]] std::pair<Buffer*, u32> ObtainCPUBuffer(VAddr cpu_addr, u32 size, + ObtainBufferSynchronize sync_info, + ObtainBufferOperation post_op); void FlushCachedWrites(); /// Return true when there are uncommitted buffers to be downloaded @@ -335,6 +339,14 @@ public: [[nodiscard]] std::pair<Buffer*, u32> GetDrawIndirectBuffer(); + template <typename Func> + void BufferOperations(Func&& func) { + do { + channel_state->has_deleted_buffers = false; + func(); + } while (channel_state->has_deleted_buffers); + } + std::recursive_mutex mutex; Runtime& runtime; diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h index 46bc9e322..5574e1fba 100644 ---
a/src/video_core/control/channel_state_cache.h +++ b/src/video_core/control/channel_state_cache.h @@ -51,7 +51,7 @@ public: virtual void CreateChannel(Tegra::Control::ChannelState& channel); /// Bind a channel for execution. - void BindToChannel(s32 id); + virtual void BindToChannel(s32 id); /// Erase channel's state. void EraseChannel(s32 id); diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h index c9fab2d90..e46a8fa5c 100644 --- a/src/video_core/dma_pusher.h +++ b/src/video_core/dma_pusher.h @@ -161,7 +161,7 @@ private: u32 method_count; ///< Current method count u32 length_pending; ///< Large NI command length pending GPUVAddr dma_get; ///< Currently read segment - u64 dma_word_offset; ///< Current word ofset from address + u64 dma_word_offset; ///< Current word offset from address bool non_incrementing; ///< Current command's NI flag bool is_last_call; }; diff --git a/src/video_core/engines/draw_manager.h b/src/video_core/engines/draw_manager.h index 7c22c49f1..18d959143 100644 --- a/src/video_core/engines/draw_manager.h +++ b/src/video_core/engines/draw_manager.h @@ -46,6 +46,7 @@ public: }; struct IndirectParams { + bool is_byte_count; bool is_indexed; bool include_count; GPUVAddr count_start_address; diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp index 06e349e43..32d767d85 100644 --- a/src/video_core/engines/maxwell_3d.cpp +++ b/src/video_core/engines/maxwell_3d.cpp @@ -20,8 +20,6 @@ namespace Tegra::Engines { -using VideoCore::QueryType; - /// First register id that is actually a Macro call. constexpr u32 MacroRegistersStart = 0xE00; @@ -500,27 +498,21 @@ void Maxwell3D::StampQueryResult(u64 payload, bool long_query) { } void Maxwell3D::ProcessQueryGet() { + VideoCommon::QueryPropertiesFlags flags{}; + if (regs.report_semaphore.query.short_query == 0) { + flags |= VideoCommon::QueryPropertiesFlags::HasTimeout; + } + const GPUVAddr sequence_address{regs.report_semaphore.Address()}; + const VideoCommon::QueryType query_type = + static_cast<VideoCommon::QueryType>(regs.report_semaphore.query.report.Value()); + const u32 payload = regs.report_semaphore.payload; + const u32 subreport = regs.report_semaphore.query.sub_report; switch (regs.report_semaphore.query.operation) { case Regs::ReportSemaphore::Operation::Release: if (regs.report_semaphore.query.short_query != 0) { - const GPUVAddr sequence_address{regs.report_semaphore.Address()}; - const u32 payload = regs.report_semaphore.payload; - std::function<void()> operation([this, sequence_address, payload] { - memory_manager.Write<u32>(sequence_address, payload); - }); - rasterizer->SignalFence(std::move(operation)); - } else { - struct LongQueryResult { - u64_le value; - u64_le timestamp; - }; - const GPUVAddr sequence_address{regs.report_semaphore.Address()}; - const u32 payload = regs.report_semaphore.payload; - [this, sequence_address, payload] { - memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks()); - memory_manager.Write<u64>(sequence_address, payload); - }(); + flags |= VideoCommon::QueryPropertiesFlags::IsAFence; } + rasterizer->Query(sequence_address, query_type, flags, payload, subreport); break; case Regs::ReportSemaphore::Operation::Acquire: // TODO(Blinkhawk): Under this operation, the GPU waits for the CPU to write a value that @@ -528,11 +520,7 @@ void Maxwell3D::ProcessQueryGet() { UNIMPLEMENTED_MSG("Unimplemented query operation ACQUIRE"); break; case Regs::ReportSemaphore::Operation::ReportOnly: - if (const 
std::optional<u64> result = GetQueryResult()) { - // If the query returns an empty optional it means it's cached and deferred. - // In this case we have a non-empty result, so we stamp it immediately. - StampQueryResult(*result, regs.report_semaphore.query.short_query == 0); - } + rasterizer->Query(sequence_address, query_type, flags, payload, subreport); break; case Regs::ReportSemaphore::Operation::Trap: UNIMPLEMENTED_MSG("Unimplemented query operation TRAP"); @@ -544,6 +532,10 @@ void Maxwell3D::ProcessQueryGet() { } void Maxwell3D::ProcessQueryCondition() { + if (rasterizer->AccelerateConditionalRendering()) { + execute_on = true; + return; + } const GPUVAddr condition_address{regs.render_enable.Address()}; switch (regs.render_enable_override) { case Regs::RenderEnable::Override::AlwaysRender: @@ -553,10 +545,6 @@ void Maxwell3D::ProcessQueryCondition() { execute_on = false; break; case Regs::RenderEnable::Override::UseRenderEnable: { - if (rasterizer->AccelerateConditionalRendering()) { - execute_on = true; - return; - } switch (regs.render_enable.mode) { case Regs::RenderEnable::Mode::True: { execute_on = true; @@ -598,15 +586,9 @@ void Maxwell3D::ProcessQueryCondition() { } void Maxwell3D::ProcessCounterReset() { -#if ANDROID - if (!Settings::IsGPULevelHigh()) { - // This is problematic on Android, disable on GPU Normal. - return; - } -#endif switch (regs.clear_report_value) { case Regs::ClearReport::ZPassPixelCount: - rasterizer->ResetCounter(QueryType::SamplesPassed); + rasterizer->ResetCounter(VideoCommon::QueryType::ZPassPixelCount64); break; default: LOG_DEBUG(Render_OpenGL, "Unimplemented counter reset={}", regs.clear_report_value); @@ -620,28 +602,6 @@ void Maxwell3D::ProcessSyncPoint() { rasterizer->SignalSyncPoint(sync_point); } -std::optional<u64> Maxwell3D::GetQueryResult() { - switch (regs.report_semaphore.query.report) { - case Regs::ReportSemaphore::Report::Payload: - return regs.report_semaphore.payload; - case Regs::ReportSemaphore::Report::ZPassPixelCount64: -#if ANDROID - if (!Settings::IsGPULevelHigh()) { - // This is problematic on Android, disable on GPU Normal. - return 120; - } -#endif - // Deferred. - rasterizer->Query(regs.report_semaphore.Address(), QueryType::SamplesPassed, - system.GPU().GetTicks()); - return std::nullopt; - default: - LOG_DEBUG(HW_GPU, "Unimplemented query report type {}", - regs.report_semaphore.query.report.Value()); - return 1; - } -} - void Maxwell3D::ProcessCBBind(size_t stage_index) { // Bind the buffer currently in CB_ADDRESS to the specified index in the desired shader // stage. diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index 6c19354e1..17faacc37 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h @@ -3182,9 +3182,6 @@ private: /// Handles writes to syncing register. void ProcessSyncPoint(); - /// Returns a query's value or an empty object if the value will be deferred through a cache. 
- std::optional<u64> GetQueryResult(); - void RefreshParametersImpl(); bool IsMethodExecutable(u32 method); diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp index da8eab7ee..422d4d859 100644 --- a/src/video_core/engines/maxwell_dma.cpp +++ b/src/video_core/engines/maxwell_dma.cpp @@ -109,10 +109,11 @@ void MaxwellDMA::Launch() { const bool is_const_a_dst = regs.remap_const.dst_x == RemapConst::Swizzle::CONST_A; if (regs.launch_dma.remap_enable != 0 && is_const_a_dst) { ASSERT(regs.remap_const.component_size_minus_one == 3); - accelerate.BufferClear(regs.offset_out, regs.line_length_in, regs.remap_consta_value); + accelerate.BufferClear(regs.offset_out, regs.line_length_in, + regs.remap_const.remap_consta_value); read_buffer.resize_destructive(regs.line_length_in * sizeof(u32)); std::span<u32> span(reinterpret_cast<u32*>(read_buffer.data()), regs.line_length_in); - std::ranges::fill(span, regs.remap_consta_value); + std::ranges::fill(span, regs.remap_const.remap_consta_value); memory_manager.WriteBlockUnsafe(regs.offset_out, reinterpret_cast<u8*>(read_buffer.data()), regs.line_length_in * sizeof(u32)); @@ -361,21 +362,17 @@ void MaxwellDMA::ReleaseSemaphore() { const auto type = regs.launch_dma.semaphore_type; const GPUVAddr address = regs.semaphore.address; const u32 payload = regs.semaphore.payload; + VideoCommon::QueryPropertiesFlags flags{VideoCommon::QueryPropertiesFlags::IsAFence}; switch (type) { case LaunchDMA::SemaphoreType::NONE: break; case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: { - std::function<void()> operation( - [this, address, payload] { memory_manager.Write<u32>(address, payload); }); - rasterizer->SignalFence(std::move(operation)); + rasterizer->Query(address, VideoCommon::QueryType::Payload, flags, payload, 0); break; } case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: { - std::function<void()> operation([this, address, payload] { - memory_manager.Write<u64>(address + sizeof(u64), system.GPU().GetTicks()); - memory_manager.Write<u64>(address, payload); - }); - rasterizer->SignalFence(std::move(operation)); + rasterizer->Query(address, VideoCommon::QueryType::Payload, + flags | VideoCommon::QueryPropertiesFlags::HasTimeout, payload, 0); break; } default: diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h index 69e26cb32..1a43e24b6 100644 --- a/src/video_core/engines/maxwell_dma.h +++ b/src/video_core/engines/maxwell_dma.h @@ -214,14 +214,15 @@ public: NO_WRITE = 6, }; - PackedGPUVAddr address; + u32 remap_consta_value; + u32 remap_constb_value; union { + BitField<0, 12, u32> dst_components_raw; BitField<0, 3, Swizzle> dst_x; BitField<4, 3, Swizzle> dst_y; BitField<8, 3, Swizzle> dst_z; BitField<12, 3, Swizzle> dst_w; - BitField<0, 12, u32> dst_components_raw; BitField<16, 2, u32> component_size_minus_one; BitField<20, 2, u32> num_src_components_minus_one; BitField<24, 2, u32> num_dst_components_minus_one; @@ -274,55 +275,57 @@ private: struct Regs { union { struct { - u32 reserved[0x40]; + INSERT_PADDING_BYTES_NOINIT(0x100); u32 nop; - u32 reserved01[0xf]; + INSERT_PADDING_BYTES_NOINIT(0x3C); u32 pm_trigger; - u32 reserved02[0x3f]; + INSERT_PADDING_BYTES_NOINIT(0xFC); Semaphore semaphore; - u32 reserved03[0x2]; + INSERT_PADDING_BYTES_NOINIT(0x8); RenderEnable render_enable; PhysMode src_phys_mode; PhysMode dst_phys_mode; - u32 reserved04[0x26]; + INSERT_PADDING_BYTES_NOINIT(0x98); LaunchDMA launch_dma; - u32 reserved05[0x3f]; + INSERT_PADDING_BYTES_NOINIT(0xFC); 
PackedGPUVAddr offset_in; PackedGPUVAddr offset_out; s32 pitch_in; s32 pitch_out; u32 line_length_in; u32 line_count; - u32 reserved06[0xb6]; - u32 remap_consta_value; - u32 remap_constb_value; + INSERT_PADDING_BYTES_NOINIT(0x2E0); RemapConst remap_const; DMA::Parameters dst_params; - u32 reserved07[0x1]; + INSERT_PADDING_BYTES_NOINIT(0x4); DMA::Parameters src_params; - u32 reserved08[0x275]; + INSERT_PADDING_BYTES_NOINIT(0x9D4); u32 pm_trigger_end; - u32 reserved09[0x3ba]; + INSERT_PADDING_BYTES_NOINIT(0xEE8); }; std::array<u32, NUM_REGS> reg_array; }; } regs{}; + static_assert(sizeof(Regs) == NUM_REGS * 4); #define ASSERT_REG_POSITION(field_name, position) \ - static_assert(offsetof(MaxwellDMA::Regs, field_name) == position * 4, \ + static_assert(offsetof(MaxwellDMA::Regs, field_name) == position, \ "Field " #field_name " has invalid position") - ASSERT_REG_POSITION(launch_dma, 0xC0); - ASSERT_REG_POSITION(offset_in, 0x100); - ASSERT_REG_POSITION(offset_out, 0x102); - ASSERT_REG_POSITION(pitch_in, 0x104); - ASSERT_REG_POSITION(pitch_out, 0x105); - ASSERT_REG_POSITION(line_length_in, 0x106); - ASSERT_REG_POSITION(line_count, 0x107); - ASSERT_REG_POSITION(remap_const, 0x1C0); - ASSERT_REG_POSITION(dst_params, 0x1C3); - ASSERT_REG_POSITION(src_params, 0x1CA); - + ASSERT_REG_POSITION(semaphore, 0x240); + ASSERT_REG_POSITION(render_enable, 0x254); + ASSERT_REG_POSITION(src_phys_mode, 0x260); + ASSERT_REG_POSITION(launch_dma, 0x300); + ASSERT_REG_POSITION(offset_in, 0x400); + ASSERT_REG_POSITION(offset_out, 0x408); + ASSERT_REG_POSITION(pitch_in, 0x410); + ASSERT_REG_POSITION(pitch_out, 0x414); + ASSERT_REG_POSITION(line_length_in, 0x418); + ASSERT_REG_POSITION(line_count, 0x41C); + ASSERT_REG_POSITION(remap_const, 0x700); + ASSERT_REG_POSITION(dst_params, 0x70C); + ASSERT_REG_POSITION(src_params, 0x728); + ASSERT_REG_POSITION(pm_trigger_end, 0x1114); #undef ASSERT_REG_POSITION }; diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp index 6de2543b7..8dd34c04a 100644 --- a/src/video_core/engines/puller.cpp +++ b/src/video_core/engines/puller.cpp @@ -82,10 +82,8 @@ void Puller::ProcessSemaphoreTriggerMethod() { if (op == GpuSemaphoreOperation::WriteLong) { const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()}; const u32 payload = regs.semaphore_sequence; - [this, sequence_address, payload] { - memory_manager.Write<u64>(sequence_address + sizeof(u64), gpu.GetTicks()); - memory_manager.Write<u64>(sequence_address, payload); - }(); + rasterizer->Query(sequence_address, VideoCommon::QueryType::Payload, + VideoCommon::QueryPropertiesFlags::HasTimeout, payload, 0); } else { do { const u32 word{memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress())}; @@ -120,10 +118,8 @@ void Puller::ProcessSemaphoreTriggerMethod() { void Puller::ProcessSemaphoreRelease() { const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()}; const u32 payload = regs.semaphore_release; - std::function<void()> operation([this, sequence_address, payload] { - memory_manager.Write<u32>(sequence_address, payload); - }); - rasterizer->SignalFence(std::move(operation)); + rasterizer->Query(sequence_address, VideoCommon::QueryType::Payload, + VideoCommon::QueryPropertiesFlags::IsAFence, payload, 0); } void Puller::ProcessSemaphoreAcquire() { @@ -132,7 +128,6 @@ void Puller::ProcessSemaphoreAcquire() { while (word != value) { regs.acquire_active = true; regs.acquire_value = value; - std::this_thread::sleep_for(std::chrono::milliseconds(1)); 
rasterizer->ReleaseFences(); word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress()); // TODO(kemathe73) figure out how to do the acquire_timeout diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h index ab20ff30f..805a89900 100644 --- a/src/video_core/fence_manager.h +++ b/src/video_core/fence_manager.h @@ -55,6 +55,9 @@ public: // Unlike other fences, this one doesn't void SignalOrdering() { + if constexpr (!can_async_check) { + TryReleasePendingFences<false>(); + } std::scoped_lock lock{buffer_cache.mutex}; buffer_cache.AccumulateFlushes(); } @@ -104,9 +107,25 @@ public: SignalFence(std::move(func)); } - void WaitPendingFences() { + void WaitPendingFences([[maybe_unused]] bool force) { if constexpr (!can_async_check) { TryReleasePendingFences<true>(); + } else { + if (!force) { + return; + } + std::mutex wait_mutex; + std::condition_variable wait_cv; + std::atomic<bool> wait_finished{}; + std::function<void()> func([&] { + std::scoped_lock lk(wait_mutex); + wait_finished.store(true, std::memory_order_relaxed); + wait_cv.notify_all(); + }); + SignalFence(std::move(func)); + std::unique_lock lk(wait_mutex); + wait_cv.wait( + lk, [&wait_finished] { return wait_finished.load(std::memory_order_relaxed); }); } } diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp index c192e33b2..11549d448 100644 --- a/src/video_core/gpu.cpp +++ b/src/video_core/gpu.cpp @@ -102,7 +102,8 @@ struct GPU::Impl { /// Signal the ending of command list. void OnCommandListEnd() { - rasterizer->ReleaseFences(); + rasterizer->ReleaseFences(false); + Settings::UpdateGPUAccuracy(); } /// Request a host GPU memory flush from the CPU. @@ -220,6 +221,7 @@ struct GPU::Impl { /// This can be used to launch any necessary threads and register any necessary /// core timing events. 
void Start() { + Settings::UpdateGPUAccuracy(); gpu_thread.StartThread(*renderer, renderer->Context(), *scheduler); } diff --git a/src/video_core/host_shaders/CMakeLists.txt b/src/video_core/host_shaders/CMakeLists.txt index c4d459077..8bb429578 100644 --- a/src/video_core/host_shaders/CMakeLists.txt +++ b/src/video_core/host_shaders/CMakeLists.txt @@ -19,6 +19,7 @@ set(SHADER_FILES block_linear_unswizzle_2d.comp block_linear_unswizzle_3d.comp convert_abgr8_to_d24s8.frag + convert_d32f_to_abgr8.frag convert_d24s8_to_abgr8.frag convert_depth_to_float.frag convert_float_to_depth.frag @@ -41,6 +42,9 @@ set(SHADER_FILES pitch_unswizzle.comp present_bicubic.frag present_gaussian.frag + queries_prefix_scan_sum.comp + queries_prefix_scan_sum_nosubgroups.comp + resolve_conditional_render.comp smaa_edge_detection.vert smaa_edge_detection.frag smaa_blending_weight_calculation.vert @@ -70,6 +74,7 @@ if ("${GLSLANGVALIDATOR}" STREQUAL "GLSLANGVALIDATOR-NOTFOUND") endif() set(GLSL_FLAGS "") +set(SPIR_V_VERSION "spirv1.3") set(QUIET_FLAG "--quiet") set(SHADER_INCLUDE ${CMAKE_CURRENT_BINARY_DIR}/include) @@ -123,7 +128,7 @@ foreach(FILENAME IN ITEMS ${SHADER_FILES}) OUTPUT ${SPIRV_HEADER_FILE} COMMAND - ${GLSLANGVALIDATOR} -V ${QUIET_FLAG} -I"${FIDELITYFX_INCLUDE_DIR}" ${GLSL_FLAGS} --variable-name ${SPIRV_VARIABLE_NAME} -o ${SPIRV_HEADER_FILE} ${SOURCE_FILE} + ${GLSLANGVALIDATOR} -V ${QUIET_FLAG} -I"${FIDELITYFX_INCLUDE_DIR}" ${GLSL_FLAGS} --variable-name ${SPIRV_VARIABLE_NAME} -o ${SPIRV_HEADER_FILE} ${SOURCE_FILE} --target-env ${SPIR_V_VERSION} MAIN_DEPENDENCY ${SOURCE_FILE} ) diff --git a/src/video_core/host_shaders/convert_d32f_to_abgr8.frag b/src/video_core/host_shaders/convert_d32f_to_abgr8.frag new file mode 100644 index 000000000..04cfef8b5 --- /dev/null +++ b/src/video_core/host_shaders/convert_d32f_to_abgr8.frag @@ -0,0 +1,14 @@ +// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#version 450 + +layout(binding = 0) uniform sampler2D depth_tex; + +layout(location = 0) out vec4 color; + +void main() { + ivec2 coord = ivec2(gl_FragCoord.xy); + float depth = texelFetch(depth_tex, coord, 0).r; + color = vec4(depth, depth, depth, 1.0); +} diff --git a/src/video_core/host_shaders/convert_msaa_to_non_msaa.comp b/src/video_core/host_shaders/convert_msaa_to_non_msaa.comp index fc3854d18..66f2ad483 100644 --- a/src/video_core/host_shaders/convert_msaa_to_non_msaa.comp +++ b/src/video_core/host_shaders/convert_msaa_to_non_msaa.comp @@ -15,11 +15,14 @@ void main() { // TODO: Specialization constants for num_samples?
const int num_samples = imageSamples(msaa_in); + const ivec3 msaa_size = imageSize(msaa_in); + const ivec3 out_size = imageSize(output_img); + const ivec3 scale = out_size / msaa_size; for (int curr_sample = 0; curr_sample < num_samples; ++curr_sample) { const vec4 pixel = imageLoad(msaa_in, coords, curr_sample); - const int single_sample_x = 2 * coords.x + (curr_sample & 1); - const int single_sample_y = 2 * coords.y + ((curr_sample / 2) & 1); + const int single_sample_x = scale.x * coords.x + (curr_sample & 1); + const int single_sample_y = scale.y * coords.y + ((curr_sample / 2) & 1); const ivec3 dest_coords = ivec3(single_sample_x, single_sample_y, coords.z); if (any(greaterThanEqual(dest_coords, imageSize(output_img)))) { diff --git a/src/video_core/host_shaders/convert_non_msaa_to_msaa.comp b/src/video_core/host_shaders/convert_non_msaa_to_msaa.comp index dedd962f1..c7ce38efa 100644 --- a/src/video_core/host_shaders/convert_non_msaa_to_msaa.comp +++ b/src/video_core/host_shaders/convert_non_msaa_to_msaa.comp @@ -15,9 +15,12 @@ void main() { // TODO: Specialization constants for num_samples? const int num_samples = imageSamples(output_msaa); + const ivec3 msaa_size = imageSize(output_msaa); + const ivec3 out_size = imageSize(img_in); + const ivec3 scale = out_size / msaa_size; for (int curr_sample = 0; curr_sample < num_samples; ++curr_sample) { - const int single_sample_x = 2 * coords.x + (curr_sample & 1); - const int single_sample_y = 2 * coords.y + ((curr_sample / 2) & 1); + const int single_sample_x = scale.x * coords.x + (curr_sample & 1); + const int single_sample_y = scale.y * coords.y + ((curr_sample / 2) & 1); const ivec3 single_coords = ivec3(single_sample_x, single_sample_y, coords.z); if (any(greaterThanEqual(single_coords, imageSize(img_in)))) { diff --git a/src/video_core/host_shaders/queries_prefix_scan_sum.comp b/src/video_core/host_shaders/queries_prefix_scan_sum.comp new file mode 100644 index 000000000..6faa8981f --- /dev/null +++ b/src/video_core/host_shaders/queries_prefix_scan_sum.comp @@ -0,0 +1,173 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#version 460 core + +#extension GL_KHR_shader_subgroup_basic : require +#extension GL_KHR_shader_subgroup_shuffle : require +#extension GL_KHR_shader_subgroup_shuffle_relative : require +#extension GL_KHR_shader_subgroup_arithmetic : require + +#ifdef VULKAN + +#define HAS_EXTENDED_TYPES 1 +#define BEGIN_PUSH_CONSTANTS layout(push_constant) uniform PushConstants { +#define END_PUSH_CONSTANTS }; +#define UNIFORM(n) +#define BINDING_INPUT_BUFFER 0 +#define BINDING_OUTPUT_IMAGE 1 + +#else // ^^^ Vulkan ^^^ // vvv OpenGL vvv + +#extension GL_NV_gpu_shader5 : enable +#ifdef GL_NV_gpu_shader5 +#define HAS_EXTENDED_TYPES 1 +#else +#define HAS_EXTENDED_TYPES 0 +#endif +#define BEGIN_PUSH_CONSTANTS +#define END_PUSH_CONSTANTS +#define UNIFORM(n) layout(location = n) uniform +#define BINDING_INPUT_BUFFER 0 +#define BINDING_OUTPUT_IMAGE 0 + +#endif + +BEGIN_PUSH_CONSTANTS +UNIFORM(0) uint min_accumulation_base; +UNIFORM(1) uint max_accumulation_base; +UNIFORM(2) uint accumulation_limit; +UNIFORM(3) uint buffer_offset; +END_PUSH_CONSTANTS + +#define LOCAL_RESULTS 8 +#define QUERIES_PER_INVOC 2048 + +layout(local_size_x = QUERIES_PER_INVOC / LOCAL_RESULTS) in; + +layout(std430, binding = 0) readonly buffer block1 { + uvec2 input_data[]; +}; + +layout(std430, binding = 1) coherent buffer block2 { + uvec2 output_data[]; +}; + +layout(std430, binding = 2) coherent 
buffer block3 { + uvec2 accumulated_data; +}; + +shared uvec2 shared_data[128]; + +// Simple Uint64 add that uses 2 uint variables for GPUs that don't support uint64 +uvec2 AddUint64(uvec2 value_1, uvec2 value_2) { + uint carry = 0; + uvec2 result; + result.x = uaddCarry(value_1.x, value_2.x, carry); + result.y = value_1.y + value_2.y + carry; + return result; +} + +// Do a subgroup prefix sum using Hillis and Steele's algorithm +uvec2 subgroupInclusiveAddUint64(uvec2 value) { + uvec2 result = value; + for (uint i = 1; i < gl_SubgroupSize; i *= 2) { + uvec2 other = subgroupShuffleUp(result, i); // get value from subgroup_inv_id - i; + if (i <= gl_SubgroupInvocationID) { + result = AddUint64(result, other); + } + } + return result; +} + +// Writes down the results to the output buffer and to the accumulation buffer +void WriteResults(uvec2 results[LOCAL_RESULTS]) { + const uint current_id = gl_LocalInvocationID.x; + const uvec2 accum = accumulated_data; + for (uint i = 0; i < LOCAL_RESULTS; i++) { + uvec2 base_data = current_id * LOCAL_RESULTS + i < min_accumulation_base ? accum : uvec2(0, 0); + results[i] = AddUint64(results[i], base_data); + } + for (uint i = 0; i < LOCAL_RESULTS; i++) { + output_data[buffer_offset + current_id * LOCAL_RESULTS + i] = results[i]; + } + uint index = accumulation_limit % LOCAL_RESULTS; + uint base_id = accumulation_limit / LOCAL_RESULTS; + if (min_accumulation_base >= accumulation_limit + 1) { + if (current_id == base_id) { + accumulated_data = results[index]; + } + return; + } + // Handle the ugly case in which the accumulation data is reset somewhere in the middle. + barrier(); + groupMemoryBarrier(); + + if (current_id == base_id) { + uvec2 reset_value = output_data[max_accumulation_base - 1]; + // Calculate the two's complement / negate manually + reset_value = AddUint64(uvec2(1,0), ~reset_value); + accumulated_data = AddUint64(results[index], reset_value); + } +} + +void main() { + const uint subgroup_inv_id = gl_SubgroupInvocationID; + const uint subgroup_id = gl_SubgroupID + gl_WorkGroupID.x * gl_NumSubgroups; + const uint last_subgroup_id = subgroupMax(subgroup_inv_id); + const uint current_id = gl_LocalInvocationID.x; + const uint total_work = accumulation_limit; + const uint last_result_id = LOCAL_RESULTS - 1; + uvec2 data[LOCAL_RESULTS]; + for (uint i = 0; i < LOCAL_RESULTS; i++) { + data[i] = input_data[buffer_offset + current_id * LOCAL_RESULTS + i]; + } + uvec2 results[LOCAL_RESULTS]; + results[0] = data[0]; + for (uint i = 1; i < LOCAL_RESULTS; i++) { + results[i] = AddUint64(data[i], results[i - 1]); + } + // make sure all input data has been loaded + subgroupBarrier(); + subgroupMemoryBarrier(); + + // on the last local result, do a subgroup inclusive scan sum + results[last_result_id] = subgroupInclusiveAddUint64(results[last_result_id]); + // get the last local result from the subgroup behind the current + uvec2 result_behind = subgroupShuffleUp(results[last_result_id], 1); + if (subgroup_inv_id != 0) { + for (uint i = 1; i < LOCAL_RESULTS; i++) { + results[i - 1] = AddUint64(results[i - 1], result_behind); + } + } + + // if we had fewer queries than our subgroup, just write down the results. + if (total_work <= gl_SubgroupSize * LOCAL_RESULTS) { // This condition is constant per dispatch. + WriteResults(results); + return; + } + + // We now have more, so let's write the last result into shared memory. + // Only pick the last subgroup.
+ if (subgroup_inv_id == last_subgroup_id) { + shared_data[subgroup_id] = results[last_result_id]; + } + // wait until every subgroup has stored its last result + barrier(); + memoryBarrierShared(); + + // only if it's not the first subgroup + if (subgroup_id != 0) { + // get the results from some previous invocation + uvec2 tmp = shared_data[subgroup_inv_id]; + subgroupBarrier(); + subgroupMemoryBarrierShared(); + tmp = subgroupInclusiveAddUint64(tmp); + // obtain the result that would be equivalent to the previous result + uvec2 shuffled_result = subgroupShuffle(tmp, subgroup_id - 1); + for (uint i = 0; i < LOCAL_RESULTS; i++) { + results[i] = AddUint64(results[i], shuffled_result); + } + } + WriteResults(results); +}
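The scan above leans on two tricks worth spelling out: AddUint64 emulates a 64-bit add with two 32-bit words plus a carry (uaddCarry), and WriteResults subtracts the reset value without a 64-bit subtract by adding its two's complement, AddUint64(uvec2(1, 0), ~reset_value). Below is a minimal host-side C++ sketch of the same two-word arithmetic, checked against native 64-bit math; U64Pair, SubUint64 and the test values are illustrative names, not part of this change:

```cpp
#include <cassert>
#include <cstdint>

// Stand-in for the shader's uvec2: lo is the low word (x), hi the high word (y).
struct U64Pair {
    uint32_t lo;
    uint32_t hi;
};

// Mirrors AddUint64: add the low words, detect the carry out (what uaddCarry
// reports in GLSL), then fold that carry into the sum of the high words.
U64Pair AddUint64(U64Pair a, U64Pair b) {
    U64Pair r{};
    r.lo = a.lo + b.lo; // wraps modulo 2^32
    const uint32_t carry = r.lo < a.lo ? 1u : 0u;
    r.hi = a.hi + b.hi + carry;
    return r;
}

// Mirrors the reset step in WriteResults: a - b == a + (~b + 1), so the shader
// builds the two's complement with AddUint64(uvec2(1, 0), ~reset_value).
U64Pair SubUint64(U64Pair a, U64Pair b) {
    const U64Pair negated = AddUint64(U64Pair{1u, 0u}, U64Pair{~b.lo, ~b.hi});
    return AddUint64(a, negated);
}

int main() {
    const auto split = [](uint64_t v) {
        return U64Pair{static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32)};
    };
    const auto join = [](U64Pair p) {
        return (static_cast<uint64_t>(p.hi) << 32) | p.lo;
    };
    const uint64_t a = 0x1FFFFFFFFULL; // low word all ones: forces a carry
    const uint64_t b = 0x200000001ULL;
    assert(join(AddUint64(split(a), split(b))) == a + b);
    assert(join(SubUint64(split(b), split(a))) == b - a);
    return 0;
}
```

The carry test r.lo < a.lo is sound because unsigned addition wraps exactly when the true sum no longer fits in 32 bits, which is the same condition uaddCarry reports.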
\ No newline at end of file diff --git a/src/video_core/host_shaders/queries_prefix_scan_sum_nosubgroups.comp b/src/video_core/host_shaders/queries_prefix_scan_sum_nosubgroups.comp new file mode 100644 index 000000000..559a213b9 --- /dev/null +++ b/src/video_core/host_shaders/queries_prefix_scan_sum_nosubgroups.comp @@ -0,0 +1,138 @@ +// SPDX-FileCopyrightText: Copyright 2015 Graham Sellers, Richard Wright Jr. and Nicholas Haemel +// SPDX-License-Identifier: MIT + +// Code obtained from OpenGL SuperBible, Seventh Edition by Graham Sellers, Richard Wright Jr. and +// Nicholas Haemel. Modified to suit needs. + +#version 460 core + +#ifdef VULKAN + +#define HAS_EXTENDED_TYPES 1 +#define BEGIN_PUSH_CONSTANTS layout(push_constant) uniform PushConstants { +#define END_PUSH_CONSTANTS }; +#define UNIFORM(n) +#define BINDING_INPUT_BUFFER 0 +#define BINDING_OUTPUT_IMAGE 1 + +#else // ^^^ Vulkan ^^^ // vvv OpenGL vvv + +#extension GL_NV_gpu_shader5 : enable +#ifdef GL_NV_gpu_shader5 +#define HAS_EXTENDED_TYPES 1 +#else +#define HAS_EXTENDED_TYPES 0 +#endif +#define BEGIN_PUSH_CONSTANTS +#define END_PUSH_CONSTANTS +#define UNIFORM(n) layout(location = n) uniform +#define BINDING_INPUT_BUFFER 0 +#define BINDING_OUTPUT_IMAGE 0 + +#endif + +BEGIN_PUSH_CONSTANTS +UNIFORM(0) uint min_accumulation_base; +UNIFORM(1) uint max_accumulation_base; +UNIFORM(2) uint accumulation_limit; +UNIFORM(3) uint buffer_offset; +END_PUSH_CONSTANTS + +#define LOCAL_RESULTS 4 +#define QUERIES_PER_INVOC 2048 + +layout(local_size_x = QUERIES_PER_INVOC / LOCAL_RESULTS) in; + +layout(std430, binding = 0) readonly buffer block1 { + uvec2 input_data[gl_WorkGroupSize.x * LOCAL_RESULTS]; +}; + +layout(std430, binding = 1) writeonly coherent buffer block2 { + uvec2 output_data[gl_WorkGroupSize.x * LOCAL_RESULTS]; +}; + +layout(std430, binding = 2) coherent buffer block3 { + uvec2 accumulated_data; +}; + +shared uvec2 shared_data[gl_WorkGroupSize.x * LOCAL_RESULTS]; + +uvec2 AddUint64(uvec2 value_1, uvec2 value_2) { + uint carry = 0; + uvec2 result; + result.x = uaddCarry(value_1.x, value_2.x, carry); + result.y = value_1.y + value_2.y + carry; + return result; +} + +void main(void) { + uint id = gl_LocalInvocationID.x; + uvec2 base_value[LOCAL_RESULTS]; + const uvec2 accum = accumulated_data; + for (uint i = 0; i < LOCAL_RESULTS; i++) { + base_value[i] = (buffer_offset + id * LOCAL_RESULTS + i) < min_accumulation_base + ? accumulated_data + : uvec2(0); + } + uint work_size = gl_WorkGroupSize.x; + uint rd_id; + uint wr_id; + uint mask; + uvec2 inputs[LOCAL_RESULTS]; + for (uint i = 0; i < LOCAL_RESULTS; i++) { + inputs[i] = input_data[buffer_offset + id * LOCAL_RESULTS + i]; + } + // The number of steps is the log base 2 of the + // work group size, which should be a power of 2 + const uint steps = uint(log2(work_size)) + uint(log2(LOCAL_RESULTS)); + uint step = 0; + + // Each invocation is responsible for the content of + // two elements of the output array + for (uint i = 0; i < LOCAL_RESULTS; i++) { + shared_data[id * LOCAL_RESULTS + i] = inputs[i]; + } + // Synchronize to make sure that everyone has initialized + // their elements of shared_data[] with data loaded from + // the input arrays + barrier(); + memoryBarrierShared(); + // For each step... 
+ for (step = 0; step < steps; step++) { + // Calculate the read and write index in the + // shared array + mask = (1 << step) - 1; + rd_id = ((id >> step) << (step + 1)) + mask; + wr_id = rd_id + 1 + (id & mask); + // Accumulate the read data into our element + + shared_data[wr_id] = AddUint64(shared_data[rd_id], shared_data[wr_id]); + // Synchronize again to make sure that everyone + // has caught up with us + barrier(); + memoryBarrierShared(); + } + // Add the accumulation + for (uint i = 0; i < LOCAL_RESULTS; i++) { + shared_data[id * LOCAL_RESULTS + i] = + AddUint64(shared_data[id * LOCAL_RESULTS + i], base_value[i]); + } + barrier(); + memoryBarrierShared(); + + // Finally write our data back to the output buffer + for (uint i = 0; i < LOCAL_RESULTS; i++) { + output_data[buffer_offset + id * LOCAL_RESULTS + i] = shared_data[id * LOCAL_RESULTS + i]; + } + if (id == 0) { + if (min_accumulation_base >= accumulation_limit + 1) { + accumulated_data = shared_data[accumulation_limit]; + return; + } + uvec2 reset_value = shared_data[max_accumulation_base - 1]; + uvec2 final_value = shared_data[accumulation_limit]; + // Two's complement + reset_value = AddUint64(uvec2(1, 0), ~reset_value); + accumulated_data = AddUint64(final_value, reset_value); + } +}
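Both prefix-scan shaders compute the same inclusive sums by different routes: the fallback above runs the shared-memory scan adapted from the OpenGL SuperBible, while queries_prefix_scan_sum.comp uses the Hillis-Steele shuffle loop inside each subgroup. Here is a CPU model of that Hillis-Steele step structure, under the assumption of a lockstep set of lanes; InclusiveScan is an illustrative name, not part of the diff:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// CPU model of subgroupInclusiveAddUint64's loop: at step i, lane k (k >= i)
// adds the value that lane k - i held before the step, which is what
// subgroupShuffleUp(result, i) hands over on a lockstep subgroup.
std::vector<uint64_t> InclusiveScan(std::vector<uint64_t> lanes) {
    for (size_t i = 1; i < lanes.size(); i *= 2) {
        const std::vector<uint64_t> before = lanes; // snapshot = shuffle source
        for (size_t k = i; k < lanes.size(); ++k) {
            lanes[k] += before[k - i];
        }
    }
    return lanes;
}

int main() {
    const std::vector<uint64_t> in{3, 1, 4, 1, 5, 9, 2, 6};
    const std::vector<uint64_t> out = InclusiveScan(in);
    uint64_t acc = 0;
    for (size_t k = 0; k < in.size(); ++k) {
        acc += in[k];
        assert(out[k] == acc); // each lane ends with the sum up to itself
    }
    return 0;
}
```

After ceil(log2(n)) doubling steps every lane holds the sum of all lanes up to and including itself, which is why the shader's loop runs while i < gl_SubgroupSize.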
\ No newline at end of file diff --git a/src/video_core/host_shaders/resolve_conditional_render.comp b/src/video_core/host_shaders/resolve_conditional_render.comp new file mode 100644 index 000000000..307e77d1a --- /dev/null +++ b/src/video_core/host_shaders/resolve_conditional_render.comp @@ -0,0 +1,20 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#version 450 + +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer Query { + uvec2 initial; + uvec2 unknown; + uvec2 current; +}; + +layout(std430, binding = 1) buffer Result { + uint result; +}; + +void main() { + result = all(equal(initial, current)) ? 1 : 0; +} diff --git a/src/video_core/macro/macro_hle.cpp b/src/video_core/macro/macro_hle.cpp index 6272a4652..046c8085e 100644 --- a/src/video_core/macro/macro_hle.cpp +++ b/src/video_core/macro/macro_hle.cpp @@ -67,6 +67,7 @@ public: } auto& params = maxwell3d.draw_manager->GetIndirectParams(); + params.is_byte_count = false; params.is_indexed = false; params.include_count = false; params.count_start_address = 0; @@ -161,6 +162,7 @@ public: 0, 0x644, Maxwell3D::HLEReplacementAttributeType::BaseInstance); } auto& params = maxwell3d.draw_manager->GetIndirectParams(); + params.is_byte_count = false; params.is_indexed = true; params.include_count = false; params.count_start_address = 0; @@ -256,6 +258,7 @@ public: const u32 estimate = static_cast<u32>(maxwell3d.EstimateIndexBufferSize()); maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true; auto& params = maxwell3d.draw_manager->GetIndirectParams(); + params.is_byte_count = false; params.is_indexed = true; params.include_count = true; params.count_start_address = maxwell3d.GetMacroAddress(4); @@ -319,6 +322,47 @@ private: } }; +class HLE_DrawIndirectByteCount final : public HLEMacroImpl { +public: + explicit HLE_DrawIndirectByteCount(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {} + + void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override { + auto topology = static_cast<Maxwell3D::Regs::PrimitiveTopology>(parameters[0] & 0xFFFFU); + if (!maxwell3d.AnyParametersDirty() || !IsTopologySafe(topology)) { + Fallback(parameters); + return; + } + + auto& params = maxwell3d.draw_manager->GetIndirectParams(); + params.is_byte_count = true; + params.is_indexed = false; + params.include_count = false; + params.count_start_address = 0; + params.indirect_start_address = maxwell3d.GetMacroAddress(2); + params.buffer_size = 4; + params.max_draw_counts = 1; + params.stride = parameters[1]; + maxwell3d.regs.draw.begin = parameters[0]; + maxwell3d.regs.draw_auto_stride = parameters[1]; + maxwell3d.regs.draw_auto_byte_count = parameters[2]; + + maxwell3d.draw_manager->DrawArrayIndirect(topology); + } + +private: + void Fallback(const std::vector<u32>& parameters) { + maxwell3d.RefreshParameters(); + + maxwell3d.regs.draw.begin = parameters[0]; + maxwell3d.regs.draw_auto_stride = parameters[1]; + maxwell3d.regs.draw_auto_byte_count = parameters[2]; + + maxwell3d.draw_manager->DrawArray( + maxwell3d.regs.draw.topology, 0, + maxwell3d.regs.draw_auto_byte_count / maxwell3d.regs.draw_auto_stride, 0, 1); + } +}; + class HLE_C713C83D8F63CCF3 final : public HLEMacroImpl { public: explicit HLE_C713C83D8F63CCF3(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {} @@ -536,6 +580,11 @@ HLEMacro::HLEMacro(Maxwell3D& maxwell3d_) : maxwell3d{maxwell3d_} { [](Maxwell3D& maxwell3d__) -> std::unique_ptr<CachedMacro> { return 
std::make_unique<HLE_TransformFeedbackSetup>(maxwell3d__); })); + builders.emplace(0xB5F74EDB717278ECULL, + std::function<std::unique_ptr<CachedMacro>(Maxwell3D&)>( + [](Maxwell3D& maxwell3d__) -> std::unique_ptr<CachedMacro> { + return std::make_unique<HLE_DrawIndirectByteCount>(maxwell3d__); + })); } HLEMacro::~HLEMacro() = default; diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h index 7047e2e63..9fcaeeac7 100644 --- a/src/video_core/query_cache.h +++ b/src/video_core/query_cache.h @@ -25,6 +25,13 @@ #include "video_core/rasterizer_interface.h" #include "video_core/texture_cache/slot_vector.h" +namespace VideoCore { +enum class QueryType { + SamplesPassed, +}; +constexpr std::size_t NumQueryTypes = 1; +} // namespace VideoCore + namespace VideoCommon { using AsyncJobId = SlotId; @@ -98,10 +105,10 @@ private: }; template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter> -class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> { +class QueryCacheLegacy : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> { public: - explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_, - Core::Memory::Memory& cpu_memory_) + explicit QueryCacheLegacy(VideoCore::RasterizerInterface& rasterizer_, + Core::Memory::Memory& cpu_memory_) : rasterizer{rasterizer_}, // Use reinterpret_cast instead of static_cast as workaround for // UBSan bug (https://github.com/llvm/llvm-project/issues/59060) diff --git a/src/video_core/query_cache/bank_base.h b/src/video_core/query_cache/bank_base.h new file mode 100644 index 000000000..44769ea97 --- /dev/null +++ b/src/video_core/query_cache/bank_base.h @@ -0,0 +1,105 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <atomic> +#include <deque> +#include <utility> + +#include "common/common_types.h" + +namespace VideoCommon { + +class BankBase { +protected: + const size_t base_bank_size{}; + size_t bank_size{}; + std::atomic<size_t> references{}; + size_t current_slot{}; + +public: + explicit BankBase(size_t bank_size_) : base_bank_size{bank_size_}, bank_size(bank_size_) {} + + virtual ~BankBase() = default; + + virtual std::pair<bool, size_t> Reserve() { + if (IsClosed()) { + return {false, bank_size}; + } + const size_t result = current_slot++; + return {true, result}; + } + + virtual void Reset() { + current_slot = 0; + references = 0; + bank_size = base_bank_size; + } + + size_t Size() const { + return bank_size; + } + + void AddReference(size_t how_many = 1) { + references.fetch_add(how_many, std::memory_order_relaxed); + } + + void CloseReference(size_t how_many = 1) { + if (how_many > references.load(std::memory_order_relaxed)) { + UNREACHABLE(); + } + references.fetch_sub(how_many, std::memory_order_relaxed); + } + + void Close() { + bank_size = current_slot; + } + + bool IsClosed() const { + return current_slot >= bank_size; + } + + bool IsDead() const { + return IsClosed() && references == 0; + } +}; + +template <typename BankType> +class BankPool { +private: + std::deque<BankType> bank_pool; + std::deque<size_t> bank_indices; + +public: + BankPool() = default; + ~BankPool() = default; + + // Reserve a bank from the pool and return its index + template <typename Func> + size_t ReserveBank(Func&& builder) { + if (!bank_indices.empty() && bank_pool[bank_indices.front()].IsDead()) { + size_t new_index = bank_indices.front(); + bank_indices.pop_front(); + 
bank_pool[new_index].Reset(); + bank_indices.push_back(new_index); + return new_index; + } + size_t new_index = bank_pool.size(); + builder(bank_pool, new_index); + bank_indices.push_back(new_index); + return new_index; + } + + // Get a reference to a bank using its index + BankType& GetBank(size_t index) { + return bank_pool[index]; + } + + // Get the total number of banks in the pool + size_t BankCount() const { + return bank_pool.size(); + } +}; + +} // namespace VideoCommon diff --git a/src/video_core/query_cache/query_base.h b/src/video_core/query_cache/query_base.h new file mode 100644 index 000000000..1d786b3a7 --- /dev/null +++ b/src/video_core/query_cache/query_base.h @@ -0,0 +1,70 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include "common/common_funcs.h" +#include "common/common_types.h" + +namespace VideoCommon { + +enum class QueryFlagBits : u32 { + HasTimestamp = 1 << 0, ///< Indicates if this query has a timestamp. + IsFinalValueSynced = 1 << 1, ///< Indicates if the query's final value has been synced in the host + IsHostSynced = 1 << 2, ///< Indicates if the query has been synced in the host + IsGuestSynced = 1 << 3, ///< Indicates if the query has been synced with the guest. + IsHostManaged = 1 << 4, ///< Indicates if this query points to a host query + IsRewritten = 1 << 5, ///< Indicates if this query was rewritten by another query + IsInvalidated = 1 << 6, ///< Indicates the value of the query has been nullified. + IsOrphan = 1 << 7, ///< Indicates the query has not been set by a guest query. + IsFence = 1 << 8, ///< Indicates the query is a fence. +}; +DECLARE_ENUM_FLAG_OPERATORS(QueryFlagBits) + +class QueryBase { +public: + VAddr guest_address{}; + QueryFlagBits flags{}; + u64 value{}; + +protected: + // Default constructor + QueryBase() = default; + + // Parameterized constructor + QueryBase(VAddr address, QueryFlagBits flags_, u64 value_) + : guest_address(address), flags(flags_), value{value_} {} +}; + +class GuestQuery : public QueryBase { +public: + // Parameterized constructor + GuestQuery(bool isLong, VAddr address, u64 queryValue) + : QueryBase(address, QueryFlagBits::IsFinalValueSynced, queryValue) { + if (isLong) { + flags |= QueryFlagBits::HasTimestamp; + } + } +}; + +class HostQueryBase : public QueryBase { +public: + // Default constructor + HostQueryBase() : QueryBase(0, QueryFlagBits::IsHostManaged | QueryFlagBits::IsOrphan, 0) {} + + // Parameterized constructor + HostQueryBase(bool has_timestamp, VAddr address) + : QueryBase(address, QueryFlagBits::IsHostManaged, 0), start_bank_id{}, size_banks{}, + start_slot{}, size_slots{} { + if (has_timestamp) { + flags |= QueryFlagBits::HasTimestamp; + } + } + + u32 start_bank_id{}; + u32 size_banks{}; + size_t start_slot{}; + size_t size_slots{}; +}; + +} // namespace VideoCommon
\ No newline at end of file diff --git a/src/video_core/query_cache/query_cache.h b/src/video_core/query_cache/query_cache.h new file mode 100644 index 000000000..78b42b518 --- /dev/null +++ b/src/video_core/query_cache/query_cache.h @@ -0,0 +1,580 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <array> +#include <deque> +#include <memory> +#include <mutex> +#include <unordered_map> +#include <utility> + +#include "common/assert.h" +#include "common/common_types.h" +#include "common/logging/log.h" +#include "common/scope_exit.h" +#include "common/settings.h" +#include "core/memory.h" +#include "video_core/engines/maxwell_3d.h" +#include "video_core/gpu.h" +#include "video_core/memory_manager.h" +#include "video_core/query_cache/bank_base.h" +#include "video_core/query_cache/query_base.h" +#include "video_core/query_cache/query_cache_base.h" +#include "video_core/query_cache/query_stream.h" +#include "video_core/query_cache/types.h" + +namespace VideoCommon { + +using Maxwell = Tegra::Engines::Maxwell3D; + +struct SyncValuesStruct { + VAddr address; + u64 value; + u64 size; + + static constexpr bool GeneratesBaseBuffer = true; +}; + +template <typename Traits> +class GuestStreamer : public SimpleStreamer<GuestQuery> { +public: + using RuntimeType = typename Traits::RuntimeType; + + GuestStreamer(size_t id_, RuntimeType& runtime_) + : SimpleStreamer<GuestQuery>(id_), runtime{runtime_} {} + + virtual ~GuestStreamer() = default; + + size_t WriteCounter(VAddr address, bool has_timestamp, u32 value, + std::optional<u32> subreport = std::nullopt) override { + auto new_id = BuildQuery(has_timestamp, address, static_cast<u64>(value)); + pending_sync.push_back(new_id); + return new_id; + } + + bool HasPendingSync() const override { + return !pending_sync.empty(); + } + + void SyncWrites() override { + if (pending_sync.empty()) { + return; + } + std::vector<SyncValuesStruct> sync_values; + sync_values.reserve(pending_sync.size()); + for (size_t pending_id : pending_sync) { + auto& query = slot_queries[pending_id]; + if (True(query.flags & QueryFlagBits::IsRewritten) || + True(query.flags & QueryFlagBits::IsInvalidated)) { + continue; + } + query.flags |= QueryFlagBits::IsHostSynced; + sync_values.emplace_back(SyncValuesStruct{ + .address = query.guest_address, + .value = query.value, + .size = static_cast<u64>(True(query.flags & QueryFlagBits::HasTimestamp) ? 
8 : 4)}); + } + pending_sync.clear(); + if (sync_values.size() > 0) { + runtime.template SyncValues<SyncValuesStruct>(sync_values); + } + } + +private: + RuntimeType& runtime; + std::deque<size_t> pending_sync; +}; + +template <typename Traits> +class StubStreamer : public GuestStreamer<Traits> { +public: + using RuntimeType = typename Traits::RuntimeType; + + StubStreamer(size_t id_, RuntimeType& runtime_, u32 stub_value_) + : GuestStreamer<Traits>(id_, runtime_), stub_value{stub_value_} {} + + ~StubStreamer() override = default; + + size_t WriteCounter(VAddr address, bool has_timestamp, [[maybe_unused]] u32 value, + std::optional<u32> subreport = std::nullopt) override { + size_t new_id = + GuestStreamer<Traits>::WriteCounter(address, has_timestamp, stub_value, subreport); + return new_id; + } + +private: + u32 stub_value; +}; + +template <typename Traits> +struct QueryCacheBase<Traits>::QueryCacheBaseImpl { + using RuntimeType = typename Traits::RuntimeType; + + QueryCacheBaseImpl(QueryCacheBase<Traits>* owner_, VideoCore::RasterizerInterface& rasterizer_, + Core::Memory::Memory& cpu_memory_, RuntimeType& runtime_, Tegra::GPU& gpu_) + : owner{owner_}, rasterizer{rasterizer_}, + cpu_memory{cpu_memory_}, runtime{runtime_}, gpu{gpu_} { + streamer_mask = 0; + for (size_t i = 0; i < static_cast<size_t>(QueryType::MaxQueryTypes); i++) { + streamers[i] = runtime.GetStreamerInterface(static_cast<QueryType>(i)); + if (streamers[i]) { + streamer_mask |= 1ULL << streamers[i]->GetId(); + } + } + } + + template <typename Func> + void ForEachStreamerIn(u64 mask, Func&& func) { + static constexpr bool RETURNS_BOOL = + std::is_same_v<std::invoke_result_t<Func, StreamerInterface*>, bool>; + while (mask != 0) { + size_t position = std::countr_zero(mask); + mask &= ~(1ULL << position); + if constexpr (RETURNS_BOOL) { + if (func(streamers[position])) { + return; + } + } else { + func(streamers[position]); + } + } + } + + template <typename Func> + void ForEachStreamer(Func&& func) { + ForEachStreamerIn(streamer_mask, func); + } + + QueryBase* ObtainQuery(QueryCacheBase<Traits>::QueryLocation location) { + size_t which_stream = location.stream_id.Value(); + auto* streamer = streamers[which_stream]; + if (!streamer) { + return nullptr; + } + return streamer->GetQuery(location.query_id.Value()); + } + + QueryCacheBase<Traits>* owner; + VideoCore::RasterizerInterface& rasterizer; + Core::Memory::Memory& cpu_memory; + RuntimeType& runtime; + Tegra::GPU& gpu; + std::array<StreamerInterface*, static_cast<size_t>(QueryType::MaxQueryTypes)> streamers; + u64 streamer_mask; + std::mutex flush_guard; + std::deque<u64> flushes_pending; + std::vector<QueryCacheBase<Traits>::QueryLocation> pending_unregister; +}; + +template <typename Traits> +QueryCacheBase<Traits>::QueryCacheBase(Tegra::GPU& gpu_, + VideoCore::RasterizerInterface& rasterizer_, + Core::Memory::Memory& cpu_memory_, RuntimeType& runtime_) + : cached_queries{} { + impl = std::make_unique<QueryCacheBase<Traits>::QueryCacheBaseImpl>( + this, rasterizer_, cpu_memory_, runtime_, gpu_); +} + +template <typename Traits> +QueryCacheBase<Traits>::~QueryCacheBase() = default; + +template <typename Traits> +void QueryCacheBase<Traits>::CounterEnable(QueryType counter_type, bool is_enabled) { + size_t index = static_cast<size_t>(counter_type); + StreamerInterface* streamer = impl->streamers[index]; + if (!streamer) [[unlikely]] { + UNREACHABLE(); + return; + } + if (is_enabled) { + streamer->StartCounter(); + } else { + streamer->PauseCounter(); + } +} + +template
<typename Traits> +void QueryCacheBase<Traits>::CounterClose(QueryType counter_type) { + size_t index = static_cast<size_t>(counter_type); + StreamerInterface* streamer = impl->streamers[index]; + if (!streamer) [[unlikely]] { + UNREACHABLE(); + return; + } + streamer->CloseCounter(); +} + +template <typename Traits> +void QueryCacheBase<Traits>::CounterReset(QueryType counter_type) { + size_t index = static_cast<size_t>(counter_type); + StreamerInterface* streamer = impl->streamers[index]; + if (!streamer) [[unlikely]] { + UNIMPLEMENTED(); + return; + } + streamer->ResetCounter(); +} + +template <typename Traits> +void QueryCacheBase<Traits>::BindToChannel(s32 id) { + VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo>::BindToChannel(id); + impl->runtime.Bind3DEngine(maxwell3d); +} + +template <typename Traits> +void QueryCacheBase<Traits>::CounterReport(GPUVAddr addr, QueryType counter_type, + QueryPropertiesFlags flags, u32 payload, u32 subreport) { + const bool has_timestamp = True(flags & QueryPropertiesFlags::HasTimeout); + const bool is_fence = True(flags & QueryPropertiesFlags::IsAFence); + size_t streamer_id = static_cast<size_t>(counter_type); + auto* streamer = impl->streamers[streamer_id]; + if (streamer == nullptr) [[unlikely]] { + counter_type = QueryType::Payload; + payload = 1U; + streamer_id = static_cast<size_t>(counter_type); + streamer = impl->streamers[streamer_id]; + } + auto cpu_addr_opt = gpu_memory->GpuToCpuAddress(addr); + if (!cpu_addr_opt) [[unlikely]] { + return; + } + VAddr cpu_addr = *cpu_addr_opt; + const size_t new_query_id = streamer->WriteCounter(cpu_addr, has_timestamp, payload, subreport); + auto* query = streamer->GetQuery(new_query_id); + if (is_fence) { + query->flags |= QueryFlagBits::IsFence; + } + QueryLocation query_location{}; + query_location.stream_id.Assign(static_cast<u32>(streamer_id)); + query_location.query_id.Assign(static_cast<u32>(new_query_id)); + const auto gen_caching_indexing = [](VAddr cur_addr) { + return std::make_pair<u64, u32>(cur_addr >> Core::Memory::YUZU_PAGEBITS, + static_cast<u32>(cur_addr & Core::Memory::YUZU_PAGEMASK)); + }; + u8* pointer = impl->cpu_memory.GetPointer(cpu_addr); + u8* pointer_timestamp = impl->cpu_memory.GetPointer(cpu_addr + 8); + bool is_synced = !Settings::IsGPULevelHigh() && is_fence; + + std::function<void()> operation([this, is_synced, streamer, query_base = query, query_location, + pointer, pointer_timestamp] { + if (True(query_base->flags & QueryFlagBits::IsInvalidated)) { + if (!is_synced) [[likely]] { + impl->pending_unregister.push_back(query_location); + } + return; + } + if (False(query_base->flags & QueryFlagBits::IsFinalValueSynced)) [[unlikely]] { + UNREACHABLE(); + return; + } + query_base->value += streamer->GetAmmendValue(); + streamer->SetAccumulationValue(query_base->value); + if (True(query_base->flags & QueryFlagBits::HasTimestamp)) { + u64 timestamp = impl->gpu.GetTicks(); + std::memcpy(pointer_timestamp, &timestamp, sizeof(timestamp)); + std::memcpy(pointer, &query_base->value, sizeof(query_base->value)); + } else { + u32 value = static_cast<u32>(query_base->value); + std::memcpy(pointer, &value, sizeof(value)); + } + if (!is_synced) [[likely]] { + impl->pending_unregister.push_back(query_location); + } + }); + if (is_fence) { + impl->rasterizer.SignalFence(std::move(operation)); + } else { + if (!Settings::IsGPULevelHigh() && counter_type == QueryType::Payload) { + if (has_timestamp) { + u64 timestamp = impl->gpu.GetTicks(); + u64 value = static_cast<u64>(payload);
std::memcpy(pointer_timestamp, &timestamp, sizeof(timestamp)); + std::memcpy(pointer, &value, sizeof(value)); + } else { + std::memcpy(pointer, &payload, sizeof(payload)); + } + streamer->Free(new_query_id); + return; + } + impl->rasterizer.SyncOperation(std::move(operation)); + } + if (is_synced) { + streamer->Free(new_query_id); + return; + } + auto [cont_addr, base] = gen_caching_indexing(cpu_addr); + { + std::scoped_lock lock(cache_mutex); + auto it1 = cached_queries.try_emplace(cont_addr); + auto& sub_container = it1.first->second; + auto it_current = sub_container.find(base); + if (it_current == sub_container.end()) { + sub_container.insert_or_assign(base, query_location); + return; + } + auto* old_query = impl->ObtainQuery(it_current->second); + old_query->flags |= QueryFlagBits::IsRewritten; + sub_container.insert_or_assign(base, query_location); + } +} + +template <typename Traits> +void QueryCacheBase<Traits>::UnregisterPending() { + const auto gen_caching_indexing = [](VAddr cur_addr) { + return std::make_pair<u64, u32>(cur_addr >> Core::Memory::YUZU_PAGEBITS, + static_cast<u32>(cur_addr & Core::Memory::YUZU_PAGEMASK)); + }; + std::scoped_lock lock(cache_mutex); + for (QueryLocation loc : impl->pending_unregister) { + const auto [streamer_id, query_id] = loc.unpack(); + auto* streamer = impl->streamers[streamer_id]; + if (!streamer) [[unlikely]] { + continue; + } + auto* query = streamer->GetQuery(query_id); + auto [cont_addr, base] = gen_caching_indexing(query->guest_address); + auto it1 = cached_queries.find(cont_addr); + if (it1 != cached_queries.end()) { + auto it2 = it1->second.find(base); + if (it2 != it1->second.end()) { + if (it2->second.raw == loc.raw) { + it1->second.erase(it2); + } + } + } + streamer->Free(query_id); + } + impl->pending_unregister.clear(); +} + +template <typename Traits> +void QueryCacheBase<Traits>::NotifyWFI() { + bool should_sync = false; + impl->ForEachStreamer( + [&should_sync](StreamerInterface* streamer) { should_sync |= streamer->HasPendingSync(); }); + if (!should_sync) { + return; + } + + impl->ForEachStreamer([](StreamerInterface* streamer) { streamer->PresyncWrites(); }); + impl->runtime.Barriers(true); + impl->ForEachStreamer([](StreamerInterface* streamer) { streamer->SyncWrites(); }); + impl->runtime.Barriers(false); +} + +template <typename Traits> +void QueryCacheBase<Traits>::NotifySegment(bool resume) { + if (resume) { + impl->runtime.ResumeHostConditionalRendering(); + } else { + CounterClose(VideoCommon::QueryType::ZPassPixelCount64); + CounterClose(VideoCommon::QueryType::StreamingByteCount); + impl->runtime.PauseHostConditionalRendering(); + } +} + +template <typename Traits> +bool QueryCacheBase<Traits>::AccelerateHostConditionalRendering() { + bool qc_dirty = false; + const auto gen_lookup = [this, &qc_dirty](GPUVAddr address) -> VideoCommon::LookupData { + auto cpu_addr_opt = gpu_memory->GpuToCpuAddress(address); + if (!cpu_addr_opt) [[unlikely]] { + return VideoCommon::LookupData{ + .address = 0, + .found_query = nullptr, + }; + } + VAddr cpu_addr = *cpu_addr_opt; + std::scoped_lock lock(cache_mutex); + auto it1 = cached_queries.find(cpu_addr >> Core::Memory::YUZU_PAGEBITS); + if (it1 == cached_queries.end()) { + return VideoCommon::LookupData{ + .address = cpu_addr, + .found_query = nullptr, + }; + } + auto& sub_container = it1->second; + auto it_current = sub_container.find(cpu_addr & Core::Memory::YUZU_PAGEMASK); + + if (it_current == sub_container.end()) { + auto it_current_2 = sub_container.find((cpu_addr &
+ +template <typename Traits> +bool QueryCacheBase<Traits>::AccelerateHostConditionalRendering() { + bool qc_dirty = false; + const auto gen_lookup = [this, &qc_dirty](GPUVAddr address) -> VideoCommon::LookupData { + auto cpu_addr_opt = gpu_memory->GpuToCpuAddress(address); + if (!cpu_addr_opt) [[unlikely]] { + return VideoCommon::LookupData{ + .address = 0, + .found_query = nullptr, + }; + } + VAddr cpu_addr = *cpu_addr_opt; + std::scoped_lock lock(cache_mutex); + auto it1 = cached_queries.find(cpu_addr >> Core::Memory::YUZU_PAGEBITS); + if (it1 == cached_queries.end()) { + return VideoCommon::LookupData{ + .address = cpu_addr, + .found_query = nullptr, + }; + } + auto& sub_container = it1->second; + auto it_current = sub_container.find(cpu_addr & Core::Memory::YUZU_PAGEMASK); + + if (it_current == sub_container.end()) { + auto it_current_2 = sub_container.find((cpu_addr & Core::Memory::YUZU_PAGEMASK) + 4); + if (it_current_2 == sub_container.end()) { + return VideoCommon::LookupData{ + .address = cpu_addr, + .found_query = nullptr, + }; + } + it_current = it_current_2; + } + auto* query = impl->ObtainQuery(it_current->second); + qc_dirty |= True(query->flags & QueryFlagBits::IsHostManaged) && + False(query->flags & QueryFlagBits::IsGuestSynced); + return VideoCommon::LookupData{ + .address = cpu_addr, + .found_query = query, + }; + }; + + auto& regs = maxwell3d->regs; + if (regs.render_enable_override != Maxwell::Regs::RenderEnable::Override::UseRenderEnable) { + impl->runtime.EndHostConditionalRendering(); + return false; + } + const ComparisonMode mode = static_cast<ComparisonMode>(regs.render_enable.mode); + const GPUVAddr address = regs.render_enable.Address(); + switch (mode) { + case ComparisonMode::True: + impl->runtime.EndHostConditionalRendering(); + return false; + case ComparisonMode::False: + impl->runtime.EndHostConditionalRendering(); + return false; + case ComparisonMode::Conditional: { + VideoCommon::LookupData object_1{gen_lookup(address)}; + return impl->runtime.HostConditionalRenderingCompareValue(object_1, qc_dirty); + } + case ComparisonMode::IfEqual: { + VideoCommon::LookupData object_1{gen_lookup(address)}; + VideoCommon::LookupData object_2{gen_lookup(address + 16)}; + return impl->runtime.HostConditionalRenderingCompareValues(object_1, object_2, qc_dirty, + true); + } + case ComparisonMode::IfNotEqual: { + VideoCommon::LookupData object_1{gen_lookup(address)}; + VideoCommon::LookupData object_2{gen_lookup(address + 16)}; + return impl->runtime.HostConditionalRenderingCompareValues(object_1, object_2, qc_dirty, + false); + } + default: + return false; + } +}
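For reference, the comparison semantics resolved by the runtime calls above: Conditional tests a single report value against zero, while IfEqual/IfNotEqual compare the value fields of two report slots placed 16 bytes apart (hence the address + 16 lookup). A CPU-side sketch of the same decisions, assuming the 16-byte value-plus-timestamp slot layout used by CounterReport:

#include <cstdint>
#include <cstring>

// Each report slot: u64 value at +0, u64 timestamp at +8.
inline std::uint64_t ReadReportValue(const std::uint8_t* slot) {
    std::uint64_t value{};
    std::memcpy(&value, slot, sizeof(value));
    return value;
}

inline bool ConditionalPasses(const std::uint8_t* slot) {
    return ReadReportValue(slot) != 0; // ComparisonMode::Conditional
}

inline bool EqualPasses(const std::uint8_t* first_slot, bool want_equal) {
    const bool equal = ReadReportValue(first_slot) == ReadReportValue(first_slot + 16);
    return want_equal ? equal : !equal; // IfEqual / IfNotEqual
}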
+ +// Async downloads +template <typename Traits> +void QueryCacheBase<Traits>::CommitAsyncFlushes() { + // Make sure the results are synced on the host. + NotifyWFI(); + + u64 mask{}; + { + std::scoped_lock lk(impl->flush_guard); + impl->ForEachStreamer([&mask](StreamerInterface* streamer) { + bool local_result = streamer->HasUnsyncedQueries(); + if (local_result) { + mask |= 1ULL << streamer->GetId(); + } + }); + impl->flushes_pending.push_back(mask); + } + std::function<void()> func([this] { UnregisterPending(); }); + impl->rasterizer.SyncOperation(std::move(func)); + if (mask == 0) { + return; + } + u64 ran_mask = ~mask; + while (mask) { + impl->ForEachStreamerIn(mask, [&mask, &ran_mask](StreamerInterface* streamer) { + u64 dep_mask = streamer->GetDependenceMask(); + if ((dep_mask & ~ran_mask) != 0) { + return; + } + u64 index = streamer->GetId(); + ran_mask |= (1ULL << index); + mask &= ~(1ULL << index); + streamer->PushUnsyncedQueries(); + }); + } +} + +template <typename Traits> +bool QueryCacheBase<Traits>::HasUncommittedFlushes() const { + bool result = false; + impl->ForEachStreamer([&result](StreamerInterface* streamer) { + result |= streamer->HasUnsyncedQueries(); + return result; + }); + return result; +} + +template <typename Traits> +bool QueryCacheBase<Traits>::ShouldWaitAsyncFlushes() { + std::scoped_lock lk(impl->flush_guard); + return !impl->flushes_pending.empty() && impl->flushes_pending.front() != 0ULL; +} + +template <typename Traits> +void QueryCacheBase<Traits>::PopAsyncFlushes() { + u64 mask; + { + std::scoped_lock lk(impl->flush_guard); + mask = impl->flushes_pending.front(); + impl->flushes_pending.pop_front(); + } + if (mask == 0) { + return; + } + u64 ran_mask = ~mask; + while (mask) { + impl->ForEachStreamerIn(mask, [&mask, &ran_mask](StreamerInterface* streamer) { + u64 dep_mask = streamer->GetDependenceMask(); + if ((dep_mask & ~ran_mask) != 0) { + return; + } + u64 index = streamer->GetId(); + ran_mask |= (1ULL << index); + mask &= ~(1ULL << index); + streamer->PopUnsyncedQueries(); + }); + } +} + +// Invalidation + +template <typename Traits> +void QueryCacheBase<Traits>::InvalidateQuery(QueryCacheBase<Traits>::QueryLocation location) { + auto* query_base = impl->ObtainQuery(location); + if (!query_base) { + return; + } + query_base->flags |= QueryFlagBits::IsInvalidated; +} + +template <typename Traits> +bool QueryCacheBase<Traits>::IsQueryDirty(QueryCacheBase<Traits>::QueryLocation location) { + auto* query_base = impl->ObtainQuery(location); + if (!query_base) { + return false; + } + return True(query_base->flags & QueryFlagBits::IsHostManaged) && + False(query_base->flags & QueryFlagBits::IsGuestSynced); +} + +template <typename Traits> +bool QueryCacheBase<Traits>::SemiFlushQueryDirty(QueryCacheBase<Traits>::QueryLocation location) { + auto* query_base = impl->ObtainQuery(location); + if (!query_base) { + return false; + } + if (True(query_base->flags & QueryFlagBits::IsFinalValueSynced) && + False(query_base->flags & QueryFlagBits::IsGuestSynced)) { + auto* ptr = impl->cpu_memory.GetPointer(query_base->guest_address); + if (True(query_base->flags & QueryFlagBits::HasTimestamp)) { + std::memcpy(ptr, &query_base->value, sizeof(query_base->value)); + return false; + } + u32 value_l = static_cast<u32>(query_base->value); + std::memcpy(ptr, &value_l, sizeof(value_l)); + return false; + } + return True(query_base->flags & QueryFlagBits::IsHostManaged) && + False(query_base->flags & QueryFlagBits::IsGuestSynced); +} + +template <typename Traits> +void QueryCacheBase<Traits>::RequestGuestHostSync() { + impl->rasterizer.ReleaseFences(); +} + +} // namespace VideoCommon
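CommitAsyncFlushes and PopAsyncFlushes above run the streamers in dependence order: a streamer is only pushed or popped once every streamer in its dependence mask has already run. The same scheduling loop distilled into a standalone sketch (it assumes, like the original, that the dependence graph is acyclic):

#include <bit>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <span>

void RunInDependenceOrder(std::uint64_t pending, std::span<const std::uint64_t> dependence_masks,
                          const std::function<void(std::size_t)>& run) {
    std::uint64_t ran_mask = ~pending; // idle streamers count as already run
    while (pending != 0) {
        for (std::uint64_t rest = pending; rest != 0; rest &= rest - 1) {
            const auto id = static_cast<std::size_t>(std::countr_zero(rest));
            if ((dependence_masks[id] & ~ran_mask) != 0) {
                continue; // a dependency has not run yet; retry on the next sweep
            }
            ran_mask |= 1ULL << id;
            pending &= ~(1ULL << id);
            run(id);
        }
    }
}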
diff --git a/src/video_core/query_cache/query_cache_base.h b/src/video_core/query_cache/query_cache_base.h new file mode 100644 index 000000000..07be421c6 --- /dev/null +++ b/src/video_core/query_cache/query_cache_base.h @@ -0,0 +1,181 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <functional> +#include <mutex> +#include <optional> +#include <span> +#include <unordered_map> +#include <utility> + +#include "common/assert.h" +#include "common/bit_field.h" +#include "common/common_types.h" +#include "core/memory.h" +#include "video_core/control/channel_state_cache.h" +#include "video_core/query_cache/query_base.h" +#include "video_core/query_cache/types.h" + +namespace Core::Memory { +class Memory; +} + +namespace VideoCore { +class RasterizerInterface; +} + +namespace Tegra { +class GPU; +} + +namespace VideoCommon { + +struct LookupData { + VAddr address; + QueryBase* found_query; +}; + +template <typename Traits> +class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> { + using RuntimeType = typename Traits::RuntimeType; + +public: + union QueryLocation { + BitField<27, 5, u32> stream_id; + BitField<0, 27, u32> query_id; + u32 raw; + + std::pair<size_t, size_t> unpack() const { + return {static_cast<size_t>(stream_id.Value()), static_cast<size_t>(query_id.Value())}; + } + }; + + explicit QueryCacheBase(Tegra::GPU& gpu, VideoCore::RasterizerInterface& rasterizer_, + Core::Memory::Memory& cpu_memory_, RuntimeType& runtime_); + + ~QueryCacheBase(); + + void InvalidateRegion(VAddr addr, std::size_t size) { + IterateCache<true>(addr, size, + [this](QueryLocation location) { InvalidateQuery(location); }); + } + + void FlushRegion(VAddr addr, std::size_t size) { + bool result = false; + IterateCache<false>(addr, size, [this, &result](QueryLocation location) { + result |= SemiFlushQueryDirty(location); + return result; + }); + if (result) { + RequestGuestHostSync(); + } + } + + static u64 BuildMask(std::span<const QueryType> types) { + u64 mask = 0; + for (auto query_type : types) { + mask |= 1ULL << (static_cast<u64>(query_type)); + } + return mask; + } + + /// Return true when a CPU region is modified from the GPU + [[nodiscard]] bool IsRegionGpuModified(VAddr addr, size_t size) { + bool result = false; + IterateCache<false>(addr, size, [this, &result](QueryLocation location) { + result |= IsQueryDirty(location); + return result; + }); + return result; + } + + void CounterEnable(QueryType counter_type, bool is_enabled); + + void CounterReset(QueryType counter_type); + + void CounterClose(QueryType counter_type); + + void CounterReport(GPUVAddr addr, QueryType counter_type, QueryPropertiesFlags flags, + u32 payload, u32 subreport); + + void NotifyWFI(); + + bool AccelerateHostConditionalRendering(); + + // Async downloads + void CommitAsyncFlushes(); + + bool HasUncommittedFlushes() const; + + bool ShouldWaitAsyncFlushes(); + + void PopAsyncFlushes(); + + void NotifySegment(bool resume); + + void BindToChannel(s32 id) override; + +protected: + template <bool remove_from_cache, typename Func> + void IterateCache(VAddr addr, std::size_t size, Func&& func) { + static constexpr bool RETURNS_BOOL = + std::is_same_v<std::invoke_result_t<Func, QueryLocation>, bool>; + const u64 addr_begin = addr; + const u64 addr_end = addr_begin + size; + + const u64 page_end = addr_end >> Core::Memory::YUZU_PAGEBITS; + std::scoped_lock lock(cache_mutex);
+ for (u64 page = addr_begin >> Core::Memory::YUZU_PAGEBITS; page <= page_end; ++page) { + const u64 page_start = page << Core::Memory::YUZU_PAGEBITS; + const auto in_range = [page_start, addr_begin, addr_end](const u32 query_location) { + const u64 cache_begin = page_start + query_location; + const u64 cache_end = cache_begin + sizeof(u32); + return cache_begin < addr_end && addr_begin < cache_end; + }; + const auto it = cached_queries.find(page); + if (it == std::end(cached_queries)) { + continue; + } + auto& contents = it->second; + for (auto& query : contents) { + if (!in_range(query.first)) { + continue; + } + if constexpr (RETURNS_BOOL) { + if (func(query.second)) { + return; + } + } else { + func(query.second); + } + } + if constexpr (remove_from_cache) { + const auto in_range2 = [&](const std::pair<u32, QueryLocation>& pair) { + return in_range(pair.first); + }; + std::erase_if(contents, in_range2); + } + } + } + + using ContentCache = std::unordered_map<u64, std::unordered_map<u32, QueryLocation>>; + + void InvalidateQuery(QueryLocation location); + bool IsQueryDirty(QueryLocation location); + bool SemiFlushQueryDirty(QueryLocation location); + void RequestGuestHostSync(); + void UnregisterPending(); + + ContentCache cached_queries; + std::mutex cache_mutex; + + struct QueryCacheBaseImpl; + friend struct QueryCacheBaseImpl; + friend RuntimeType; + + std::unique_ptr<QueryCacheBaseImpl> impl; +}; + +} // namespace VideoCommon \ No newline at end of file
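A sketch of how a backend rasterizer is expected to drive this interface; QueryCacheType stands in for a concrete QueryCacheBase<Traits> instantiation, and the surrounding function names are hypothetical:

#include "common/common_types.h"

// Hypothetical call sites inside a rasterizer implementation.
template <typename QueryCacheType>
void OnCpuWrite(QueryCacheType& query_cache, VAddr addr, std::size_t size) {
    // The guest overwrote memory that may hold pending reports: drop them.
    query_cache.InvalidateRegion(addr, size);
}

template <typename QueryCacheType>
bool FlushIfGpuModified(QueryCacheType& query_cache, VAddr addr, std::size_t size) {
    if (!query_cache.IsRegionGpuModified(addr, size)) {
        return false;
    }
    // Force final values out before the guest reads them back.
    query_cache.FlushRegion(addr, size);
    return true;
}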
diff --git a/src/video_core/query_cache/query_stream.h b/src/video_core/query_cache/query_stream.h new file mode 100644 index 000000000..39da6ac07 --- /dev/null +++ b/src/video_core/query_cache/query_stream.h @@ -0,0 +1,149 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include <deque> +#include <optional> +#include <vector> + +#include "common/assert.h" +#include "common/common_types.h" +#include "video_core/query_cache/bank_base.h" +#include "video_core/query_cache/query_base.h" + +namespace VideoCommon { + +class StreamerInterface { +public: + explicit StreamerInterface(size_t id_) : id{id_}, dependence_mask{}, dependent_mask{} {} + virtual ~StreamerInterface() = default; + + virtual QueryBase* GetQuery(size_t id) = 0; + + virtual void StartCounter() { + /* Do Nothing */ + } + + virtual void PauseCounter() { + /* Do Nothing */ + } + + virtual void ResetCounter() { + /* Do Nothing */ + } + + virtual void CloseCounter() { + /* Do Nothing */ + } + + virtual bool HasPendingSync() const { + return false; + } + + virtual void PresyncWrites() { + /* Do Nothing */ + } + + virtual void SyncWrites() { + /* Do Nothing */ + } + + virtual size_t WriteCounter(VAddr address, bool has_timestamp, u32 value, + std::optional<u32> subreport = std::nullopt) = 0; + + virtual bool HasUnsyncedQueries() const { + return false; + } + + virtual void PushUnsyncedQueries() { + /* Do Nothing */ + } + + virtual void PopUnsyncedQueries() { + /* Do Nothing */ + } + + virtual void Free(size_t query_id) = 0; + + size_t GetId() const { + return id; + } + + u64 GetDependenceMask() const { + return dependence_mask; + } + + u64 GetDependentMask() const { + return dependent_mask; + } + + u64 GetAmmendValue() const { + return ammend_value; + } + + void SetAccumulationValue(u64 new_value) { + acumulation_value = new_value; + } + +protected: + void MakeDependent(StreamerInterface* depend_on) { + dependence_mask |= 1ULL << depend_on->id; + depend_on->dependent_mask |= 1ULL << id; + } + + const size_t id; + u64 dependence_mask; + u64 dependent_mask; + u64 ammend_value{}; + u64 acumulation_value{}; +}; + +template <typename QueryType> +class SimpleStreamer : public StreamerInterface { +public: + explicit SimpleStreamer(size_t id_) : StreamerInterface{id_} {} + virtual ~SimpleStreamer() = default; + +protected: + virtual QueryType* GetQuery(size_t query_id) override { + if (query_id < slot_queries.size()) { + return &slot_queries[query_id]; + } + return nullptr; + } + + virtual void Free(size_t query_id) override { + std::scoped_lock lk(guard); + ReleaseQuery(query_id); + } + + template <typename... Args, typename = decltype(QueryType(std::declval<Args>()...))> + size_t BuildQuery(Args&&... args) { + std::scoped_lock lk(guard); + if (!old_queries.empty()) { + size_t new_id = old_queries.front(); + old_queries.pop_front(); + new (&slot_queries[new_id]) QueryType(std::forward<Args>(args)...); + return new_id; + } + size_t new_id = slot_queries.size(); + slot_queries.emplace_back(std::forward<Args>(args)...); + return new_id; + } + + void ReleaseQuery(size_t query_id) { + + if (query_id < slot_queries.size()) { + old_queries.push_back(query_id); + return; + } + UNREACHABLE(); + } + + std::mutex guard; + std::deque<QueryType> slot_queries; + std::deque<size_t> old_queries; +}; + +} // namespace VideoCommon \ No newline at end of file
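BuildQuery and ReleaseQuery above form a simple slot recycler: freed indices queue up in old_queries and are reused (the original via placement-new) before slot_queries grows, so query ids stay stable for the bank's lifetime. The same pattern in isolation, as a minimal sketch using plain assignment instead of placement-new:

#include <cstddef>
#include <deque>
#include <utility>

// Minimal index-stable slot pool mirroring SimpleStreamer's recycling scheme.
template <typename T>
class SlotPool {
public:
    template <typename... Args>
    std::size_t Build(Args&&... args) {
        if (!free_list.empty()) {
            const std::size_t id = free_list.front();
            free_list.pop_front();
            slots[id] = T(std::forward<Args>(args)...); // reuse the freed slot
            return id;
        }
        slots.emplace_back(std::forward<Args>(args)...);
        return slots.size() - 1;
    }

    void Release(std::size_t id) {
        free_list.push_back(id); // deque storage keeps ids and addresses valid
    }

    T& operator[](std::size_t id) {
        return slots[id];
    }

private:
    std::deque<T> slots;
    std::deque<std::size_t> free_list;
};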
diff --git a/src/video_core/query_cache/types.h b/src/video_core/query_cache/types.h new file mode 100644 index 000000000..e9226bbfc --- /dev/null +++ b/src/video_core/query_cache/types.h @@ -0,0 +1,74 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later + +#pragma once + +#include "common/common_funcs.h" +#include "common/common_types.h" + +namespace VideoCommon { + +enum class QueryPropertiesFlags : u32 { + HasTimeout = 1 << 0, + IsAFence = 1 << 1, +}; +DECLARE_ENUM_FLAG_OPERATORS(QueryPropertiesFlags) + +// This should always be equivalent to maxwell3d's Report Semaphore Reports +enum class QueryType : u32 { + Payload = 0, // "None" in docs, but confirmed via hardware to return the payload + VerticesGenerated = 1, + ZPassPixelCount = 2, + PrimitivesGenerated = 3, + AlphaBetaClocks = 4, + VertexShaderInvocations = 5, + StreamingPrimitivesNeededMinusSucceeded = 6, + GeometryShaderInvocations = 7, + GeometryShaderPrimitivesGenerated = 9, + ZCullStats0 = 10, + StreamingPrimitivesSucceeded = 11, + ZCullStats1 = 12, + StreamingPrimitivesNeeded = 13, + ZCullStats2 = 14, + ClipperInvocations = 15, + ZCullStats3 = 16, + ClipperPrimitivesGenerated = 17, + VtgPrimitivesOut = 18, + PixelShaderInvocations = 19, + ZPassPixelCount64 = 21, + IEEECleanColorTarget = 24, + IEEECleanZetaTarget = 25, + StreamingByteCount = 26, + TessellationInitInvocations = 27, + BoundingRectangle = 28, + TessellationShaderInvocations = 29, + TotalStreamingPrimitivesNeededMinusSucceeded = 30, + TessellationShaderPrimitivesGenerated = 31, + // max. + MaxQueryTypes, +}; + +// Comparison modes for Host Conditional Rendering +enum class ComparisonMode : u32 { + False = 0, + True = 1, + Conditional = 2, + IfEqual = 3, + IfNotEqual = 4, + MaxComparisonMode, +}; + +// Reduction ops. +enum class ReductionOp : u32 { + RedAdd = 0, + RedMin = 1, + RedMax = 2, + RedInc = 3, + RedDec = 4, + RedAnd = 5, + RedOr = 6, + RedXor = 7, + MaxReductionOp, +}; + +} // namespace VideoCommon \ No newline at end of file
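QueryPropertiesFlags is a bitmask enum: DECLARE_ENUM_FLAG_OPERATORS generates the bitwise operators, and yuzu's True()/False() helpers test the masked result, exactly as CounterReport does. Note that despite its name, HasTimeout is the bit that requests the timestamped 128-bit report. For example:

using VideoCommon::QueryPropertiesFlags;

// Compose report properties the way CounterReport consumes them.
const QueryPropertiesFlags flags =
    QueryPropertiesFlags::HasTimeout | QueryPropertiesFlags::IsAFence;

const bool has_timestamp = True(flags & QueryPropertiesFlags::HasTimeout); // true
const bool is_fence = True(flags & QueryPropertiesFlags::IsAFence);        // true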
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h index cb8029a4f..af1469147 100644 --- a/src/video_core/rasterizer_interface.h +++ b/src/video_core/rasterizer_interface.h @@ -12,6 +12,7 @@ #include "video_core/cache_types.h" #include "video_core/engines/fermi_2d.h" #include "video_core/gpu.h" +#include "video_core/query_cache/types.h" #include "video_core/rasterizer_download_area.h" namespace Tegra { @@ -26,11 +27,6 @@ struct ChannelState; namespace VideoCore { -enum class QueryType { - SamplesPassed, -}; -constexpr std::size_t NumQueryTypes = 1; - enum class LoadCallbackStage { Prepare, Build, @@ -58,10 +54,11 @@ public: virtual void DispatchCompute() = 0; /// Resets the counter of a query - virtual void ResetCounter(QueryType type) = 0; + virtual void ResetCounter(VideoCommon::QueryType type) = 0; /// Records a GPU query and caches it - virtual void Query(GPUVAddr gpu_addr, QueryType type, std::optional<u64> timestamp) = 0; + virtual void Query(GPUVAddr gpu_addr, VideoCommon::QueryType type, + VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) = 0; /// Signal an uniform buffer binding virtual void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, @@ -83,7 +80,7 @@ public: virtual void SignalReference() = 0; /// Release all pending fences. - virtual void ReleaseFences() = 0; + virtual void ReleaseFences(bool force = true) = 0; /// Notify rasterizer that all caches should be flushed to Switch memory virtual void FlushAll() = 0; diff --git a/src/video_core/renderer_base.h b/src/video_core/renderer_base.h index 3e12a8813..78ea5208b 100644 --- a/src/video_core/renderer_base.h +++ b/src/video_core/renderer_base.h @@ -89,9 +89,6 @@ public: void RequestScreenshot(void* data, std::function<void(bool)> callback, const Layout::FramebufferLayout& layout); - /// This is called to notify the rendering backend of a surface change - virtual void NotifySurfaceChanged() {} - protected: Core::Frontend::EmuWindow& render_window; ///< Reference to the render window handle.
std::unique_ptr<Core::Frontend::GraphicsContext> context; diff --git a/src/video_core/renderer_null/null_rasterizer.cpp b/src/video_core/renderer_null/null_rasterizer.cpp index 92ecf6682..65cd5aa06 100644 --- a/src/video_core/renderer_null/null_rasterizer.cpp +++ b/src/video_core/renderer_null/null_rasterizer.cpp @@ -26,16 +26,18 @@ void RasterizerNull::Draw(bool is_indexed, u32 instance_count) {} void RasterizerNull::DrawTexture() {} void RasterizerNull::Clear(u32 layer_count) {} void RasterizerNull::DispatchCompute() {} -void RasterizerNull::ResetCounter(VideoCore::QueryType type) {} -void RasterizerNull::Query(GPUVAddr gpu_addr, VideoCore::QueryType type, - std::optional<u64> timestamp) { +void RasterizerNull::ResetCounter(VideoCommon::QueryType type) {} +void RasterizerNull::Query(GPUVAddr gpu_addr, VideoCommon::QueryType type, + VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) { if (!gpu_memory) { return; } - - gpu_memory->Write(gpu_addr, u64{0}); - if (timestamp) { - gpu_memory->Write(gpu_addr + 8, *timestamp); + if (True(flags & VideoCommon::QueryPropertiesFlags::HasTimeout)) { + u64 ticks = m_gpu.GetTicks(); + gpu_memory->Write<u64>(gpu_addr + 8, ticks); + gpu_memory->Write<u64>(gpu_addr, static_cast<u64>(payload)); + } else { + gpu_memory->Write<u32>(gpu_addr, payload); } } void RasterizerNull::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, @@ -74,7 +76,7 @@ void RasterizerNull::SignalSyncPoint(u32 value) { syncpoint_manager.IncrementHost(value); } void RasterizerNull::SignalReference() {} -void RasterizerNull::ReleaseFences() {} +void RasterizerNull::ReleaseFences(bool) {} void RasterizerNull::FlushAndInvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType) {} void RasterizerNull::WaitForIdle() {} void RasterizerNull::FragmentBarrier() {} diff --git a/src/video_core/renderer_null/null_rasterizer.h b/src/video_core/renderer_null/null_rasterizer.h index 93b9a6971..23001eeb8 100644 --- a/src/video_core/renderer_null/null_rasterizer.h +++ b/src/video_core/renderer_null/null_rasterizer.h @@ -42,8 +42,9 @@ public: void DrawTexture() override; void Clear(u32 layer_count) override; void DispatchCompute() override; - void ResetCounter(VideoCore::QueryType type) override; - void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override; + void ResetCounter(VideoCommon::QueryType type) override; + void Query(GPUVAddr gpu_addr, VideoCommon::QueryType type, + VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) override; void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override; void DisableGraphicsUniformBuffer(size_t stage, u32 index) override; void FlushAll() override; @@ -63,7 +64,7 @@ public: void SyncOperation(std::function<void()>&& func) override; void SignalSyncPoint(u32 value) override; void SignalReference() override; - void ReleaseFences() override; + void ReleaseFences(bool force) override; void FlushAndInvalidateRegion( VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; void WaitForIdle() override; diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp index 99d7347f5..ec142d48e 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.cpp +++ b/src/video_core/renderer_opengl/gl_query_cache.cpp @@ -27,7 +27,7 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) { } // Anonymous namespace QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, 
Core::Memory::Memory& cpu_memory_) - : QueryCacheBase(rasterizer_, cpu_memory_), gl_rasterizer{rasterizer_} {} + : QueryCacheLegacy(rasterizer_, cpu_memory_), gl_rasterizer{rasterizer_} {} QueryCache::~QueryCache() = default; diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h index 872513f22..0721e0b3d 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.h +++ b/src/video_core/renderer_opengl/gl_query_cache.h @@ -26,7 +26,7 @@ class RasterizerOpenGL; using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>; class QueryCache final - : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { + : public VideoCommon::QueryCacheLegacy<QueryCache, CachedQuery, CounterStream, HostCounter> { public: explicit QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_); ~QueryCache(); diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index dd03efecd..27e2de1bf 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -396,13 +396,39 @@ void RasterizerOpenGL::DispatchCompute() { has_written_global_memory |= pipeline->WritesGlobalMemory(); } -void RasterizerOpenGL::ResetCounter(VideoCore::QueryType type) { - query_cache.ResetCounter(type); +void RasterizerOpenGL::ResetCounter(VideoCommon::QueryType type) { + if (type == VideoCommon::QueryType::ZPassPixelCount64) { + query_cache.ResetCounter(VideoCore::QueryType::SamplesPassed); + } } -void RasterizerOpenGL::Query(GPUVAddr gpu_addr, VideoCore::QueryType type, - std::optional<u64> timestamp) { - query_cache.Query(gpu_addr, type, timestamp); +void RasterizerOpenGL::Query(GPUVAddr gpu_addr, VideoCommon::QueryType type, + VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) { + if (type == VideoCommon::QueryType::ZPassPixelCount64) { + if (True(flags & VideoCommon::QueryPropertiesFlags::HasTimeout)) { + query_cache.Query(gpu_addr, VideoCore::QueryType::SamplesPassed, {gpu.GetTicks()}); + } else { + query_cache.Query(gpu_addr, VideoCore::QueryType::SamplesPassed, std::nullopt); + } + return; + } + if (type != VideoCommon::QueryType::Payload) { + payload = 1u; + } + std::function<void()> func([this, gpu_addr, flags, memory_manager = gpu_memory, payload]() { + if (True(flags & VideoCommon::QueryPropertiesFlags::HasTimeout)) { + u64 ticks = gpu.GetTicks(); + memory_manager->Write<u64>(gpu_addr + 8, ticks); + memory_manager->Write<u64>(gpu_addr, static_cast<u64>(payload)); + } else { + memory_manager->Write<u32>(gpu_addr, payload); + } + }); + if (True(flags & VideoCommon::QueryPropertiesFlags::IsAFence)) { + SignalFence(std::move(func)); + return; + } + func(); } void RasterizerOpenGL::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, @@ -573,8 +599,8 @@ void RasterizerOpenGL::SignalReference() { fence_manager.SignalOrdering(); } -void RasterizerOpenGL::ReleaseFences() { - fence_manager.WaitPendingFences(); +void RasterizerOpenGL::ReleaseFences(bool force) { + fence_manager.WaitPendingFences(force); } void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size, diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 8eda2ddba..ceffe1f1e 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -86,8 +86,9 @@ public: void DrawTexture() override; 
void Clear(u32 layer_count) override; void DispatchCompute() override; - void ResetCounter(VideoCore::QueryType type) override; - void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override; + void ResetCounter(VideoCommon::QueryType type) override; + void Query(GPUVAddr gpu_addr, VideoCommon::QueryType type, + VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) override; void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override; void DisableGraphicsUniformBuffer(size_t stage, u32 index) override; void FlushAll() override; @@ -107,7 +108,7 @@ public: void SyncOperation(std::function<void()>&& func) override; void SignalSyncPoint(u32 value) override; void SignalReference() override; - void ReleaseFences() override; + void ReleaseFences(bool force = true) override; void FlushAndInvalidateRegion( VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; void WaitForIdle() override; diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h index c7dc7e0a1..5ea9e2378 100644 --- a/src/video_core/renderer_opengl/maxwell_to_gl.h +++ b/src/video_core/renderer_opengl/maxwell_to_gl.h @@ -116,6 +116,7 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> FORMAT_TAB {GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV}, // E5B9G9R9_FLOAT {GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT}, // D32_FLOAT {GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT}, // D16_UNORM + {GL_DEPTH_COMPONENT24, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT_24_8}, // X8_D24_UNORM {GL_STENCIL_INDEX8, GL_STENCIL, GL_UNSIGNED_BYTE}, // S8_UINT {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8}, // D24_UNORM_S8_UINT {GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8}, // S8_UINT_D24_UNORM diff --git a/src/video_core/renderer_vulkan/blit_image.cpp b/src/video_core/renderer_vulkan/blit_image.cpp index 1032c9d12..f01d2394e 100644 --- a/src/video_core/renderer_vulkan/blit_image.cpp +++ b/src/video_core/renderer_vulkan/blit_image.cpp @@ -9,6 +9,7 @@ #include "video_core/host_shaders/blit_color_float_frag_spv.h" #include "video_core/host_shaders/convert_abgr8_to_d24s8_frag_spv.h" #include "video_core/host_shaders/convert_d24s8_to_abgr8_frag_spv.h" +#include "video_core/host_shaders/convert_d32f_to_abgr8_frag_spv.h" #include "video_core/host_shaders/convert_depth_to_float_frag_spv.h" #include "video_core/host_shaders/convert_float_to_depth_frag_spv.h" #include "video_core/host_shaders/convert_s8d24_to_abgr8_frag_spv.h" @@ -433,6 +434,7 @@ BlitImageHelper::BlitImageHelper(const Device& device_, Scheduler& scheduler_, convert_depth_to_float_frag(BuildShader(device, CONVERT_DEPTH_TO_FLOAT_FRAG_SPV)), convert_float_to_depth_frag(BuildShader(device, CONVERT_FLOAT_TO_DEPTH_FRAG_SPV)), convert_abgr8_to_d24s8_frag(BuildShader(device, CONVERT_ABGR8_TO_D24S8_FRAG_SPV)), + convert_d32f_to_abgr8_frag(BuildShader(device, CONVERT_D32F_TO_ABGR8_FRAG_SPV)), convert_d24s8_to_abgr8_frag(BuildShader(device, CONVERT_D24S8_TO_ABGR8_FRAG_SPV)), convert_s8d24_to_abgr8_frag(BuildShader(device, CONVERT_S8D24_TO_ABGR8_FRAG_SPV)), linear_sampler(device.GetLogical().CreateSampler(SAMPLER_CREATE_INFO<VK_FILTER_LINEAR>)), @@ -557,6 +559,13 @@ void BlitImageHelper::ConvertABGR8ToD24S8(const Framebuffer* dst_framebuffer, Convert(*convert_abgr8_to_d24s8_pipeline, dst_framebuffer, src_image_view); } +void BlitImageHelper::ConvertD32FToABGR8(const Framebuffer* 
dst_framebuffer, + ImageView& src_image_view) { + ConvertPipelineColorTargetEx(convert_d32f_to_abgr8_pipeline, dst_framebuffer->RenderPass(), + convert_d32f_to_abgr8_frag); + ConvertDepthStencil(*convert_d32f_to_abgr8_pipeline, dst_framebuffer, src_image_view); +} + void BlitImageHelper::ConvertD24S8ToABGR8(const Framebuffer* dst_framebuffer, ImageView& src_image_view) { ConvertPipelineColorTargetEx(convert_d24s8_to_abgr8_pipeline, dst_framebuffer->RenderPass(), @@ -609,6 +618,8 @@ void BlitImageHelper::ClearDepthStencil(const Framebuffer* dst_framebuffer, bool const VkPipelineLayout layout = *clear_color_pipeline_layout; scheduler.RequestRenderpass(dst_framebuffer); scheduler.Record([pipeline, layout, clear_depth, dst_region](vk::CommandBuffer cmdbuf) { + constexpr std::array blend_constants{0.0f, 0.0f, 0.0f, 0.0f}; + cmdbuf.SetBlendConstants(blend_constants.data()); cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline); BindBlitState(cmdbuf, dst_region); cmdbuf.PushConstants(layout, VK_SHADER_STAGE_FRAGMENT_BIT, clear_depth); @@ -865,7 +876,7 @@ VkPipeline BlitImageHelper::FindOrEmplaceClearStencilPipeline( .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, .pNext = nullptr, .flags = 0, - .depthTestEnable = VK_FALSE, + .depthTestEnable = key.depth_clear, .depthWriteEnable = key.depth_clear, .depthCompareOp = VK_COMPARE_OP_ALWAYS, .depthBoundsTestEnable = VK_FALSE, diff --git a/src/video_core/renderer_vulkan/blit_image.h b/src/video_core/renderer_vulkan/blit_image.h index dcfe217aa..a032c71fb 100644 --- a/src/video_core/renderer_vulkan/blit_image.h +++ b/src/video_core/renderer_vulkan/blit_image.h @@ -67,6 +67,8 @@ public: void ConvertABGR8ToD24S8(const Framebuffer* dst_framebuffer, const ImageView& src_image_view); + void ConvertD32FToABGR8(const Framebuffer* dst_framebuffer, ImageView& src_image_view); + void ConvertD24S8ToABGR8(const Framebuffer* dst_framebuffer, ImageView& src_image_view); void ConvertS8D24ToABGR8(const Framebuffer* dst_framebuffer, ImageView& src_image_view); @@ -128,6 +130,7 @@ private: vk::ShaderModule convert_depth_to_float_frag; vk::ShaderModule convert_float_to_depth_frag; vk::ShaderModule convert_abgr8_to_d24s8_frag; + vk::ShaderModule convert_d32f_to_abgr8_frag; vk::ShaderModule convert_d24s8_to_abgr8_frag; vk::ShaderModule convert_s8d24_to_abgr8_frag; vk::Sampler linear_sampler; @@ -146,6 +149,7 @@ private: vk::Pipeline convert_d16_to_r16_pipeline; vk::Pipeline convert_r16_to_d16_pipeline; vk::Pipeline convert_abgr8_to_d24s8_pipeline; + vk::Pipeline convert_d32f_to_abgr8_pipeline; vk::Pipeline convert_d24s8_to_abgr8_pipeline; vk::Pipeline convert_s8d24_to_abgr8_pipeline; }; diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp index 35bf80ea3..a08f2f67f 100644 --- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp +++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp @@ -185,7 +185,7 @@ struct FormatTuple { {VK_FORMAT_BC2_SRGB_BLOCK}, // BC2_SRGB {VK_FORMAT_BC3_SRGB_BLOCK}, // BC3_SRGB {VK_FORMAT_BC7_SRGB_BLOCK}, // BC7_SRGB - {VK_FORMAT_R4G4B4A4_UNORM_PACK16}, // A4B4G4R4_UNORM + {VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT}, // A4B4G4R4_UNORM {VK_FORMAT_R4G4_UNORM_PACK8}, // G4R4_UNORM {VK_FORMAT_ASTC_4x4_SRGB_BLOCK}, // ASTC_2D_4X4_SRGB {VK_FORMAT_ASTC_8x8_SRGB_BLOCK}, // ASTC_2D_8X8_SRGB @@ -214,8 +214,9 @@ struct FormatTuple { {VK_FORMAT_E5B9G9R9_UFLOAT_PACK32}, // E5B9G9R9_FLOAT // Depth formats - {VK_FORMAT_D32_SFLOAT, Attachable}, // D32_FLOAT - {VK_FORMAT_D16_UNORM, 
Attachable}, // D16_UNORM + {VK_FORMAT_D32_SFLOAT, Attachable}, // D32_FLOAT + {VK_FORMAT_D16_UNORM, Attachable}, // D16_UNORM + {VK_FORMAT_X8_D24_UNORM_PACK32, Attachable}, // X8_D24_UNORM // Stencil formats {VK_FORMAT_S8_UINT, Attachable}, // S8_UINT diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h index 590bc1c64..14e257cf7 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.h +++ b/src/video_core/renderer_vulkan/renderer_vulkan.h @@ -56,10 +56,6 @@ public: return device.GetDriverName(); } - void NotifySurfaceChanged() override { - present_manager.NotifySurfaceChanged(); - } - private: void Report() const; diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp index 31928bb94..52fc142d1 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp +++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp @@ -96,6 +96,7 @@ std::size_t GetSizeInBytes(const Tegra::FramebufferConfig& framebuffer) { VkFormat GetFormat(const Tegra::FramebufferConfig& framebuffer) { switch (framebuffer.pixel_format) { case Service::android::PixelFormat::Rgba8888: + case Service::android::PixelFormat::Rgbx8888: return VK_FORMAT_A8B8G8R8_UNORM_PACK32; case Service::android::PixelFormat::Rgb565: return VK_FORMAT_R5G6B5_UNORM_PACK16; diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp index e15865d16..d8148e89a 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp @@ -61,6 +61,9 @@ vk::Buffer CreateBuffer(const Device& device, const MemoryAllocator& memory_allo if (device.IsExtTransformFeedbackSupported()) { flags |= VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT; } + if (device.IsExtConditionalRendering()) { + flags |= VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT; + } const VkBufferCreateInfo buffer_ci = { .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, .pNext = nullptr, diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp index 54ee030ce..617f92910 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp +++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp @@ -3,6 +3,7 @@ #include <array> #include <memory> +#include <numeric> #include <optional> #include <utility> @@ -11,7 +12,13 @@ #include "common/assert.h" #include "common/common_types.h" #include "common/div_ceil.h" +#include "common/vector_math.h" #include "video_core/host_shaders/astc_decoder_comp_spv.h" +#include "video_core/host_shaders/convert_msaa_to_non_msaa_comp_spv.h" +#include "video_core/host_shaders/convert_non_msaa_to_msaa_comp_spv.h" +#include "video_core/host_shaders/queries_prefix_scan_sum_comp_spv.h" +#include "video_core/host_shaders/queries_prefix_scan_sum_nosubgroups_comp_spv.h" +#include "video_core/host_shaders/resolve_conditional_render_comp_spv.h" #include "video_core/host_shaders/vulkan_quad_indexed_comp_spv.h" #include "video_core/host_shaders/vulkan_uint8_comp_spv.h" #include "video_core/renderer_vulkan/vk_compute_pass.h" @@ -57,6 +64,30 @@ constexpr std::array<VkDescriptorSetLayoutBinding, 2> INPUT_OUTPUT_DESCRIPTOR_SE }, }}; +constexpr std::array<VkDescriptorSetLayoutBinding, 3> QUERIES_SCAN_DESCRIPTOR_SET_BINDINGS{{ + { + .binding = 0, + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + .descriptorCount = 1, + .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT, + .pImmutableSamplers = nullptr, + }, 
+ { + .binding = 1, + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + .descriptorCount = 1, + .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT, + .pImmutableSamplers = nullptr, + }, + { + .binding = 2, + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + .descriptorCount = 1, + .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT, + .pImmutableSamplers = nullptr, + }, +}}; + constexpr DescriptorBankInfo INPUT_OUTPUT_BANK_INFO{ .uniform_buffers = 0, .storage_buffers = 2, @@ -67,6 +98,16 @@ constexpr DescriptorBankInfo INPUT_OUTPUT_BANK_INFO{ .score = 2, }; +constexpr DescriptorBankInfo QUERIES_SCAN_BANK_INFO{ + .uniform_buffers = 0, + .storage_buffers = 3, + .texture_buffers = 0, + .image_buffers = 0, + .textures = 0, + .images = 0, + .score = 3, +}; + constexpr std::array<VkDescriptorSetLayoutBinding, ASTC_NUM_BINDINGS> ASTC_DESCRIPTOR_SET_BINDINGS{{ { .binding = ASTC_BINDING_INPUT_BUFFER, @@ -94,6 +135,33 @@ constexpr DescriptorBankInfo ASTC_BANK_INFO{ .score = 2, }; +constexpr std::array<VkDescriptorSetLayoutBinding, ASTC_NUM_BINDINGS> MSAA_DESCRIPTOR_SET_BINDINGS{{ + { + .binding = 0, + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, + .descriptorCount = 1, + .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT, + .pImmutableSamplers = nullptr, + }, + { + .binding = 1, + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, + .descriptorCount = 1, + .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT, + .pImmutableSamplers = nullptr, + }, +}}; + +constexpr DescriptorBankInfo MSAA_BANK_INFO{ + .uniform_buffers = 0, + .storage_buffers = 0, + .texture_buffers = 0, + .image_buffers = 0, + .textures = 0, + .images = 2, + .score = 2, +}; + constexpr VkDescriptorUpdateTemplateEntry INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE{ .dstBinding = 0, .dstArrayElement = 0, @@ -103,6 +171,24 @@ constexpr VkDescriptorUpdateTemplateEntry INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLAT .stride = sizeof(DescriptorUpdateEntry), }; +constexpr VkDescriptorUpdateTemplateEntry QUERIES_SCAN_DESCRIPTOR_UPDATE_TEMPLATE{ + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorCount = 3, + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + .offset = 0, + .stride = sizeof(DescriptorUpdateEntry), +}; + +constexpr VkDescriptorUpdateTemplateEntry MSAA_DESCRIPTOR_UPDATE_TEMPLATE{ + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorCount = 2, + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, + .offset = 0, + .stride = sizeof(DescriptorUpdateEntry), +}; + constexpr std::array<VkDescriptorUpdateTemplateEntry, ASTC_NUM_BINDINGS> ASTC_PASS_DESCRIPTOR_UPDATE_TEMPLATE_ENTRY{{ { @@ -131,13 +217,21 @@ struct AstcPushConstants { u32 block_height; u32 block_height_mask; }; + +struct QueriesPrefixScanPushConstants { + u32 min_accumulation_base; + u32 max_accumulation_base; + u32 accumulation_limit; + u32 buffer_offset; +}; } // Anonymous namespace ComputePass::ComputePass(const Device& device_, DescriptorPool& descriptor_pool, vk::Span<VkDescriptorSetLayoutBinding> bindings, vk::Span<VkDescriptorUpdateTemplateEntry> templates, const DescriptorBankInfo& bank_info, - vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code) + vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code, + std::optional<u32> optional_subgroup_size) : device{device_} { descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout({ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, @@ -170,6 +264,9 @@ ComputePass::ComputePass(const Device& device_, DescriptorPool& descriptor_pool, }); descriptor_allocator = 
descriptor_pool.Allocator(*descriptor_set_layout, bank_info); } + if (code.empty()) { + return; + } module = device.GetLogical().CreateShaderModule({ .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, .pNext = nullptr, @@ -178,13 +275,19 @@ ComputePass::ComputePass(const Device& device_, DescriptorPool& descriptor_pool, .pCode = code.data(), }); device.SaveShader(code); + const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{ + .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT, + .pNext = nullptr, + .requiredSubgroupSize = optional_subgroup_size ? *optional_subgroup_size : 32U, + }; + bool use_setup_size = device.IsExtSubgroupSizeControlSupported() && optional_subgroup_size; pipeline = device.GetLogical().CreateComputePipeline({ .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, .pNext = nullptr, .flags = 0, .stage{ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, - .pNext = nullptr, + .pNext = use_setup_size ? &subgroup_size_ci : nullptr, .flags = 0, .stage = VK_SHADER_STAGE_COMPUTE_BIT, .module = *module, @@ -302,6 +405,123 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble( return {staging.buffer, staging.offset}; } +ConditionalRenderingResolvePass::ConditionalRenderingResolvePass( + const Device& device_, Scheduler& scheduler_, DescriptorPool& descriptor_pool_, + ComputePassDescriptorQueue& compute_pass_descriptor_queue_) + : ComputePass(device_, descriptor_pool_, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS, + INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO, nullptr, + RESOLVE_CONDITIONAL_RENDER_COMP_SPV), + scheduler{scheduler_}, compute_pass_descriptor_queue{compute_pass_descriptor_queue_} {} + +void ConditionalRenderingResolvePass::Resolve(VkBuffer dst_buffer, VkBuffer src_buffer, + u32 src_offset, bool compare_to_zero) { + const size_t compare_size = compare_to_zero ? 
8 : 24; + + compute_pass_descriptor_queue.Acquire(); + compute_pass_descriptor_queue.AddBuffer(src_buffer, src_offset, compare_size); + compute_pass_descriptor_queue.AddBuffer(dst_buffer, 0, sizeof(u32)); + const void* const descriptor_data{compute_pass_descriptor_queue.UpdateData()}; + + scheduler.RequestOutsideRenderPassOperationContext(); + scheduler.Record([this, descriptor_data](vk::CommandBuffer cmdbuf) { + static constexpr VkMemoryBarrier read_barrier{ + .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, + }; + static constexpr VkMemoryBarrier write_barrier{ + .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT, + }; + const VkDescriptorSet set = descriptor_allocator.Commit(); + device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data); + + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, read_barrier); + cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline); + cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {}); + cmdbuf.Dispatch(1, 1, 1); + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, + VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, 0, write_barrier); + }); +} + +QueriesPrefixScanPass::QueriesPrefixScanPass( + const Device& device_, Scheduler& scheduler_, DescriptorPool& descriptor_pool_, + ComputePassDescriptorQueue& compute_pass_descriptor_queue_) + : ComputePass( + device_, descriptor_pool_, QUERIES_SCAN_DESCRIPTOR_SET_BINDINGS, + QUERIES_SCAN_DESCRIPTOR_UPDATE_TEMPLATE, QUERIES_SCAN_BANK_INFO, + COMPUTE_PUSH_CONSTANT_RANGE<sizeof(QueriesPrefixScanPushConstants)>, + device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_BASIC_BIT) && + device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_ARITHMETIC_BIT) && + device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_SHUFFLE_BIT) && + device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT) + ? 
std::span<const u32>(QUERIES_PREFIX_SCAN_SUM_COMP_SPV) + : std::span<const u32>(QUERIES_PREFIX_SCAN_SUM_NOSUBGROUPS_COMP_SPV)), + scheduler{scheduler_}, compute_pass_descriptor_queue{compute_pass_descriptor_queue_} {} + +void QueriesPrefixScanPass::Run(VkBuffer accumulation_buffer, VkBuffer dst_buffer, + VkBuffer src_buffer, size_t number_of_sums, + size_t min_accumulation_limit, size_t max_accumulation_limit) { + size_t current_runs = number_of_sums; + size_t offset = 0; + while (current_runs != 0) { + static constexpr size_t DISPATCH_SIZE = 2048U; + size_t runs_to_do = std::min<size_t>(current_runs, DISPATCH_SIZE); + current_runs -= runs_to_do; + compute_pass_descriptor_queue.Acquire(); + compute_pass_descriptor_queue.AddBuffer(src_buffer, 0, number_of_sums * sizeof(u64)); + compute_pass_descriptor_queue.AddBuffer(dst_buffer, 0, number_of_sums * sizeof(u64)); + compute_pass_descriptor_queue.AddBuffer(accumulation_buffer, 0, sizeof(u64)); + const void* const descriptor_data{compute_pass_descriptor_queue.UpdateData()}; + size_t used_offset = offset; + offset += runs_to_do; + + scheduler.RequestOutsideRenderPassOperationContext(); + scheduler.Record([this, descriptor_data, min_accumulation_limit, max_accumulation_limit, + runs_to_do, used_offset](vk::CommandBuffer cmdbuf) { + static constexpr VkMemoryBarrier read_barrier{ + .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, + }; + static constexpr VkMemoryBarrier write_barrier{ + .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT | + VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | + VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT | + VK_ACCESS_UNIFORM_READ_BIT | + VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT, + }; + const QueriesPrefixScanPushConstants uniforms{ + .min_accumulation_base = static_cast<u32>(min_accumulation_limit), + .max_accumulation_base = static_cast<u32>(max_accumulation_limit), + .accumulation_limit = static_cast<u32>(runs_to_do - 1), + .buffer_offset = static_cast<u32>(used_offset), + }; + const VkDescriptorSet set = descriptor_allocator.Commit(); + device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data); + + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, read_barrier); + cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline); + cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {}); + cmdbuf.PushConstants(*layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms); + cmdbuf.Dispatch(1, 1, 1); + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, + VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, 0, + write_barrier); + }); + } +} + ASTCDecoderPass::ASTCDecoderPass(const Device& device_, Scheduler& scheduler_, DescriptorPool& descriptor_pool_, StagingBufferPool& staging_buffer_pool_, @@ -413,4 +633,100 @@ void ASTCDecoderPass::Assemble(Image& image, const StagingBufferRef& map, scheduler.Finish(); } +MSAACopyPass::MSAACopyPass(const Device& device_, Scheduler& scheduler_, + DescriptorPool& descriptor_pool_, + StagingBufferPool& staging_buffer_pool_, + ComputePassDescriptorQueue& compute_pass_descriptor_queue_) + : ComputePass(device_, descriptor_pool_, MSAA_DESCRIPTOR_SET_BINDINGS, + MSAA_DESCRIPTOR_UPDATE_TEMPLATE, MSAA_BANK_INFO, {}, + 
CONVERT_NON_MSAA_TO_MSAA_COMP_SPV), + scheduler{scheduler_}, staging_buffer_pool{staging_buffer_pool_}, + compute_pass_descriptor_queue{compute_pass_descriptor_queue_} { + const auto make_msaa_pipeline = [this](size_t i, std::span<const u32> code) { + modules[i] = device.GetLogical().CreateShaderModule({ + .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, + .pNext = nullptr, + .flags = 0, + .codeSize = static_cast<u32>(code.size_bytes()), + .pCode = code.data(), + }); + pipelines[i] = device.GetLogical().CreateComputePipeline({ + .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, + .pNext = nullptr, + .flags = 0, + .stage{ + .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, + .pNext = nullptr, + .flags = 0, + .stage = VK_SHADER_STAGE_COMPUTE_BIT, + .module = *modules[i], + .pName = "main", + .pSpecializationInfo = nullptr, + }, + .layout = *layout, + .basePipelineHandle = nullptr, + .basePipelineIndex = 0, + }); + }; + make_msaa_pipeline(0, CONVERT_NON_MSAA_TO_MSAA_COMP_SPV); + make_msaa_pipeline(1, CONVERT_MSAA_TO_NON_MSAA_COMP_SPV); +} + +MSAACopyPass::~MSAACopyPass() = default; + +void MSAACopyPass::CopyImage(Image& dst_image, Image& src_image, + std::span<const VideoCommon::ImageCopy> copies, + bool msaa_to_non_msaa) { + const VkPipeline msaa_pipeline = *pipelines[msaa_to_non_msaa ? 1 : 0]; + scheduler.RequestOutsideRenderPassOperationContext(); + for (const VideoCommon::ImageCopy& copy : copies) { + ASSERT(copy.src_subresource.base_layer == 0); + ASSERT(copy.src_subresource.num_layers == 1); + ASSERT(copy.dst_subresource.base_layer == 0); + ASSERT(copy.dst_subresource.num_layers == 1); + + compute_pass_descriptor_queue.Acquire(); + compute_pass_descriptor_queue.AddImage( + src_image.StorageImageView(copy.src_subresource.base_level)); + compute_pass_descriptor_queue.AddImage( + dst_image.StorageImageView(copy.dst_subresource.base_level)); + const void* const descriptor_data{compute_pass_descriptor_queue.UpdateData()}; + + const Common::Vec3<u32> num_dispatches = { + Common::DivCeil(copy.extent.width, 8U), + Common::DivCeil(copy.extent.height, 8U), + copy.extent.depth, + }; + + scheduler.Record([this, dst = dst_image.Handle(), msaa_pipeline, num_dispatches, + descriptor_data](vk::CommandBuffer cmdbuf) { + const VkDescriptorSet set = descriptor_allocator.Commit(); + device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data); + cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, msaa_pipeline); + cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {}); + cmdbuf.Dispatch(num_dispatches.x, num_dispatches.y, num_dispatches.z); + const VkImageMemoryBarrier write_barrier{ + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_SHADER_READ_BIT, + .oldLayout = VK_IMAGE_LAYOUT_GENERAL, + .newLayout = VK_IMAGE_LAYOUT_GENERAL, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = dst, + .subresourceRange{ + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = VK_REMAINING_MIP_LEVELS, + .baseArrayLayer = 0, + .layerCount = VK_REMAINING_ARRAY_LAYERS, + }, + }; + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier); + }); + } +} + } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h index dd3927376..7b8f938c1 100644 
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h +++ b/src/video_core/renderer_vulkan/vk_compute_pass.h @@ -3,6 +3,7 @@ #pragma once +#include <optional> #include <span> #include <utility> @@ -10,6 +11,7 @@ #include "video_core/engines/maxwell_3d.h" #include "video_core/renderer_vulkan/vk_descriptor_pool.h" #include "video_core/renderer_vulkan/vk_update_descriptor.h" +#include "video_core/texture_cache/types.h" #include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_wrapper.h" @@ -31,7 +33,8 @@ public: vk::Span<VkDescriptorSetLayoutBinding> bindings, vk::Span<VkDescriptorUpdateTemplateEntry> templates, const DescriptorBankInfo& bank_info, - vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code); + vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code, + std::optional<u32> optional_subgroup_size = std::nullopt); ~ComputePass(); protected: @@ -82,6 +85,33 @@ private: ComputePassDescriptorQueue& compute_pass_descriptor_queue; }; +class ConditionalRenderingResolvePass final : public ComputePass { +public: + explicit ConditionalRenderingResolvePass( + const Device& device_, Scheduler& scheduler_, DescriptorPool& descriptor_pool_, + ComputePassDescriptorQueue& compute_pass_descriptor_queue_); + + void Resolve(VkBuffer dst_buffer, VkBuffer src_buffer, u32 src_offset, bool compare_to_zero); + +private: + Scheduler& scheduler; + ComputePassDescriptorQueue& compute_pass_descriptor_queue; +}; + +class QueriesPrefixScanPass final : public ComputePass { +public: + explicit QueriesPrefixScanPass(const Device& device_, Scheduler& scheduler_, + DescriptorPool& descriptor_pool_, + ComputePassDescriptorQueue& compute_pass_descriptor_queue_); + + void Run(VkBuffer accumulation_buffer, VkBuffer dst_buffer, VkBuffer src_buffer, + size_t number_of_sums, size_t min_accumulation_limit, size_t max_accumulation_limit); + +private: + Scheduler& scheduler; + ComputePassDescriptorQueue& compute_pass_descriptor_queue; +}; + class ASTCDecoderPass final : public ComputePass { public: explicit ASTCDecoderPass(const Device& device_, Scheduler& scheduler_, @@ -101,4 +131,22 @@ private: MemoryAllocator& memory_allocator; }; +class MSAACopyPass final : public ComputePass { +public: + explicit MSAACopyPass(const Device& device_, Scheduler& scheduler_, + DescriptorPool& descriptor_pool_, StagingBufferPool& staging_buffer_pool_, + ComputePassDescriptorQueue& compute_pass_descriptor_queue_); + ~MSAACopyPass(); + + void CopyImage(Image& dst_image, Image& src_image, + std::span<const VideoCommon::ImageCopy> copies, bool msaa_to_non_msaa); + +private: + Scheduler& scheduler; + StagingBufferPool& staging_buffer_pool; + ComputePassDescriptorQueue& compute_pass_descriptor_queue; + std::array<vk::ShaderModule, 2> modules; + std::array<vk::Pipeline, 2> pipelines; +}; + } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h index 145359d4e..336573574 100644 --- a/src/video_core/renderer_vulkan/vk_fence_manager.h +++ b/src/video_core/renderer_vulkan/vk_fence_manager.h @@ -7,6 +7,7 @@ #include "video_core/fence_manager.h" #include "video_core/renderer_vulkan/vk_buffer_cache.h" +#include "video_core/renderer_vulkan/vk_query_cache.h" #include "video_core/renderer_vulkan/vk_texture_cache.h" namespace Core { @@ -20,7 +21,6 @@ class RasterizerInterface; namespace Vulkan { class Device; -class QueryCache; class Scheduler; class InnerFence : public VideoCommon::FenceBase { diff 
--git a/src/video_core/renderer_vulkan/vk_present_manager.cpp b/src/video_core/renderer_vulkan/vk_present_manager.cpp index d681bd22a..2ef36583b 100644 --- a/src/video_core/renderer_vulkan/vk_present_manager.cpp +++ b/src/video_core/renderer_vulkan/vk_present_manager.cpp @@ -103,8 +103,7 @@ PresentManager::PresentManager(const vk::Instance& instance_, surface{surface_}, blit_supported{CanBlitToSwapchain(device.GetPhysical(), swapchain.GetImageViewFormat())}, use_present_thread{Settings::values.async_presentation.GetValue()}, - image_count{swapchain.GetImageCount()}, last_render_surface{ - render_window_.GetWindowInfo().render_surface} { + image_count{swapchain.GetImageCount()} { auto& dld = device.GetLogical(); cmdpool = dld.CreateCommandPool({ @@ -289,44 +288,36 @@ void PresentManager::PresentThread(std::stop_token token) { } } -void PresentManager::NotifySurfaceChanged() { -#ifdef ANDROID - std::scoped_lock lock{recreate_surface_mutex}; - recreate_surface_cv.notify_one(); -#endif +void PresentManager::RecreateSwapchain(Frame* frame) { + swapchain.Create(*surface, frame->width, frame->height, frame->is_srgb); + image_count = swapchain.GetImageCount(); } void PresentManager::CopyToSwapchain(Frame* frame) { - MICROPROFILE_SCOPE(Vulkan_CopyToSwapchain); - - const auto recreate_swapchain = [&] { - swapchain.Create(*surface, frame->width, frame->height, frame->is_srgb); - image_count = swapchain.GetImageCount(); - }; - -#ifdef ANDROID - std::unique_lock lock{recreate_surface_mutex}; - - const auto needs_recreation = [&] { - if (last_render_surface != render_window.GetWindowInfo().render_surface) { - return true; - } - if (swapchain.NeedsRecreation(frame->is_srgb)) { - return true; + bool requires_recreation = false; + + while (true) { + try { + // Recreate surface and swapchain if needed. + if (requires_recreation) { + surface = CreateSurface(instance, render_window.GetWindowInfo()); + RecreateSwapchain(frame); + } + + // Draw to swapchain. + return CopyToSwapchainImpl(frame); + } catch (const vk::Exception& except) { + if (except.GetResult() != VK_ERROR_SURFACE_LOST_KHR) { + throw; + } + + requires_recreation = true; } - return false; - }; - - recreate_surface_cv.wait_for(lock, std::chrono::milliseconds(400), - [&]() { return !needs_recreation(); }); - - // If the frontend recreated the surface, recreate the renderer surface and swapchain. - if (last_render_surface != render_window.GetWindowInfo().render_surface) { - last_render_surface = render_window.GetWindowInfo().render_surface; - surface = CreateSurface(instance, render_window.GetWindowInfo()); - recreate_swapchain(); } -#endif +} + +void PresentManager::CopyToSwapchainImpl(Frame* frame) { + MICROPROFILE_SCOPE(Vulkan_CopyToSwapchain); // If the size or colorspace of the incoming frames has changed, recreate the swapchain // to account for that. @@ -334,11 +325,11 @@ void PresentManager::CopyToSwapchain(Frame* frame) { const bool size_changed = swapchain.GetWidth() != frame->width || swapchain.GetHeight() != frame->height; if (srgb_changed || size_changed) { - recreate_swapchain(); + RecreateSwapchain(frame); } while (swapchain.AcquireNextImage()) { - recreate_swapchain(); + RecreateSwapchain(frame); } const vk::CommandBuffer cmdbuf{frame->cmdbuf}; @@ -488,4 +479,4 @@ void PresentManager::CopyToSwapchain(Frame* frame) { swapchain.Present(render_semaphore); } -} // namespace Vulkan
\ No newline at end of file +} // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_present_manager.h b/src/video_core/renderer_vulkan/vk_present_manager.h index 83e859416..a3d825fe6 100644 --- a/src/video_core/renderer_vulkan/vk_present_manager.h +++ b/src/video_core/renderer_vulkan/vk_present_manager.h @@ -54,14 +54,15 @@ public: /// Waits for the present thread to finish presenting all queued frames. void WaitPresent(); - /// This is called to notify the rendering backend of a surface change - void NotifySurfaceChanged(); - private: void PresentThread(std::stop_token token); void CopyToSwapchain(Frame* frame); + void CopyToSwapchainImpl(Frame* frame); + + void RecreateSwapchain(Frame* frame); + private: const vk::Instance& instance; Core::Frontend::EmuWindow& render_window; @@ -76,16 +77,13 @@ private: std::queue<Frame*> free_queue; std::condition_variable_any frame_cv; std::condition_variable free_cv; - std::condition_variable recreate_surface_cv; std::mutex swapchain_mutex; - std::mutex recreate_surface_mutex; std::mutex queue_mutex; std::mutex free_mutex; std::jthread present_thread; bool blit_supported; bool use_present_thread; std::size_t image_count{}; - void* last_render_surface{}; }; } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp index 29e0b797b..2edaafa7e 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp @@ -1,139 +1,1555 @@ -// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later -#include <algorithm> #include <cstddef> +#include <limits> +#include <map> +#include <memory> +#include <span> +#include <type_traits> +#include <unordered_map> #include <utility> #include <vector> +#include "common/bit_util.h" +#include "common/common_types.h" +#include "core/memory.h" +#include "video_core/engines/draw_manager.h" +#include "video_core/query_cache/query_cache.h" +#include "video_core/renderer_vulkan/vk_buffer_cache.h" +#include "video_core/renderer_vulkan/vk_compute_pass.h" #include "video_core/renderer_vulkan/vk_query_cache.h" #include "video_core/renderer_vulkan/vk_resource_pool.h" #include "video_core/renderer_vulkan/vk_scheduler.h" +#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" +#include "video_core/renderer_vulkan/vk_update_descriptor.h" #include "video_core/vulkan_common/vulkan_device.h" +#include "video_core/vulkan_common/vulkan_memory_allocator.h" #include "video_core/vulkan_common/vulkan_wrapper.h" namespace Vulkan { -using VideoCore::QueryType; +using Tegra::Engines::Maxwell3D; +using VideoCommon::QueryType; namespace { +class SamplesQueryBank : public VideoCommon::BankBase { +public: + static constexpr size_t BANK_SIZE = 256; + static constexpr size_t QUERY_SIZE = 8; + explicit SamplesQueryBank(const Device& device_, size_t index_) + : BankBase(BANK_SIZE), device{device_}, index{index_} { + const auto& dev = device.GetLogical(); + query_pool = dev.CreateQueryPool({ + .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO, + .pNext = nullptr, + .flags = 0, + .queryType = VK_QUERY_TYPE_OCCLUSION, + .queryCount = BANK_SIZE, + .pipelineStatistics = 0, + }); + Reset(); + } -constexpr std::array QUERY_TARGETS = {VK_QUERY_TYPE_OCCLUSION}; + ~SamplesQueryBank() = default; -constexpr VkQueryType GetTarget(QueryType type) { - return 
QUERY_TARGETS[static_cast<std::size_t>(type)]; -} + void Reset() override { + ASSERT(references == 0); + VideoCommon::BankBase::Reset(); + const auto& dev = device.GetLogical(); + dev.ResetQueryPool(*query_pool, 0, BANK_SIZE); + host_results.fill(0ULL); + next_bank = 0; + } + + void Sync(size_t start, size_t size) { + const auto& dev = device.GetLogical(); + const VkResult query_result = dev.GetQueryResults( + *query_pool, static_cast<u32>(start), static_cast<u32>(size), sizeof(u64) * size, + &host_results[start], sizeof(u64), VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); + switch (query_result) { + case VK_SUCCESS: + return; + case VK_ERROR_DEVICE_LOST: + device.ReportLoss(); + [[fallthrough]]; + default: + throw vk::Exception(query_result); + } + } + + VkQueryPool GetInnerPool() { + return *query_pool; + } + + size_t GetIndex() const { + return index; + } + + const std::array<u64, BANK_SIZE>& GetResults() const { + return host_results; + } + + size_t next_bank; + +private: + const Device& device; + const size_t index; + vk::QueryPool query_pool; + std::array<u64, BANK_SIZE> host_results; +}; + +using BaseStreamer = VideoCommon::SimpleStreamer<VideoCommon::HostQueryBase>; + +struct HostSyncValues { + VAddr address; + size_t size; + size_t offset; + + static constexpr bool GeneratesBaseBuffer = false; +}; + +class SamplesStreamer : public BaseStreamer { +public: + explicit SamplesStreamer(size_t id_, QueryCacheRuntime& runtime_, + VideoCore::RasterizerInterface* rasterizer_, const Device& device_, + Scheduler& scheduler_, const MemoryAllocator& memory_allocator_, + ComputePassDescriptorQueue& compute_pass_descriptor_queue, + DescriptorPool& descriptor_pool) + : BaseStreamer(id_), runtime{runtime_}, rasterizer{rasterizer_}, device{device_}, + scheduler{scheduler_}, memory_allocator{memory_allocator_} { + current_bank = nullptr; + current_query = nullptr; + ammend_value = 0; + acumulation_value = 0; + queries_prefix_scan_pass = std::make_unique<QueriesPrefixScanPass>( + device, scheduler, descriptor_pool, compute_pass_descriptor_queue); + + const VkBufferCreateInfo buffer_ci = { + .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, + .pNext = nullptr, + .flags = 0, + .size = 8, + .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | + VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .queueFamilyIndexCount = 0, + .pQueueFamilyIndices = nullptr, + }; + accumulation_buffer = memory_allocator.CreateBuffer(buffer_ci, MemoryUsage::DeviceLocal); + scheduler.RequestOutsideRenderPassOperationContext(); + scheduler.Record([buffer = *accumulation_buffer](vk::CommandBuffer cmdbuf) { + cmdbuf.FillBuffer(buffer, 0, 8, 0); + }); + } + + ~SamplesStreamer() = default; + + void StartCounter() override { + if (has_started) { + return; + } + ReserveHostQuery(); + scheduler.Record([query_pool = current_query_pool, + query_index = current_bank_slot](vk::CommandBuffer cmdbuf) { + const bool use_precise = Settings::IsGPULevelHigh(); + cmdbuf.BeginQuery(query_pool, static_cast<u32>(query_index), + use_precise ? 
VK_QUERY_CONTROL_PRECISE_BIT : 0); + }); + has_started = true; + } + + void PauseCounter() override { + if (!has_started) { + return; + } + scheduler.Record([query_pool = current_query_pool, + query_index = current_bank_slot](vk::CommandBuffer cmdbuf) { + cmdbuf.EndQuery(query_pool, static_cast<u32>(query_index)); + }); + has_started = false; + } + + void ResetCounter() override { + if (has_started) { + PauseCounter(); + } + AbandonCurrentQuery(); + std::function<void()> func([this, counts = pending_flush_queries.size()] { + ammend_value = 0; + acumulation_value = 0; + }); + rasterizer->SyncOperation(std::move(func)); + accumulation_since_last_sync = false; + first_accumulation_checkpoint = std::min(first_accumulation_checkpoint, num_slots_used); + last_accumulation_checkpoint = std::max(last_accumulation_checkpoint, num_slots_used); + } + + void CloseCounter() override { + PauseCounter(); + } -} // Anonymous namespace + bool HasPendingSync() const override { + return !pending_sync.empty(); + } + + void SyncWrites() override { + if (sync_values_stash.empty()) { + return; + } -QueryPool::QueryPool(const Device& device_, Scheduler& scheduler, QueryType type_) - : ResourcePool{scheduler.GetMasterSemaphore(), GROW_STEP}, device{device_}, type{type_} {} + for (size_t i = 0; i < sync_values_stash.size(); i++) { + runtime.template SyncValues<HostSyncValues>(sync_values_stash[i], + *buffers[resolve_buffers[i]]); + } + + sync_values_stash.clear(); + } -QueryPool::~QueryPool() = default; + void PresyncWrites() override { + if (pending_sync.empty()) { + return; + } + PauseCounter(); + sync_values_stash.clear(); + sync_values_stash.emplace_back(); + std::vector<HostSyncValues>* sync_values = &sync_values_stash.back(); + sync_values->reserve(num_slots_used); + std::unordered_map<size_t, std::pair<size_t, size_t>> offsets; + resolve_buffers.clear(); + size_t resolve_buffer_index = ObtainBuffer<true>(num_slots_used); + resolve_buffers.push_back(resolve_buffer_index); + size_t base_offset = 0; -std::pair<VkQueryPool, u32> QueryPool::Commit() { - std::size_t index; - do { - index = CommitResource(); - } while (usage[index]); - usage[index] = true; + ApplyBanksWideOp<true>(pending_sync, [&](SamplesQueryBank* bank, size_t start, + size_t amount) { + size_t bank_id = bank->GetIndex(); + auto& resolve_buffer = buffers[resolve_buffer_index]; + VkQueryPool query_pool = bank->GetInnerPool(); + scheduler.RequestOutsideRenderPassOperationContext(); + scheduler.Record([start, amount, base_offset, query_pool, + buffer = *resolve_buffer](vk::CommandBuffer cmdbuf) { + const VkBufferMemoryBarrier copy_query_pool_barrier{ + .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .buffer = buffer, + .offset = base_offset, + .size = amount * SamplesQueryBank::QUERY_SIZE, + }; + + cmdbuf.CopyQueryPoolResults( + query_pool, static_cast<u32>(start), static_cast<u32>(amount), buffer, + static_cast<u32>(base_offset), SamplesQueryBank::QUERY_SIZE, + VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_64_BIT); + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, + VK_PIPELINE_STAGE_TRANSFER_BIT, 0, copy_query_pool_barrier); + }); + offsets[bank_id] = {start, base_offset}; + base_offset += amount * SamplesQueryBank::QUERY_SIZE; + }); + + // Convert queries + bool has_multi_queries = false; + for (auto q : pending_sync) { + 
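// Each pending query is visited once; queries flagged as rewritten or invalidated no longer need a host write and are skipped. +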
auto* query = GetQuery(q); + size_t sync_value_slot = 0; + if (True(query->flags & VideoCommon::QueryFlagBits::IsRewritten)) { + continue; + } + if (True(query->flags & VideoCommon::QueryFlagBits::IsInvalidated)) { + continue; + } + if (accumulation_since_last_sync || query->size_slots > 1) { + if (!has_multi_queries) { + has_multi_queries = true; + sync_values_stash.emplace_back(); + } + sync_value_slot = 1; + } + query->flags |= VideoCommon::QueryFlagBits::IsHostSynced; + auto loc_data = offsets[query->start_bank_id]; + sync_values_stash[sync_value_slot].emplace_back(HostSyncValues{ + .address = query->guest_address, + .size = SamplesQueryBank::QUERY_SIZE, + .offset = + loc_data.second + (query->start_slot - loc_data.first + query->size_slots - 1) * + SamplesQueryBank::QUERY_SIZE, + }); + } + + if (has_multi_queries) { + size_t intermediary_buffer_index = ObtainBuffer<false>(num_slots_used); + resolve_buffers.push_back(intermediary_buffer_index); + queries_prefix_scan_pass->Run(*accumulation_buffer, *buffers[intermediary_buffer_index], + *buffers[resolve_buffer_index], num_slots_used, + std::min(first_accumulation_checkpoint, num_slots_used), + last_accumulation_checkpoint); + + } else { + scheduler.RequestOutsideRenderPassOperationContext(); + scheduler.Record([buffer = *accumulation_buffer](vk::CommandBuffer cmdbuf) { + cmdbuf.FillBuffer(buffer, 0, 8, 0); + }); + } + + ReplicateCurrentQueryIfNeeded(); + std::function<void()> func([this] { ammend_value = acumulation_value; }); + rasterizer->SyncOperation(std::move(func)); + AbandonCurrentQuery(); + num_slots_used = 0; + first_accumulation_checkpoint = std::numeric_limits<size_t>::max(); + last_accumulation_checkpoint = 0; + accumulation_since_last_sync = has_multi_queries; + pending_sync.clear(); + } - return {*pools[index / GROW_STEP], static_cast<u32>(index % GROW_STEP)}; + size_t WriteCounter(VAddr address, bool has_timestamp, u32 value, + [[maybe_unused]] std::optional<u32> subreport) override { + PauseCounter(); + auto index = BuildQuery(); + auto* new_query = GetQuery(index); + new_query->guest_address = address; + new_query->value = 0; + new_query->flags &= ~VideoCommon::QueryFlagBits::IsOrphan; + if (has_timestamp) { + new_query->flags |= VideoCommon::QueryFlagBits::HasTimestamp; + } + if (!current_query) { + new_query->flags |= VideoCommon::QueryFlagBits::IsFinalValueSynced; + return index; + } + new_query->start_bank_id = current_query->start_bank_id; + new_query->size_banks = current_query->size_banks; + new_query->start_slot = current_query->start_slot; + new_query->size_slots = current_query->size_slots; + ApplyBankOp(new_query, [](SamplesQueryBank* bank, size_t start, size_t amount) { + bank->AddReference(amount); + }); + pending_sync.push_back(index); + pending_flush_queries.push_back(index); + return index; + } + + bool HasUnsyncedQueries() const override { + return !pending_flush_queries.empty(); + } + + void PushUnsyncedQueries() override { + PauseCounter(); + current_bank->Close(); + { + std::scoped_lock lk(flush_guard); + pending_flush_sets.emplace_back(std::move(pending_flush_queries)); + } + } + + void PopUnsyncedQueries() override { + std::vector<size_t> current_flush_queries; + { + std::scoped_lock lk(flush_guard); + current_flush_queries = std::move(pending_flush_sets.front()); + pending_flush_sets.pop_front(); + } + ApplyBanksWideOp<false>( + current_flush_queries, + [](SamplesQueryBank* bank, size_t start, size_t amount) { bank->Sync(start, amount); }); + for (auto q : current_flush_queries) { + auto* query = 
GetQuery(q); + u64 total = 0; + ApplyBankOp(query, [&total](SamplesQueryBank* bank, size_t start, size_t amount) { + const auto& results = bank->GetResults(); + for (size_t i = 0; i < amount; i++) { + total += results[start + i]; + } + }); + query->value = total; + query->flags |= VideoCommon::QueryFlagBits::IsFinalValueSynced; + } + } + +private: + template <typename Func> + void ApplyBankOp(VideoCommon::HostQueryBase* query, Func&& func) { + size_t size_slots = query->size_slots; + if (size_slots == 0) { + return; + } + size_t bank_id = query->start_bank_id; + size_t banks_set = query->size_banks; + size_t start_slot = query->start_slot; + for (size_t i = 0; i < banks_set; i++) { + auto& the_bank = bank_pool.GetBank(bank_id); + size_t amount = std::min(the_bank.Size() - start_slot, size_slots); + func(&the_bank, start_slot, amount); + bank_id = the_bank.next_bank - 1; + start_slot = 0; + size_slots -= amount; + } + } + + template <bool is_ordered, typename Func> + void ApplyBanksWideOp(std::vector<size_t>& queries, Func&& func) { + std::conditional_t<is_ordered, std::map<size_t, std::pair<size_t, size_t>>, + std::unordered_map<size_t, std::pair<size_t, size_t>>> + indexer; + for (auto q : queries) { + auto* query = GetQuery(q); + ApplyBankOp(query, [&indexer](SamplesQueryBank* bank, size_t start, size_t amount) { + auto id_ = bank->GetIndex(); + auto pair = indexer.try_emplace(id_, std::numeric_limits<size_t>::max(), + std::numeric_limits<size_t>::min()); + auto& current_pair = pair.first->second; + current_pair.first = std::min(current_pair.first, start); + current_pair.second = std::max(current_pair.second, amount + start); + }); + } + for (auto& cont : indexer) { + func(&bank_pool.GetBank(cont.first), cont.second.first, + cont.second.second - cont.second.first); + } + } + + void ReserveBank() { + current_bank_id = + bank_pool.ReserveBank([this](std::deque<SamplesQueryBank>& queue, size_t index) { + queue.emplace_back(device, index); + }); + if (current_bank) { + current_bank->next_bank = current_bank_id + 1; + } + current_bank = &bank_pool.GetBank(current_bank_id); + current_query_pool = current_bank->GetInnerPool(); + } + + size_t ReserveBankSlot() { + if (!current_bank || current_bank->IsClosed()) { + ReserveBank(); + } + auto [built, index] = current_bank->Reserve(); + current_bank_slot = index; + return index; + } + + void ReserveHostQuery() { + size_t new_slot = ReserveBankSlot(); + current_bank->AddReference(1); + num_slots_used++; + if (current_query) { + size_t bank_id = current_query->start_bank_id; + size_t banks_set = current_query->size_banks - 1; + bool found = bank_id == current_bank_id; + while (!found && banks_set > 0) { + SamplesQueryBank& some_bank = bank_pool.GetBank(bank_id); + bank_id = some_bank.next_bank - 1; + found = bank_id == current_bank_id; + banks_set--; + } + if (!found) { + current_query->size_banks++; + } + current_query->size_slots++; + } else { + current_query_id = BuildQuery(); + current_query = GetQuery(current_query_id); + current_query->start_bank_id = static_cast<u32>(current_bank_id); + current_query->size_banks = 1; + current_query->start_slot = new_slot; + current_query->size_slots = 1; + } + } + + void Free(size_t query_id) override { + std::scoped_lock lk(guard); + auto* query = GetQuery(query_id); + ApplyBankOp(query, [](SamplesQueryBank* bank, size_t start, size_t amount) { + bank->CloseReference(amount); + }); + ReleaseQuery(query_id); + } + + void AbandonCurrentQuery() { + if (!current_query) { + return; + } + Free(current_query_id); + 
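// Null out the cached pointer and id so the next ReserveHostQuery() builds a fresh query. +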
current_query = nullptr; + current_query_id = 0; + } + + void ReplicateCurrentQueryIfNeeded() { + if (pending_sync.empty()) { + return; + } + if (!current_query) { + return; + } + auto index = BuildQuery(); + auto* new_query = GetQuery(index); + new_query->guest_address = 0; + new_query->value = 0; + new_query->flags &= ~VideoCommon::QueryFlagBits::IsOrphan; + new_query->start_bank_id = current_query->start_bank_id; + new_query->size_banks = current_query->size_banks; + new_query->start_slot = current_query->start_slot; + new_query->size_slots = current_query->size_slots; + ApplyBankOp(new_query, [](SamplesQueryBank* bank, size_t start, size_t amount) { + bank->AddReference(amount); + }); + pending_flush_queries.push_back(index); + std::function<void()> func([this, index] { + auto* query = GetQuery(index); + query->value += GetAmmendValue(); + SetAccumulationValue(query->value); + Free(index); + }); + rasterizer->SyncOperation(std::move(func)); + } + + template <bool is_resolve> + size_t ObtainBuffer(size_t num_needed) { + const size_t log_2 = std::max<size_t>(11U, Common::Log2Ceil64(num_needed)); + if constexpr (is_resolve) { + if (resolve_table[log_2] != 0) { + return resolve_table[log_2] - 1; + } + } else { + if (intermediary_table[log_2] != 0) { + return intermediary_table[log_2] - 1; + } + } + const VkBufferCreateInfo buffer_ci = { + .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, + .pNext = nullptr, + .flags = 0, + .size = SamplesQueryBank::QUERY_SIZE * (1ULL << log_2), + .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | + VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .queueFamilyIndexCount = 0, + .pQueueFamilyIndices = nullptr, + }; + buffers.emplace_back(memory_allocator.CreateBuffer(buffer_ci, MemoryUsage::DeviceLocal)); + if constexpr (is_resolve) { + resolve_table[log_2] = buffers.size(); + } else { + intermediary_table[log_2] = buffers.size(); + } + return buffers.size() - 1; + } + + QueryCacheRuntime& runtime; + VideoCore::RasterizerInterface* rasterizer; + const Device& device; + Scheduler& scheduler; + const MemoryAllocator& memory_allocator; + VideoCommon::BankPool<SamplesQueryBank> bank_pool; + std::deque<vk::Buffer> buffers; + std::array<size_t, 32> resolve_table{}; + std::array<size_t, 32> intermediary_table{}; + vk::Buffer accumulation_buffer; + std::deque<std::vector<HostSyncValues>> sync_values_stash; + std::vector<size_t> resolve_buffers; + + // syncing queue + std::vector<size_t> pending_sync; + + // flush levels + std::vector<size_t> pending_flush_queries; + std::deque<std::vector<size_t>> pending_flush_sets; + + // State Machine + size_t current_bank_slot; + size_t current_bank_id; + SamplesQueryBank* current_bank; + VkQueryPool current_query_pool; + size_t current_query_id; + size_t num_slots_used{}; + size_t first_accumulation_checkpoint{}; + size_t last_accumulation_checkpoint{}; + bool accumulation_since_last_sync{}; + VideoCommon::HostQueryBase* current_query; + bool has_started{}; + std::mutex flush_guard; + + std::unique_ptr<QueriesPrefixScanPass> queries_prefix_scan_pass; +}; + +// Transform feedback queries +class TFBQueryBank : public VideoCommon::BankBase { +public: + static constexpr size_t BANK_SIZE = 1024; + static constexpr size_t QUERY_SIZE = 4; + explicit TFBQueryBank(Scheduler& scheduler_, const MemoryAllocator& memory_allocator, + size_t index_) + : BankBase(BANK_SIZE), scheduler{scheduler_}, index{index_} { + const VkBufferCreateInfo buffer_ci = { + .sType = 
VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, + .pNext = nullptr, + .flags = 0, + .size = QUERY_SIZE * BANK_SIZE, + .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .queueFamilyIndexCount = 0, + .pQueueFamilyIndices = nullptr, + }; + buffer = memory_allocator.CreateBuffer(buffer_ci, MemoryUsage::DeviceLocal); + } + + ~TFBQueryBank() = default; + + void Reset() override { + ASSERT(references == 0); + VideoCommon::BankBase::Reset(); + } + + void Sync(StagingBufferRef& stagging_buffer, size_t extra_offset, size_t start, size_t size) { + scheduler.RequestOutsideRenderPassOperationContext(); + scheduler.Record([this, dst_buffer = stagging_buffer.buffer, extra_offset, start, + size](vk::CommandBuffer cmdbuf) { + std::array<VkBufferCopy, 1> copy{VkBufferCopy{ + .srcOffset = start * QUERY_SIZE, + .dstOffset = extra_offset, + .size = size * QUERY_SIZE, + }}; + cmdbuf.CopyBuffer(*buffer, dst_buffer, copy); + }); + } + + size_t GetIndex() const { + return index; + } + + VkBuffer GetBuffer() const { + return *buffer; + } + +private: + Scheduler& scheduler; + const size_t index; + vk::Buffer buffer; +}; + +class PrimitivesSucceededStreamer; + +class TFBCounterStreamer : public BaseStreamer { +public: + explicit TFBCounterStreamer(size_t id_, QueryCacheRuntime& runtime_, const Device& device_, + Scheduler& scheduler_, const MemoryAllocator& memory_allocator_, + StagingBufferPool& staging_pool_) + : BaseStreamer(id_), runtime{runtime_}, device{device_}, scheduler{scheduler_}, + memory_allocator{memory_allocator_}, staging_pool{staging_pool_} { + buffers_count = 0; + current_bank = nullptr; + counter_buffers.fill(VK_NULL_HANDLE); + offsets.fill(0); + last_queries.fill(0); + last_queries_stride.fill(1); + const VkBufferCreateInfo buffer_ci = { + .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, + .pNext = nullptr, + .flags = 0, + .size = TFBQueryBank::QUERY_SIZE * NUM_STREAMS, + .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | + VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .queueFamilyIndexCount = 0, + .pQueueFamilyIndices = nullptr, + }; + + counters_buffer = memory_allocator.CreateBuffer(buffer_ci, MemoryUsage::DeviceLocal); + for (auto& c : counter_buffers) { + c = *counters_buffer; + } + size_t base_offset = 0; + for (auto& o : offsets) { + o = base_offset; + base_offset += TFBQueryBank::QUERY_SIZE; + } + } + + ~TFBCounterStreamer() = default; + + void StartCounter() override { + FlushBeginTFB(); + has_started = true; + } + + void PauseCounter() override { + CloseCounter(); + } + + void ResetCounter() override { + CloseCounter(); + } + + void CloseCounter() override { + if (has_flushed_end_pending) { + FlushEndTFB(); + } + runtime.View3DRegs([this](Maxwell3D& maxwell3d) { + if (maxwell3d.regs.transform_feedback_enabled == 0) { + streams_mask = 0; + has_started = false; + } + }); + } + + bool HasPendingSync() const override { + return !pending_sync.empty(); + } + + void SyncWrites() override { + CloseCounter(); + std::unordered_map<size_t, std::vector<HostSyncValues>> sync_values_stash; + for (auto q : pending_sync) { + auto* query = GetQuery(q); + if (True(query->flags & VideoCommon::QueryFlagBits::IsRewritten)) { + continue; + } + if (True(query->flags & VideoCommon::QueryFlagBits::IsInvalidated)) { + continue; + } + query->flags |= VideoCommon::QueryFlagBits::IsHostSynced; + sync_values_stash.try_emplace(query->start_bank_id); + 
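// Group the host writes per bank so each bank's counter buffer is flushed with a single SyncValues call below. +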
sync_values_stash[query->start_bank_id].emplace_back(HostSyncValues{ + .address = query->guest_address, + .size = TFBQueryBank::QUERY_SIZE, + .offset = query->start_slot * TFBQueryBank::QUERY_SIZE, + }); + } + for (auto& p : sync_values_stash) { + auto& bank = bank_pool.GetBank(p.first); + runtime.template SyncValues<HostSyncValues>(p.second, bank.GetBuffer()); + } + pending_sync.clear(); + } + + size_t WriteCounter(VAddr address, bool has_timestamp, u32 value, + std::optional<u32> subreport_) override { + auto index = BuildQuery(); + auto* new_query = GetQuery(index); + new_query->guest_address = address; + new_query->value = 0; + new_query->flags &= ~VideoCommon::QueryFlagBits::IsOrphan; + if (has_timestamp) { + new_query->flags |= VideoCommon::QueryFlagBits::HasTimestamp; + } + if (!subreport_) { + new_query->flags |= VideoCommon::QueryFlagBits::IsFinalValueSynced; + return index; + } + const size_t subreport = static_cast<size_t>(*subreport_); + last_queries[subreport] = address; + if ((streams_mask & (1ULL << subreport)) == 0) { + new_query->flags |= VideoCommon::QueryFlagBits::IsFinalValueSynced; + return index; + } + CloseCounter(); + auto [bank_slot, data_slot] = ProduceCounterBuffer(subreport); + new_query->start_bank_id = static_cast<u32>(bank_slot); + new_query->size_banks = 1; + new_query->start_slot = static_cast<u32>(data_slot); + new_query->size_slots = 1; + pending_sync.push_back(index); + pending_flush_queries.push_back(index); + return index; + } + + std::optional<std::pair<VAddr, size_t>> GetLastQueryStream(size_t stream) { + if (last_queries[stream] != 0) { + std::pair<VAddr, size_t> result(last_queries[stream], last_queries_stride[stream]); + return result; + } + return std::nullopt; + } + + Maxwell3D::Regs::PrimitiveTopology GetOutputTopology() const { + return out_topology; + } + + bool HasUnsyncedQueries() const override { + return !pending_flush_queries.empty(); + } + + void PushUnsyncedQueries() override { + CloseCounter(); + auto staging_ref = staging_pool.Request( + pending_flush_queries.size() * TFBQueryBank::QUERY_SIZE, MemoryUsage::Download, true); + size_t offset_base = staging_ref.offset; + for (auto q : pending_flush_queries) { + auto* query = GetQuery(q); + auto& bank = bank_pool.GetBank(query->start_bank_id); + bank.Sync(staging_ref, offset_base, query->start_slot, 1); + offset_base += TFBQueryBank::QUERY_SIZE; + bank.CloseReference(); + } + static constexpr VkMemoryBarrier WRITE_BARRIER{ + .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT, + }; + scheduler.RequestOutsideRenderPassOperationContext(); + scheduler.Record([](vk::CommandBuffer cmdbuf) { + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, WRITE_BARRIER); + }); + + std::scoped_lock lk(flush_guard); + for (auto& str : free_queue) { + staging_pool.FreeDeferred(str); + } + free_queue.clear(); + download_buffers.emplace_back(staging_ref); + pending_flush_sets.emplace_back(std::move(pending_flush_queries)); + } + + void PopUnsyncedQueries() override { + StagingBufferRef staging_ref; + std::vector<size_t> flushed_queries; + { + std::scoped_lock lk(flush_guard); + staging_ref = download_buffers.front(); + flushed_queries = std::move(pending_flush_sets.front()); + download_buffers.pop_front(); + pending_flush_sets.pop_front(); + } + + size_t offset_base = staging_ref.offset; + for (auto q : flushed_queries) { + auto* query 
= GetQuery(q); + u32 result = 0; + std::memcpy(&result, staging_ref.mapped_span.data() + offset_base, sizeof(u32)); + query->value = static_cast<u64>(result); + query->flags |= VideoCommon::QueryFlagBits::IsFinalValueSynced; + offset_base += TFBQueryBank::QUERY_SIZE; + } + + { + std::scoped_lock lk(flush_guard); + free_queue.emplace_back(staging_ref); + } + } + +private: + void FlushBeginTFB() { + if (has_flushed_end_pending) [[unlikely]] { + return; + } + has_flushed_end_pending = true; + if (!has_started || buffers_count == 0) { + scheduler.Record([](vk::CommandBuffer cmdbuf) { + cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr); + }); + UpdateBuffers(); + return; + } + scheduler.Record([this, total = static_cast<u32>(buffers_count)](vk::CommandBuffer cmdbuf) { + cmdbuf.BeginTransformFeedbackEXT(0, total, counter_buffers.data(), offsets.data()); + }); + UpdateBuffers(); + } + + void FlushEndTFB() { + if (!has_flushed_end_pending) [[unlikely]] { + UNREACHABLE(); + return; + } + has_flushed_end_pending = false; + + if (buffers_count == 0) { + scheduler.Record([](vk::CommandBuffer cmdbuf) { + cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); + }); + } else { + scheduler.Record([this, + total = static_cast<u32>(buffers_count)](vk::CommandBuffer cmdbuf) { + cmdbuf.EndTransformFeedbackEXT(0, total, counter_buffers.data(), offsets.data()); + }); + } + } + + void UpdateBuffers() { + last_queries.fill(0); + last_queries_stride.fill(1); + runtime.View3DRegs([this](Maxwell3D& maxwell3d) { + buffers_count = 0; + out_topology = maxwell3d.draw_manager->GetDrawState().topology; + for (size_t i = 0; i < Maxwell3D::Regs::NumTransformFeedbackBuffers; i++) { + const auto& tf = maxwell3d.regs.transform_feedback; + if (tf.buffers[i].enable == 0) { + continue; + } + const size_t stream = tf.controls[i].stream; + last_queries_stride[stream] = tf.controls[i].stride; + streams_mask |= 1ULL << stream; + buffers_count = std::max<size_t>(buffers_count, stream + 1); + } + }); + } + + std::pair<size_t, size_t> ProduceCounterBuffer(size_t stream) { + if (current_bank == nullptr || current_bank->IsClosed()) { + current_bank_id = + bank_pool.ReserveBank([this](std::deque<TFBQueryBank>& queue, size_t index) { + queue.emplace_back(scheduler, memory_allocator, index); + }); + current_bank = &bank_pool.GetBank(current_bank_id); + } + auto [dont_care, other] = current_bank->Reserve(); + const size_t slot = other; // workaround to compile bug. 
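+ // ('slot' rebinds the structured binding, presumably because some compilers cannot capture structured bindings in the Record lambda below.)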
+ current_bank->AddReference(); + + static constexpr VkMemoryBarrier READ_BARRIER{ + .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT, + .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT, + }; + static constexpr VkMemoryBarrier WRITE_BARRIER{ + .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT, + }; + scheduler.RequestOutsideRenderPassOperationContext(); + scheduler.Record([dst_buffer = current_bank->GetBuffer(), + src_buffer = counter_buffers[stream], src_offset = offsets[stream], + slot](vk::CommandBuffer cmdbuf) { + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT, + VK_PIPELINE_STAGE_TRANSFER_BIT, 0, READ_BARRIER); + std::array<VkBufferCopy, 1> copy{VkBufferCopy{ + .srcOffset = src_offset, + .dstOffset = slot * TFBQueryBank::QUERY_SIZE, + .size = TFBQueryBank::QUERY_SIZE, + }}; + cmdbuf.CopyBuffer(src_buffer, dst_buffer, copy); + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, + 0, WRITE_BARRIER); + }); + return {current_bank_id, slot}; + } + + friend class PrimitivesSucceededStreamer; + + static constexpr size_t NUM_STREAMS = 4; + + QueryCacheRuntime& runtime; + const Device& device; + Scheduler& scheduler; + const MemoryAllocator& memory_allocator; + StagingBufferPool& staging_pool; + VideoCommon::BankPool<TFBQueryBank> bank_pool; + size_t current_bank_id; + TFBQueryBank* current_bank; + vk::Buffer counters_buffer; + + // syncing queue + std::vector<size_t> pending_sync; + + // flush levels + std::vector<size_t> pending_flush_queries; + std::deque<StagingBufferRef> download_buffers; + std::deque<std::vector<size_t>> pending_flush_sets; + std::vector<StagingBufferRef> free_queue; + std::mutex flush_guard; + + // state machine + bool has_started{}; + bool has_flushed_end_pending{}; + size_t buffers_count{}; + std::array<VkBuffer, NUM_STREAMS> counter_buffers{}; + std::array<VkDeviceSize, NUM_STREAMS> offsets{}; + std::array<VAddr, NUM_STREAMS> last_queries; + std::array<size_t, NUM_STREAMS> last_queries_stride; + Maxwell3D::Regs::PrimitiveTopology out_topology; + u64 streams_mask; +}; + +class PrimitivesQueryBase : public VideoCommon::QueryBase { +public: + // Default constructor + PrimitivesQueryBase() + : VideoCommon::QueryBase(0, VideoCommon::QueryFlagBits::IsHostManaged, 0) {} + + // Parameterized constructor + PrimitivesQueryBase(bool has_timestamp, VAddr address) + : VideoCommon::QueryBase(address, VideoCommon::QueryFlagBits::IsHostManaged, 0) { + if (has_timestamp) { + flags |= VideoCommon::QueryFlagBits::HasTimestamp; + } + } + + u64 stride{}; + VAddr dependant_address{}; + Maxwell3D::Regs::PrimitiveTopology topology{Maxwell3D::Regs::PrimitiveTopology::Points}; + size_t dependant_index{}; + bool dependant_manage{}; +}; + +class PrimitivesSucceededStreamer : public VideoCommon::SimpleStreamer<PrimitivesQueryBase> { +public: + explicit PrimitivesSucceededStreamer(size_t id_, QueryCacheRuntime& runtime_, + TFBCounterStreamer& tfb_streamer_, + Core::Memory::Memory& cpu_memory_) + : VideoCommon::SimpleStreamer<PrimitivesQueryBase>(id_), runtime{runtime_}, + tfb_streamer{tfb_streamer_}, cpu_memory{cpu_memory_} { + MakeDependent(&tfb_streamer); + } + + ~PrimitivesSucceededStreamer() = default; + + size_t WriteCounter(VAddr address, bool has_timestamp, u32 value, + std::optional<u32> subreport_) override { + auto index = BuildQuery(); + auto* 
new_query = GetQuery(index); + new_query->guest_address = address; + new_query->value = 0; + if (has_timestamp) { + new_query->flags |= VideoCommon::QueryFlagBits::HasTimestamp; + } + if (!subreport_) { + new_query->flags |= VideoCommon::QueryFlagBits::IsFinalValueSynced; + return index; + } + const size_t subreport = static_cast<size_t>(*subreport_); + auto dependant_address_opt = tfb_streamer.GetLastQueryStream(subreport); + bool must_manage_dependance = false; + new_query->topology = tfb_streamer.GetOutputTopology(); + if (dependant_address_opt) { + auto [dep_address, stride] = *dependant_address_opt; + new_query->dependant_address = dep_address; + new_query->stride = stride; + } else { + new_query->dependant_index = + tfb_streamer.WriteCounter(address, has_timestamp, value, subreport_); + auto* dependant_query = tfb_streamer.GetQuery(new_query->dependant_index); + dependant_query->flags |= VideoCommon::QueryFlagBits::IsInvalidated; + must_manage_dependance = true; + if (True(dependant_query->flags & VideoCommon::QueryFlagBits::IsFinalValueSynced)) { + new_query->value = 0; + new_query->flags |= VideoCommon::QueryFlagBits::IsFinalValueSynced; + if (must_manage_dependance) { + tfb_streamer.Free(new_query->dependant_index); + } + return index; + } + new_query->stride = 1; + runtime.View3DRegs([new_query, subreport](Maxwell3D& maxwell3d) { + for (size_t i = 0; i < Maxwell3D::Regs::NumTransformFeedbackBuffers; i++) { + const auto& tf = maxwell3d.regs.transform_feedback; + if (tf.buffers[i].enable == 0) { + continue; + } + if (tf.controls[i].stream != subreport) { + continue; + } + new_query->stride = tf.controls[i].stride; + break; + } + }); + } + + new_query->dependant_manage = must_manage_dependance; + pending_flush_queries.push_back(index); + return index; + } + + bool HasUnsyncedQueries() const override { + return !pending_flush_queries.empty(); + } + + void PushUnsyncedQueries() override { + std::scoped_lock lk(flush_guard); + pending_flush_sets.emplace_back(std::move(pending_flush_queries)); + pending_flush_queries.clear(); + } + + void PopUnsyncedQueries() override { + std::vector<size_t> flushed_queries; + { + std::scoped_lock lk(flush_guard); + flushed_queries = std::move(pending_flush_sets.front()); + pending_flush_sets.pop_front(); + } + + for (auto q : flushed_queries) { + auto* query = GetQuery(q); + if (True(query->flags & VideoCommon::QueryFlagBits::IsFinalValueSynced)) { + continue; + } + + query->flags |= VideoCommon::QueryFlagBits::IsFinalValueSynced; + u64 num_vertices = 0; + if (query->dependant_manage) { + auto* dependant_query = tfb_streamer.GetQuery(query->dependant_index); + num_vertices = dependant_query->value / query->stride; + tfb_streamer.Free(query->dependant_index); + } else { + u8* pointer = cpu_memory.GetPointer(query->dependant_address); + u32 result; + std::memcpy(&result, pointer, sizeof(u32)); + num_vertices = static_cast<u64>(result) / query->stride; + } + query->value = [&]() -> u64 { + switch (query->topology) { + case Maxwell3D::Regs::PrimitiveTopology::Points: + return num_vertices; + case Maxwell3D::Regs::PrimitiveTopology::Lines: + return num_vertices / 2; + case Maxwell3D::Regs::PrimitiveTopology::LineLoop: + return (num_vertices / 2) + 1; + case Maxwell3D::Regs::PrimitiveTopology::LineStrip: + return num_vertices - 1; + case Maxwell3D::Regs::PrimitiveTopology::Patches: + case Maxwell3D::Regs::PrimitiveTopology::Triangles: + case Maxwell3D::Regs::PrimitiveTopology::TrianglesAdjacency: + return num_vertices / 3; + case 
Maxwell3D::Regs::PrimitiveTopology::TriangleFan: + case Maxwell3D::Regs::PrimitiveTopology::TriangleStrip: + case Maxwell3D::Regs::PrimitiveTopology::TriangleStripAdjacency: + return num_vertices - 2; + case Maxwell3D::Regs::PrimitiveTopology::Quads: + return num_vertices / 4; + case Maxwell3D::Regs::PrimitiveTopology::Polygon: + return 1U; + default: + return num_vertices; + } + }(); + } + } + +private: + QueryCacheRuntime& runtime; + TFBCounterStreamer& tfb_streamer; + Core::Memory::Memory& cpu_memory; + + // syncing queue + std::vector<size_t> pending_sync; + + // flush levels + std::vector<size_t> pending_flush_queries; + std::deque<std::vector<size_t>> pending_flush_sets; + std::mutex flush_guard; +}; + +} // namespace + +struct QueryCacheRuntimeImpl { + QueryCacheRuntimeImpl(QueryCacheRuntime& runtime, VideoCore::RasterizerInterface* rasterizer_, + Core::Memory::Memory& cpu_memory_, Vulkan::BufferCache& buffer_cache_, + const Device& device_, const MemoryAllocator& memory_allocator_, + Scheduler& scheduler_, StagingBufferPool& staging_pool_, + ComputePassDescriptorQueue& compute_pass_descriptor_queue, + DescriptorPool& descriptor_pool) + : rasterizer{rasterizer_}, cpu_memory{cpu_memory_}, + buffer_cache{buffer_cache_}, device{device_}, + memory_allocator{memory_allocator_}, scheduler{scheduler_}, staging_pool{staging_pool_}, + guest_streamer(0, runtime), + sample_streamer(static_cast<size_t>(QueryType::ZPassPixelCount64), runtime, rasterizer, + device, scheduler, memory_allocator, compute_pass_descriptor_queue, + descriptor_pool), + tfb_streamer(static_cast<size_t>(QueryType::StreamingByteCount), runtime, device, + scheduler, memory_allocator, staging_pool), + primitives_succeeded_streamer( + static_cast<size_t>(QueryType::StreamingPrimitivesSucceeded), runtime, tfb_streamer, + cpu_memory_), + primitives_needed_minus_suceeded_streamer( + static_cast<size_t>(QueryType::StreamingPrimitivesNeededMinusSucceeded), runtime, 0u), + hcr_setup{}, hcr_is_set{}, is_hcr_running{}, maxwell3d{} { + + hcr_setup.sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT; + hcr_setup.pNext = nullptr; + hcr_setup.flags = 0; + + conditional_resolve_pass = std::make_unique<ConditionalRenderingResolvePass>( + device, scheduler, descriptor_pool, compute_pass_descriptor_queue); + + const VkBufferCreateInfo buffer_ci = { + .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, + .pNext = nullptr, + .flags = 0, + .size = sizeof(u32), + .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | + VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .queueFamilyIndexCount = 0, + .pQueueFamilyIndices = nullptr, + }; + hcr_resolve_buffer = memory_allocator.CreateBuffer(buffer_ci, MemoryUsage::DeviceLocal); + } + + VideoCore::RasterizerInterface* rasterizer; + Core::Memory::Memory& cpu_memory; + Vulkan::BufferCache& buffer_cache; + + const Device& device; + const MemoryAllocator& memory_allocator; + Scheduler& scheduler; + StagingBufferPool& staging_pool; + + // Streamers + VideoCommon::GuestStreamer<QueryCacheParams> guest_streamer; + SamplesStreamer sample_streamer; + TFBCounterStreamer tfb_streamer; + PrimitivesSucceededStreamer primitives_succeeded_streamer; + VideoCommon::StubStreamer<QueryCacheParams> primitives_needed_minus_suceeded_streamer; + + std::vector<std::pair<VAddr, VAddr>> little_cache; + std::vector<std::pair<VkBuffer, VkDeviceSize>> buffers_to_upload_to; + std::vector<size_t> redirect_cache; + std::vector<std::vector<VkBufferCopy>> 
copies_setup; + + // Host conditional rendering data + std::unique_ptr<ConditionalRenderingResolvePass> conditional_resolve_pass; + vk::Buffer hcr_resolve_buffer; + VkConditionalRenderingBeginInfoEXT hcr_setup; + VkBuffer hcr_buffer; + size_t hcr_offset; + bool hcr_is_set; + bool is_hcr_running; + + // maxwell3d + Maxwell3D* maxwell3d; +}; + +QueryCacheRuntime::QueryCacheRuntime(VideoCore::RasterizerInterface* rasterizer, + Core::Memory::Memory& cpu_memory_, + Vulkan::BufferCache& buffer_cache_, const Device& device_, + const MemoryAllocator& memory_allocator_, + Scheduler& scheduler_, StagingBufferPool& staging_pool_, + ComputePassDescriptorQueue& compute_pass_descriptor_queue, + DescriptorPool& descriptor_pool) { + impl = std::make_unique<QueryCacheRuntimeImpl>( + *this, rasterizer, cpu_memory_, buffer_cache_, device_, memory_allocator_, scheduler_, + staging_pool_, compute_pass_descriptor_queue, descriptor_pool); } -void QueryPool::Allocate(std::size_t begin, std::size_t end) { - usage.resize(end); +void QueryCacheRuntime::Bind3DEngine(Maxwell3D* maxwell3d) { + impl->maxwell3d = maxwell3d; +} - pools.push_back(device.GetLogical().CreateQueryPool({ - .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO, - .pNext = nullptr, - .flags = 0, - .queryType = GetTarget(type), - .queryCount = static_cast<u32>(end - begin), - .pipelineStatistics = 0, - })); +template <typename Func> +void QueryCacheRuntime::View3DRegs(Func&& func) { + if (impl->maxwell3d) { + func(*impl->maxwell3d); + } +} + +void QueryCacheRuntime::EndHostConditionalRendering() { + PauseHostConditionalRendering(); + impl->hcr_is_set = false; + impl->is_hcr_running = false; + impl->hcr_buffer = nullptr; + impl->hcr_offset = 0; +} + +void QueryCacheRuntime::PauseHostConditionalRendering() { + if (!impl->hcr_is_set) { + return; + } + if (impl->is_hcr_running) { + impl->scheduler.Record( + [](vk::CommandBuffer cmdbuf) { cmdbuf.EndConditionalRenderingEXT(); }); + } + impl->is_hcr_running = false; } -void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) { - const auto it = - std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) { - return query_pool == *pool; +void QueryCacheRuntime::ResumeHostConditionalRendering() { + if (!impl->hcr_is_set) { + return; + } + if (!impl->is_hcr_running) { + impl->scheduler.Record([hcr_setup = impl->hcr_setup](vk::CommandBuffer cmdbuf) { + cmdbuf.BeginConditionalRenderingEXT(hcr_setup); }); + } + impl->is_hcr_running = true; +} - if (it != std::end(pools)) { - const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it); - usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false; +void QueryCacheRuntime::HostConditionalRenderingCompareValueImpl(VideoCommon::LookupData object, + bool is_equal) { + { + std::scoped_lock lk(impl->buffer_cache.mutex); + static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::FullSynchronize; + const auto post_op = VideoCommon::ObtainBufferOperation::DoNothing; + const auto [buffer, offset] = + impl->buffer_cache.ObtainCPUBuffer(object.address, 8, sync_info, post_op); + impl->hcr_buffer = buffer->Handle(); + impl->hcr_offset = offset; + } + if (impl->hcr_is_set) { + if (impl->hcr_setup.buffer == impl->hcr_buffer && + impl->hcr_setup.offset == impl->hcr_offset) { + ResumeHostConditionalRendering(); + return; + } + PauseHostConditionalRendering(); } + impl->hcr_setup.buffer = impl->hcr_buffer; + impl->hcr_setup.offset = impl->hcr_offset; + impl->hcr_setup.flags = is_equal ? 
VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT : 0; + impl->hcr_is_set = true; + impl->is_hcr_running = false; + ResumeHostConditionalRendering(); } -QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, - Core::Memory::Memory& cpu_memory_, const Device& device_, - Scheduler& scheduler_) - : QueryCacheBase{rasterizer_, cpu_memory_}, device{device_}, scheduler{scheduler_}, - query_pools{ - QueryPool{device_, scheduler_, QueryType::SamplesPassed}, - } {} - -QueryCache::~QueryCache() { - // TODO(Rodrigo): This is a hack to destroy all HostCounter instances before the base class - // destructor is called. The query cache should be redesigned to have a proper ownership model - // instead of using shared pointers. - for (size_t query_type = 0; query_type < VideoCore::NumQueryTypes; ++query_type) { - auto& stream = Stream(static_cast<QueryType>(query_type)); - stream.Update(false); - stream.Reset(); +void QueryCacheRuntime::HostConditionalRenderingCompareBCImpl(VAddr address, bool is_equal) { + VkBuffer to_resolve; + u32 to_resolve_offset; + { + std::scoped_lock lk(impl->buffer_cache.mutex); + static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::NoSynchronize; + const auto post_op = VideoCommon::ObtainBufferOperation::DoNothing; + const auto [buffer, offset] = + impl->buffer_cache.ObtainCPUBuffer(address, 24, sync_info, post_op); + to_resolve = buffer->Handle(); + to_resolve_offset = static_cast<u32>(offset); } + if (impl->is_hcr_running) { + PauseHostConditionalRendering(); + } + impl->conditional_resolve_pass->Resolve(*impl->hcr_resolve_buffer, to_resolve, + to_resolve_offset, false); + impl->hcr_setup.buffer = *impl->hcr_resolve_buffer; + impl->hcr_setup.offset = 0; + impl->hcr_setup.flags = is_equal ? 0 : VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT; + impl->hcr_is_set = true; + impl->is_hcr_running = false; + ResumeHostConditionalRendering(); } -std::pair<VkQueryPool, u32> QueryCache::AllocateQuery(QueryType type) { - return query_pools[static_cast<std::size_t>(type)].Commit(); +bool QueryCacheRuntime::HostConditionalRenderingCompareValue(VideoCommon::LookupData object_1, + [[maybe_unused]] bool qc_dirty) { + if (!impl->device.IsExtConditionalRendering()) { + return false; + } + HostConditionalRenderingCompareValueImpl(object_1, false); + return true; } -void QueryCache::Reserve(QueryType type, std::pair<VkQueryPool, u32> query) { - query_pools[static_cast<std::size_t>(type)].Reserve(query); +bool QueryCacheRuntime::HostConditionalRenderingCompareValues(VideoCommon::LookupData object_1, + VideoCommon::LookupData object_2, + bool qc_dirty, bool equal_check) { + if (!impl->device.IsExtConditionalRendering()) { + return false; + } + + const auto check_in_bc = [&](VAddr address) { + return impl->buffer_cache.IsRegionGpuModified(address, 8); + }; + const auto check_value = [&](VAddr address) { + u8* ptr = impl->cpu_memory.GetPointer(address); + u64 value{}; + std::memcpy(&value, ptr, sizeof(value)); + return value == 0; + }; + std::array<VideoCommon::LookupData*, 2> objects{&object_1, &object_2}; + std::array<bool, 2> is_in_bc{}; + std::array<bool, 2> is_in_qc{}; + std::array<bool, 2> is_in_ac{}; + std::array<bool, 2> is_null{}; + { + std::scoped_lock lk(impl->buffer_cache.mutex); + for (size_t i = 0; i < 2; i++) { + is_in_qc[i] = objects[i]->found_query != nullptr; + is_in_bc[i] = !is_in_qc[i] && check_in_bc(objects[i]->address); + is_in_ac[i] = is_in_qc[i] || is_in_bc[i]; + } + } + + if (!is_in_ac[0] && !is_in_ac[1]) { + EndHostConditionalRendering(); + return false; + 
} + + if (!qc_dirty && !is_in_bc[0] && !is_in_bc[1]) { + EndHostConditionalRendering(); + return false; + } + + const bool is_gpu_high = Settings::IsGPULevelHigh(); + if (!is_gpu_high && impl->device.GetDriverID() == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS) { + return true; + } + + for (size_t i = 0; i < 2; i++) { + is_null[i] = !is_in_ac[i] && check_value(objects[i]->address); + } + + for (size_t i = 0; i < 2; i++) { + if (is_null[i]) { + size_t j = (i + 1) % 2; + HostConditionalRenderingCompareValueImpl(*objects[j], equal_check); + return true; + } + } + + if (!is_gpu_high) { + return true; + } + + if (!is_in_bc[0] && !is_in_bc[1]) { + // Both queries are in query cache, it's best to just flush. + return true; + } + HostConditionalRenderingCompareBCImpl(object_1.address, equal_check); + return true; } -HostCounter::HostCounter(QueryCache& cache_, std::shared_ptr<HostCounter> dependency_, - QueryType type_) - : HostCounterBase{std::move(dependency_)}, cache{cache_}, type{type_}, - query{cache_.AllocateQuery(type_)}, tick{cache_.GetScheduler().CurrentTick()} { - const vk::Device* logical = &cache.GetDevice().GetLogical(); - cache.GetScheduler().Record([logical, query_ = query](vk::CommandBuffer cmdbuf) { - const bool use_precise = Settings::IsGPULevelHigh(); - logical->ResetQueryPool(query_.first, query_.second, 1); - cmdbuf.BeginQuery(query_.first, query_.second, - use_precise ? VK_QUERY_CONTROL_PRECISE_BIT : 0); - }); +QueryCacheRuntime::~QueryCacheRuntime() = default; + +VideoCommon::StreamerInterface* QueryCacheRuntime::GetStreamerInterface(QueryType query_type) { + switch (query_type) { + case QueryType::Payload: + return &impl->guest_streamer; + case QueryType::ZPassPixelCount64: + return &impl->sample_streamer; + case QueryType::StreamingByteCount: + return &impl->tfb_streamer; + case QueryType::StreamingPrimitivesNeeded: + case QueryType::VtgPrimitivesOut: + case QueryType::StreamingPrimitivesSucceeded: + return &impl->primitives_succeeded_streamer; + case QueryType::StreamingPrimitivesNeededMinusSucceeded: + return &impl->primitives_needed_minus_suceeded_streamer; + default: + return nullptr; + } } -HostCounter::~HostCounter() { - cache.Reserve(type, query); +void QueryCacheRuntime::Barriers(bool is_prebarrier) { + static constexpr VkMemoryBarrier READ_BARRIER{ + .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT, + .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT, + }; + static constexpr VkMemoryBarrier WRITE_BARRIER{ + .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER, + .pNext = nullptr, + .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, + .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT, + }; + if (is_prebarrier) { + impl->scheduler.Record([](vk::CommandBuffer cmdbuf) { + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, + VK_PIPELINE_STAGE_TRANSFER_BIT, 0, READ_BARRIER); + }); + } else { + impl->scheduler.Record([](vk::CommandBuffer cmdbuf) { + cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, WRITE_BARRIER); + }); + } } -void HostCounter::EndQuery() { - cache.GetScheduler().Record([query_ = query](vk::CommandBuffer cmdbuf) { - cmdbuf.EndQuery(query_.first, query_.second); +template <typename SyncValuesType> +void QueryCacheRuntime::SyncValues(std::span<SyncValuesType> values, VkBuffer base_src_buffer) { + if (values.size() == 0) { + return; + } + impl->redirect_cache.clear(); + impl->little_cache.clear(); + size_t total_size 
= 0; + for (auto& sync_val : values) { + total_size += sync_val.size; + bool found = false; + VAddr base = Common::AlignDown(sync_val.address, Core::Memory::YUZU_PAGESIZE); + VAddr base_end = base + Core::Memory::YUZU_PAGESIZE; + for (size_t i = 0; i < impl->little_cache.size(); i++) { + const auto set_found = [&] { + impl->redirect_cache.push_back(i); + found = true; + }; + auto& loc = impl->little_cache[i]; + if (base < loc.second && loc.first < base_end) { + set_found(); + break; + } + if (loc.first == base_end) { + loc.first = base; + set_found(); + break; + } + if (loc.second == base) { + loc.second = base_end; + set_found(); + break; + } + } + if (!found) { + impl->redirect_cache.push_back(impl->little_cache.size()); + impl->little_cache.emplace_back(base, base_end); + } + } + + // Vulkan part. + std::scoped_lock lk(impl->buffer_cache.mutex); + impl->buffer_cache.BufferOperations([&] { + impl->buffers_to_upload_to.clear(); + for (auto& pair : impl->little_cache) { + static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::FullSynchronize; + const auto post_op = VideoCommon::ObtainBufferOperation::DoNothing; + const auto [buffer, offset] = impl->buffer_cache.ObtainCPUBuffer( + pair.first, static_cast<u32>(pair.second - pair.first), sync_info, post_op); + impl->buffers_to_upload_to.emplace_back(buffer->Handle(), offset); + } }); -} -u64 HostCounter::BlockingQuery(bool async) const { - if (!async) { - cache.GetScheduler().Wait(tick); - } - u64 data; - const VkResult query_result = cache.GetDevice().GetLogical().GetQueryResults( - query.first, query.second, 1, sizeof(data), &data, sizeof(data), - VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); - - switch (query_result) { - case VK_SUCCESS: - return data; - case VK_ERROR_DEVICE_LOST: - cache.GetDevice().ReportLoss(); - [[fallthrough]]; - default: - throw vk::Exception(query_result); + VkBuffer src_buffer; + [[maybe_unused]] StagingBufferRef ref; + impl->copies_setup.clear(); + impl->copies_setup.resize(impl->little_cache.size()); + if constexpr (SyncValuesType::GeneratesBaseBuffer) { + ref = impl->staging_pool.Request(total_size, MemoryUsage::Upload); + size_t current_offset = ref.offset; + size_t accumulated_size = 0; + for (size_t i = 0; i < values.size(); i++) { + size_t which_copy = impl->redirect_cache[i]; + impl->copies_setup[which_copy].emplace_back(VkBufferCopy{ + .srcOffset = current_offset + accumulated_size, + .dstOffset = impl->buffers_to_upload_to[which_copy].second + values[i].address - + impl->little_cache[which_copy].first, + .size = values[i].size, + }); + std::memcpy(ref.mapped_span.data() + accumulated_size, &values[i].value, + values[i].size); + accumulated_size += values[i].size; + } + src_buffer = ref.buffer; + } else { + for (size_t i = 0; i < values.size(); i++) { + size_t which_copy = impl->redirect_cache[i]; + impl->copies_setup[which_copy].emplace_back(VkBufferCopy{ + .srcOffset = values[i].offset, + .dstOffset = impl->buffers_to_upload_to[which_copy].second + values[i].address - + impl->little_cache[which_copy].first, + .size = values[i].size, + }); + } + src_buffer = base_src_buffer; } + + impl->scheduler.RequestOutsideRenderPassOperationContext(); + impl->scheduler.Record([src_buffer, dst_buffers = std::move(impl->buffers_to_upload_to), + vk_copies = std::move(impl->copies_setup)](vk::CommandBuffer cmdbuf) { + size_t size = dst_buffers.size(); + for (size_t i = 0; i < size; i++) { + cmdbuf.CopyBuffer(src_buffer, dst_buffers[i].first, vk_copies[i]); + } + }); } } // namespace Vulkan + 
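+// Explicit instantiation of the query cache template for the Vulkan runtime; the definitions are emitted in this translation unit rather than in every includer of the header.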
+namespace VideoCommon { + +template class QueryCacheBase<Vulkan::QueryCacheParams>; + +} // namespace VideoCommon diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h index c1b9552eb..e9a1ea169 100644 --- a/src/video_core/renderer_vulkan/vk_query_cache.h +++ b/src/video_core/renderer_vulkan/vk_query_cache.h @@ -1,101 +1,75 @@ -// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-3.0-or-later #pragma once -#include <cstddef> #include <memory> -#include <utility> -#include <vector> -#include "common/common_types.h" -#include "video_core/query_cache.h" -#include "video_core/renderer_vulkan/vk_resource_pool.h" -#include "video_core/vulkan_common/vulkan_wrapper.h" +#include "video_core/query_cache/query_cache_base.h" +#include "video_core/renderer_vulkan/vk_buffer_cache.h" namespace VideoCore { class RasterizerInterface; } +namespace VideoCommon { +class StreamerInterface; +} + namespace Vulkan { -class CachedQuery; class Device; -class HostCounter; -class QueryCache; class Scheduler; +class StagingBufferPool; -using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>; +struct QueryCacheRuntimeImpl; -class QueryPool final : public ResourcePool { +class QueryCacheRuntime { public: - explicit QueryPool(const Device& device, Scheduler& scheduler, VideoCore::QueryType type); - ~QueryPool() override; + explicit QueryCacheRuntime(VideoCore::RasterizerInterface* rasterizer, + Core::Memory::Memory& cpu_memory_, + Vulkan::BufferCache& buffer_cache_, const Device& device_, + const MemoryAllocator& memory_allocator_, Scheduler& scheduler_, + StagingBufferPool& staging_pool_, + ComputePassDescriptorQueue& compute_pass_descriptor_queue, + DescriptorPool& descriptor_pool); + ~QueryCacheRuntime(); - std::pair<VkQueryPool, u32> Commit(); + template <typename SyncValuesType> + void SyncValues(std::span<SyncValuesType> values, VkBuffer base_src_buffer = nullptr); - void Reserve(std::pair<VkQueryPool, u32> query); + void Barriers(bool is_prebarrier); -protected: - void Allocate(std::size_t begin, std::size_t end) override; + void EndHostConditionalRendering(); -private: - static constexpr std::size_t GROW_STEP = 512; + void PauseHostConditionalRendering(); - const Device& device; - const VideoCore::QueryType type; + void ResumeHostConditionalRendering(); - std::vector<vk::QueryPool> pools; - std::vector<bool> usage; -}; + bool HostConditionalRenderingCompareValue(VideoCommon::LookupData object_1, bool qc_dirty); -class QueryCache final - : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { -public: - explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, - Core::Memory::Memory& cpu_memory_, const Device& device_, - Scheduler& scheduler_); - ~QueryCache(); - - std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type); + bool HostConditionalRenderingCompareValues(VideoCommon::LookupData object_1, + VideoCommon::LookupData object_2, bool qc_dirty, + bool equal_check); - void Reserve(VideoCore::QueryType type, std::pair<VkQueryPool, u32> query); + VideoCommon::StreamerInterface* GetStreamerInterface(VideoCommon::QueryType query_type); - const Device& GetDevice() const noexcept { - return device; - } + void Bind3DEngine(Tegra::Engines::Maxwell3D* maxwell3d); - Scheduler& GetScheduler() const noexcept { - return scheduler; - } + 
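// Runs func against the bound Maxwell3D engine's registers, e.g. View3DRegs([](Maxwell3D& m3d) { ... }); a no-op when no 3D engine is bound. +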
template <typename Func> + void View3DRegs(Func&& func); private: - const Device& device; - Scheduler& scheduler; - std::array<QueryPool, VideoCore::NumQueryTypes> query_pools; + void HostConditionalRenderingCompareValueImpl(VideoCommon::LookupData object, bool is_equal); + void HostConditionalRenderingCompareBCImpl(VAddr address, bool is_equal); + friend struct QueryCacheRuntimeImpl; + std::unique_ptr<QueryCacheRuntimeImpl> impl; }; -class HostCounter final : public VideoCommon::HostCounterBase<QueryCache, HostCounter> { -public: - explicit HostCounter(QueryCache& cache_, std::shared_ptr<HostCounter> dependency_, - VideoCore::QueryType type_); - ~HostCounter(); - - void EndQuery(); - -private: - u64 BlockingQuery(bool async = false) const override; - - QueryCache& cache; - const VideoCore::QueryType type; - const std::pair<VkQueryPool, u32> query; - const u64 tick; +struct QueryCacheParams { + using RuntimeType = typename Vulkan::QueryCacheRuntime; }; -class CachedQuery : public VideoCommon::CachedQueryBase<HostCounter> { -public: - explicit CachedQuery(QueryCache&, VideoCore::QueryType, VAddr cpu_addr_, u8* host_ptr_) - : CachedQueryBase{cpu_addr_, host_ptr_} {} -}; +using QueryCache = VideoCommon::QueryCacheBase<QueryCacheParams>; } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 01e76a82c..83f2b6045 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -24,6 +24,7 @@ #include "video_core/renderer_vulkan/vk_compute_pipeline.h" #include "video_core/renderer_vulkan/vk_descriptor_pool.h" #include "video_core/renderer_vulkan/vk_pipeline_cache.h" +#include "video_core/renderer_vulkan/vk_query_cache.h" #include "video_core/renderer_vulkan/vk_rasterizer.h" #include "video_core/renderer_vulkan/vk_scheduler.h" #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" @@ -170,9 +171,11 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool, guest_descriptor_queue, compute_pass_descriptor_queue, descriptor_pool), buffer_cache(*this, cpu_memory_, buffer_cache_runtime), + query_cache_runtime(this, cpu_memory_, buffer_cache, device, memory_allocator, scheduler, + staging_pool, compute_pass_descriptor_queue, descriptor_pool), + query_cache(gpu, *this, cpu_memory_, query_cache_runtime), pipeline_cache(*this, device, scheduler, descriptor_pool, guest_descriptor_queue, render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()), - query_cache{*this, cpu_memory_, device, scheduler}, accelerate_dma(buffer_cache, texture_cache, scheduler), fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler), wfi_event(device.GetLogical().CreateEvent()) { @@ -189,14 +192,7 @@ void RasterizerVulkan::PrepareDraw(bool is_indexed, Func&& draw_func) { FlushWork(); gpu_memory->FlushCaching(); -#if ANDROID - if (Settings::IsGPULevelHigh()) { - // This is problematic on Android, disable on GPU Normal. 
- query_cache.UpdateCounters(); - } -#else - query_cache.UpdateCounters(); -#endif + query_cache.NotifySegment(true); GraphicsPipeline* const pipeline{pipeline_cache.CurrentGraphicsPipeline()}; if (!pipeline) { @@ -207,13 +203,12 @@ void RasterizerVulkan::PrepareDraw(bool is_indexed, Func&& draw_func) { pipeline->SetEngine(maxwell3d, gpu_memory); pipeline->Configure(is_indexed); - BeginTransformFeedback(); - UpdateDynamicStates(); + HandleTransformFeedback(); + query_cache.CounterEnable(VideoCommon::QueryType::ZPassPixelCount64, + maxwell3d->regs.zpass_pixel_count_enable); draw_func(); - - EndTransformFeedback(); } void RasterizerVulkan::Draw(bool is_indexed, u32 instance_count) { @@ -241,6 +236,14 @@ void RasterizerVulkan::DrawIndirect() { const auto indirect_buffer = buffer_cache.GetDrawIndirectBuffer(); const auto& buffer = indirect_buffer.first; const auto& offset = indirect_buffer.second; + if (params.is_byte_count) { + scheduler.Record([buffer_obj = buffer->Handle(), offset, + stride = params.stride](vk::CommandBuffer cmdbuf) { + cmdbuf.DrawIndirectByteCountEXT(1, 0, buffer_obj, offset, 0, + static_cast<u32>(stride)); + }); + return; + } if (params.include_count) { const auto count = buffer_cache.GetDrawIndirectCount(); const auto& draw_buffer = count.first; @@ -280,20 +283,15 @@ void RasterizerVulkan::DrawTexture() { SCOPE_EXIT({ gpu.TickWork(); }); FlushWork(); -#if ANDROID - if (Settings::IsGPULevelHigh()) { - // This is problematic on Android, disable on GPU Normal. - query_cache.UpdateCounters(); - } -#else - query_cache.UpdateCounters(); -#endif + query_cache.NotifySegment(true); texture_cache.SynchronizeGraphicsDescriptors(); texture_cache.UpdateRenderTargets(false); UpdateDynamicStates(); + query_cache.CounterEnable(VideoCommon::QueryType::ZPassPixelCount64, + maxwell3d->regs.zpass_pixel_count_enable); const auto& draw_texture_state = maxwell3d->draw_manager->GetDrawTextureState(); const auto& sampler = texture_cache.GetGraphicsSampler(draw_texture_state.src_sampler); const auto& texture = texture_cache.GetImageView(draw_texture_state.src_texture); @@ -316,14 +314,9 @@ void RasterizerVulkan::Clear(u32 layer_count) { FlushWork(); gpu_memory->FlushCaching(); -#if ANDROID - if (Settings::IsGPULevelHigh()) { - // This is problematic on Android, disable on GPU Normal. 
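For the is_byte_count path added to DrawIndirect above, VK_EXT_transform_feedback derives the vertex count on the GPU from the byte counter written by a previous transform feedback pass, so the CPU never reads it back. A sketch of the underlying call (handles and values are placeholders):

    // Per the extension, the device computes roughly:
    //   vertexCount = max(counterValue - counterOffset, 0) / vertexStride
    vkCmdDrawIndirectByteCountEXT(cmdbuf,
                                  /*instanceCount=*/1,
                                  /*firstInstance=*/0,
                                  counter_buffer,          // holds the byte count
                                  /*counterBufferOffset=*/offset,
                                  /*counterOffset=*/0,
                                  /*vertexStride=*/stride);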
- query_cache.UpdateCounters(); - } -#else - query_cache.UpdateCounters(); -#endif + query_cache.NotifySegment(true); + query_cache.CounterEnable(VideoCommon::QueryType::ZPassPixelCount64, + maxwell3d->regs.zpass_pixel_count_enable); auto& regs = maxwell3d->regs; const bool use_color = regs.clear_surface.R || regs.clear_surface.G || regs.clear_surface.B || @@ -429,7 +422,8 @@ void RasterizerVulkan::Clear(u32 layer_count) { return; } - if (use_stencil && regs.stencil_front_mask != 0xFF && regs.stencil_front_mask != 0) { + if (use_stencil && framebuffer->HasAspectStencilBit() && regs.stencil_front_mask != 0xFF && + regs.stencil_front_mask != 0) { Region2D dst_region = { Offset2D{.x = clear_rect.rect.offset.x, .y = clear_rect.rect.offset.y}, Offset2D{.x = clear_rect.rect.offset.x + static_cast<s32>(clear_rect.rect.extent.width), @@ -482,13 +476,13 @@ void RasterizerVulkan::DispatchCompute() { scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); }); } -void RasterizerVulkan::ResetCounter(VideoCore::QueryType type) { - query_cache.ResetCounter(type); +void RasterizerVulkan::ResetCounter(VideoCommon::QueryType type) { + query_cache.CounterReset(type); } -void RasterizerVulkan::Query(GPUVAddr gpu_addr, VideoCore::QueryType type, - std::optional<u64> timestamp) { - query_cache.Query(gpu_addr, type, timestamp); +void RasterizerVulkan::Query(GPUVAddr gpu_addr, VideoCommon::QueryType type, + VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) { + query_cache.CounterReport(gpu_addr, type, flags, payload, subreport); } void RasterizerVulkan::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, @@ -669,8 +663,8 @@ void RasterizerVulkan::SignalReference() { fence_manager.SignalReference(); } -void RasterizerVulkan::ReleaseFences() { - fence_manager.WaitPendingFences(); +void RasterizerVulkan::ReleaseFences(bool force) { + fence_manager.WaitPendingFences(force); } void RasterizerVulkan::FlushAndInvalidateRegion(VAddr addr, u64 size, @@ -694,6 +688,8 @@ void RasterizerVulkan::WaitForIdle() { flags |= VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT; } + query_cache.NotifyWFI(); + scheduler.RequestOutsideRenderPassOperationContext(); scheduler.Record([event = *wfi_event, flags](vk::CommandBuffer cmdbuf) { cmdbuf.SetEvent(event, flags); @@ -737,19 +733,7 @@ void RasterizerVulkan::TickFrame() { bool RasterizerVulkan::AccelerateConditionalRendering() { gpu_memory->FlushCaching(); - if (Settings::IsGPULevelHigh()) { - // TODO(Blinkhawk): Reimplement Host conditional rendering. - return false; - } - // Medium / Low Hack: stub any checks on queries written into the buffer cache. 
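The Medium/Low stub being removed here is superseded (next lines) by query_cache.AccelerateHostConditionalRendering(), which on capable drivers can bottom out in VK_EXT_conditional_rendering; the raw begin/end wrappers are added near the end of this diff. A minimal sketch, assuming a 32-bit predicate already written to predicate_buffer at predicate_offset:

    const VkConditionalRenderingBeginInfoEXT begin_info{
        .sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT,
        .pNext = nullptr,
        .buffer = predicate_buffer, // needs CONDITIONAL_RENDERING buffer usage
        .offset = predicate_offset,
        .flags = 0, // default: skip the work while the 32-bit value is zero
    };
    vkCmdBeginConditionalRenderingEXT(cmdbuf, &begin_info);
    // ... predicated draws/clears/dispatches ...
    vkCmdEndConditionalRenderingEXT(cmdbuf);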
- const GPUVAddr condition_address{maxwell3d->regs.render_enable.Address()}; - Maxwell::ReportSemaphore::Compare cmp; - if (gpu_memory->IsMemoryDirty(condition_address, sizeof(cmp), - VideoCommon::CacheType::BufferCache | - VideoCommon::CacheType::QueryCache)) { - return true; - } - return false; + return query_cache.AccelerateHostConditionalRendering(); } bool RasterizerVulkan::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Surface& src, @@ -795,6 +779,7 @@ bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config, if (!image_view) { return false; } + query_cache.NotifySegment(false); screen_info.image = image_view->ImageHandle(); screen_info.image_view = image_view->Handle(Shader::TextureType::Color2D); screen_info.width = image_view->size.width; @@ -933,31 +918,18 @@ void RasterizerVulkan::UpdateDynamicStates() { } } -void RasterizerVulkan::BeginTransformFeedback() { +void RasterizerVulkan::HandleTransformFeedback() { const auto& regs = maxwell3d->regs; - if (regs.transform_feedback_enabled == 0) { - return; - } if (!device.IsExtTransformFeedbackSupported()) { LOG_ERROR(Render_Vulkan, "Transform feedbacks used but not supported"); return; } - UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderType::TessellationInit) || - regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation)); - scheduler.Record( - [](vk::CommandBuffer cmdbuf) { cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr); }); -} - -void RasterizerVulkan::EndTransformFeedback() { - const auto& regs = maxwell3d->regs; - if (regs.transform_feedback_enabled == 0) { - return; - } - if (!device.IsExtTransformFeedbackSupported()) { - return; + query_cache.CounterEnable(VideoCommon::QueryType::StreamingByteCount, + regs.transform_feedback_enabled); + if (regs.transform_feedback_enabled != 0) { + UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderType::TessellationInit) || + regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation)); } - scheduler.Record( - [](vk::CommandBuffer cmdbuf) { cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); }); } void RasterizerVulkan::UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs) { @@ -1043,15 +1015,37 @@ void RasterizerVulkan::UpdateDepthBias(Tegra::Engines::Maxwell3D::Regs& regs) { regs.zeta.format == Tegra::DepthFormat::X8Z24_UNORM || regs.zeta.format == Tegra::DepthFormat::S8Z24_UNORM || regs.zeta.format == Tegra::DepthFormat::V8Z24_UNORM; - if (is_d24 && !device.SupportsD24DepthBuffer()) { + bool force_unorm = ([&] { + if (!is_d24 || device.SupportsD24DepthBuffer()) { + return false; + } + if (device.IsExtDepthBiasControlSupported()) { + return true; + } + if (!Settings::values.renderer_amdvlk_depth_bias_workaround) { + return false; + } // the base formulas can be obtained from here: // https://docs.microsoft.com/en-us/windows/win32/direct3d11/d3d10-graphics-programming-guide-output-merger-stage-depth-bias const double rescale_factor = static_cast<double>(1ULL << (32 - 24)) / (static_cast<double>(0x1.ep+127)); units = static_cast<float>(static_cast<double>(units) * rescale_factor); - } + return false; + })(); scheduler.Record([constant = units, clamp = regs.depth_bias_clamp, - factor = regs.slope_scale_depth_bias](vk::CommandBuffer cmdbuf) { + factor = regs.slope_scale_depth_bias, force_unorm, + precise = device.HasExactDepthBiasControl()](vk::CommandBuffer cmdbuf) { + if (force_unorm) { + VkDepthBiasRepresentationInfoEXT info{ + .sType = VK_STRUCTURE_TYPE_DEPTH_BIAS_REPRESENTATION_INFO_EXT, + .pNext = nullptr, + 
.depthBiasRepresentation = + VK_DEPTH_BIAS_REPRESENTATION_LEAST_REPRESENTABLE_VALUE_FORCE_UNORM_EXT, + .depthBiasExact = precise ? VK_TRUE : VK_FALSE, + }; + cmdbuf.SetDepthBias(constant, clamp, factor, &info); + return; + } cmdbuf.SetDepthBias(constant, clamp, factor); }); } diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h index b31982485..ad069556c 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.h +++ b/src/video_core/renderer_vulkan/vk_rasterizer.h @@ -84,8 +84,9 @@ public: void DrawTexture() override; void Clear(u32 layer_count) override; void DispatchCompute() override; - void ResetCounter(VideoCore::QueryType type) override; - void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override; + void ResetCounter(VideoCommon::QueryType type) override; + void Query(GPUVAddr gpu_addr, VideoCommon::QueryType type, + VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) override; void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override; void DisableGraphicsUniformBuffer(size_t stage, u32 index) override; void FlushAll() override; @@ -106,7 +107,7 @@ public: void SyncOperation(std::function<void()>&& func) override; void SignalSyncPoint(u32 value) override; void SignalReference() override; - void ReleaseFences() override; + void ReleaseFences(bool force = true) override; void FlushAndInvalidateRegion( VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override; void WaitForIdle() override; @@ -146,9 +147,7 @@ private: void UpdateDynamicStates(); - void BeginTransformFeedback(); - - void EndTransformFeedback(); + void HandleTransformFeedback(); void UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs); void UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs); @@ -195,8 +194,9 @@ private: TextureCache texture_cache; BufferCacheRuntime buffer_cache_runtime; BufferCache buffer_cache; - PipelineCache pipeline_cache; + QueryCacheRuntime query_cache_runtime; QueryCache query_cache; + PipelineCache pipeline_cache; AccelerateDMA accelerate_dma; FenceManager fence_manager; diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp index 89fd31b4f..3be7837f4 100644 --- a/src/video_core/renderer_vulkan/vk_scheduler.cpp +++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp @@ -243,10 +243,10 @@ void Scheduler::AllocateNewContext() { #if ANDROID if (Settings::IsGPULevelHigh()) { // This is problematic on Android, disable on GPU Normal. - query_cache->UpdateCounters(); + query_cache->NotifySegment(true); } #else - query_cache->UpdateCounters(); + query_cache->NotifySegment(true); #endif } } @@ -261,11 +261,12 @@ void Scheduler::EndPendingOperations() { #if ANDROID if (Settings::IsGPULevelHigh()) { // This is problematic on Android, disable on GPU Normal. 
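Stepping back to the UpdateDepthBias rework above: the force_unorm lambda folds three cases into one decision. A condensed restatement, with bools standing in for the device and settings queries:

    enum class BiasMode { Native, ForceUnorm, RescaleWorkaround };

    // is_d24: the guest depth target is a 24-bit UNORM format the host emulates
    BiasMode ChooseBiasMode(bool is_d24, bool host_has_d24, bool has_bias_control,
                            bool amdvlk_workaround) {
        if (!is_d24 || host_has_d24) {
            return BiasMode::Native; // formats agree, bias units already match
        }
        if (has_bias_control) {
            return BiasMode::ForceUnorm; // VK_EXT_depth_bias_control path
        }
        if (amdvlk_workaround) {
            return BiasMode::RescaleWorkaround; // numeric constant-factor rescale
        }
        return BiasMode::Native;
    }

ForceUnorm feeds the new SetDepthBias overload (see the vulkan_wrapper.h hunk below) that chains VkDepthBiasRepresentationInfoEXT into vkCmdSetDepthBias2EXT, asking the driver to compute the bias as if the attachment really were UNORM24.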
- query_cache->DisableStreams(); + // query_cache->DisableStreams(); } #else - query_cache->DisableStreams(); + // query_cache->DisableStreams(); #endif + query_cache->NotifySegment(false); EndRenderPass(); } diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h index 475c682eb..da03803aa 100644 --- a/src/video_core/renderer_vulkan/vk_scheduler.h +++ b/src/video_core/renderer_vulkan/vk_scheduler.h @@ -17,6 +17,11 @@ #include "video_core/renderer_vulkan/vk_master_semaphore.h" #include "video_core/vulkan_common/vulkan_wrapper.h" +namespace VideoCommon { +template <typename Trait> +class QueryCacheBase; +} + namespace Vulkan { class CommandPool; @@ -24,7 +29,8 @@ class Device; class Framebuffer; class GraphicsPipeline; class StateTracker; -class QueryCache; + +struct QueryCacheParams; /// The scheduler abstracts command buffer and fence management with an interface that's able to do /// OpenGL-like operations on Vulkan command buffers. @@ -63,7 +69,7 @@ public: void InvalidateState(); /// Assigns the query cache. - void SetQueryCache(QueryCache& query_cache_) { + void SetQueryCache(VideoCommon::QueryCacheBase<QueryCacheParams>& query_cache_) { query_cache = &query_cache_; } @@ -219,7 +225,7 @@ private: std::unique_ptr<MasterSemaphore> master_semaphore; std::unique_ptr<CommandPool> command_pool; - QueryCache* query_cache = nullptr; + VideoCommon::QueryCacheBase<QueryCacheParams>* query_cache = nullptr; vk::CommandBuffer current_cmdbuf; diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp index ce92f66ab..b278614e6 100644 --- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp +++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp @@ -24,25 +24,38 @@ using namespace Common::Literals; // Maximum potential alignment of a Vulkan buffer constexpr VkDeviceSize MAX_ALIGNMENT = 256; -// Maximum size to put elements in the stream buffer -constexpr VkDeviceSize MAX_STREAM_BUFFER_REQUEST_SIZE = 8_MiB; // Stream buffer size in bytes -constexpr VkDeviceSize STREAM_BUFFER_SIZE = 128_MiB; -constexpr VkDeviceSize REGION_SIZE = STREAM_BUFFER_SIZE / StagingBufferPool::NUM_SYNCS; +constexpr VkDeviceSize MAX_STREAM_BUFFER_SIZE = 128_MiB; -size_t Region(size_t iterator) noexcept { - return iterator / REGION_SIZE; +size_t GetStreamBufferSize(const Device& device) { + VkDeviceSize size{0}; + if (device.HasDebuggingToolAttached()) { + ForEachDeviceLocalHostVisibleHeap(device, [&size](size_t index, VkMemoryHeap& heap) { + size = std::max(size, heap.size); + }); + // If rebar is not supported, cut the max heap size to 40%. This will allow 2 captures to be + // loaded at the same time in RenderDoc. If rebar is supported, this shouldn't be an issue + // as the heap will be much larger. 
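To make the sizing here concrete, a worked example with assumed numbers:

    // RenderDoc attached, one 256 MiB device-local host-visible heap (no ReBAR):
    //   size = 256 MiB * 40 / 100            -> ~102 MiB (room for two captures)
    //   min(AlignUp(size, 256), 128 MiB)     -> ~102 MiB stream buffer
    // With ReBAR the heap is several GiB, the 40% cut below is skipped, and
    // the final min() clamps the stream buffer to the usual 128 MiB.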
+ if (size <= 256_MiB) { + size = size * 40 / 100; + } + } else { + size = MAX_STREAM_BUFFER_SIZE; + } + return std::min(Common::AlignUp(size, MAX_ALIGNMENT), MAX_STREAM_BUFFER_SIZE); } } // Anonymous namespace StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& memory_allocator_, Scheduler& scheduler_) - : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_} { + : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_}, + stream_buffer_size{GetStreamBufferSize(device)}, region_size{stream_buffer_size / + StagingBufferPool::NUM_SYNCS} { VkBufferCreateInfo stream_ci = { .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, .pNext = nullptr, .flags = 0, - .size = STREAM_BUFFER_SIZE, + .size = stream_buffer_size, .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, .sharingMode = VK_SHARING_MODE_EXCLUSIVE, @@ -63,7 +76,7 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem StagingBufferPool::~StagingBufferPool() = default; StagingBufferRef StagingBufferPool::Request(size_t size, MemoryUsage usage, bool deferred) { - if (!deferred && usage == MemoryUsage::Upload && size <= MAX_STREAM_BUFFER_REQUEST_SIZE) { + if (!deferred && usage == MemoryUsage::Upload && size <= region_size) { return GetStreamBuffer(size); } return GetStagingBuffer(size, usage, deferred); @@ -101,7 +114,7 @@ StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) { used_iterator = iterator; free_iterator = std::max(free_iterator, iterator + size); - if (iterator + size >= STREAM_BUFFER_SIZE) { + if (iterator + size >= stream_buffer_size) { std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + NUM_SYNCS, current_tick); used_iterator = 0; diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h index 5f69f08b1..d3deb9072 100644 --- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h +++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h @@ -90,6 +90,9 @@ private: void ReleaseCache(MemoryUsage usage); void ReleaseLevel(StagingBuffersCache& cache, size_t log2); + size_t Region(size_t iter) const noexcept { + return iter / region_size; + } const Device& device; MemoryAllocator& memory_allocator; @@ -97,6 +100,8 @@ private: vk::Buffer stream_buffer; std::span<u8> stream_pointer; + VkDeviceSize stream_buffer_size; + VkDeviceSize region_size; size_t iterator = 0; size_t used_iterator = 0; diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp index b3e17c332..00ab47268 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp @@ -120,19 +120,9 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) { return usage; } -/// Returns the preferred format for a VkImage -[[nodiscard]] PixelFormat StorageFormat(PixelFormat format) { - switch (format) { - case PixelFormat::A8B8G8R8_SRGB: - return PixelFormat::A8B8G8R8_UNORM; - default: - return format; - } -} - [[nodiscard]] VkImageCreateInfo MakeImageCreateInfo(const Device& device, const ImageInfo& info) { - const PixelFormat format = StorageFormat(info.format); - const auto format_info = MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, false, format); + const auto format_info = + MaxwellToVK::SurfaceFormat(device, 
FormatType::Optimal, false, info.format); VkImageCreateFlags flags{}; if (info.type == ImageType::e2D && info.resources.layers >= 6 && info.size.width == info.size.height && !device.HasBrokenCubeImageCompability()) { @@ -157,7 +147,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) { .arrayLayers = static_cast<u32>(info.resources.layers), .samples = ConvertSampleCount(info.num_samples), .tiling = VK_IMAGE_TILING_OPTIMAL, - .usage = ImageUsageFlags(format_info, format), + .usage = ImageUsageFlags(format_info, info.format), .sharingMode = VK_SHARING_MODE_EXCLUSIVE, .queueFamilyIndexCount = 0, .pQueueFamilyIndices = nullptr, @@ -186,6 +176,36 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) { return allocator.CreateImage(image_ci); } +[[nodiscard]] vk::ImageView MakeStorageView(const vk::Device& device, u32 level, VkImage image, + VkFormat format) { + static constexpr VkImageViewUsageCreateInfo storage_image_view_usage_create_info{ + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, + .pNext = nullptr, + .usage = VK_IMAGE_USAGE_STORAGE_BIT, + }; + return device.CreateImageView(VkImageViewCreateInfo{ + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .pNext = &storage_image_view_usage_create_info, + .flags = 0, + .image = image, + .viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY, + .format = format, + .components{ + .r = VK_COMPONENT_SWIZZLE_IDENTITY, + .g = VK_COMPONENT_SWIZZLE_IDENTITY, + .b = VK_COMPONENT_SWIZZLE_IDENTITY, + .a = VK_COMPONENT_SWIZZLE_IDENTITY, + }, + .subresourceRange{ + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = level, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = VK_REMAINING_ARRAY_LAYERS, + }, + }); +} + [[nodiscard]] VkImageAspectFlags ImageAspectMask(PixelFormat format) { switch (VideoCore::Surface::GetFormatType(format)) { case VideoCore::Surface::SurfaceType::ColorTexture: @@ -218,6 +238,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) { return any_r ? 
VK_IMAGE_ASPECT_STENCIL_BIT : VK_IMAGE_ASPECT_DEPTH_BIT; case PixelFormat::D16_UNORM: case PixelFormat::D32_FLOAT: + case PixelFormat::X8_D24_UNORM: return VK_IMAGE_ASPECT_DEPTH_BIT; case PixelFormat::S8_UINT: return VK_IMAGE_ASPECT_STENCIL_BIT; @@ -600,7 +621,7 @@ void CopyBufferToImage(vk::CommandBuffer cmdbuf, VkBuffer src_buffer, VkImage im } void TryTransformSwizzleIfNeeded(PixelFormat format, std::array<SwizzleSource, 4>& swizzle, - bool emulate_bgr565) { + bool emulate_bgr565, bool emulate_a4b4g4r4) { switch (format) { case PixelFormat::A1B5G5R5_UNORM: std::ranges::transform(swizzle, swizzle.begin(), SwapBlueRed); @@ -616,6 +637,11 @@ void TryTransformSwizzleIfNeeded(PixelFormat format, std::array<SwizzleSource, 4 case PixelFormat::G4R4_UNORM: std::ranges::transform(swizzle, swizzle.begin(), SwapGreenRed); break; + case PixelFormat::A4B4G4R4_UNORM: + if (emulate_a4b4g4r4) { + std::ranges::reverse(swizzle); + } + break; default: break; } @@ -822,6 +848,10 @@ TextureCacheRuntime::TextureCacheRuntime(const Device& device_, Scheduler& sched astc_decoder_pass.emplace(device, scheduler, descriptor_pool, staging_buffer_pool, compute_pass_descriptor_queue, memory_allocator); } + if (device.IsStorageImageMultisampleSupported()) { + msaa_copy_pass = std::make_unique<MSAACopyPass>( + device, scheduler, descriptor_pool, staging_buffer_pool, compute_pass_descriptor_queue); + } if (!device.IsKhrImageFormatListSupported()) { return; } @@ -1044,15 +1074,27 @@ void TextureCacheRuntime::BlitImage(Framebuffer* dst_framebuffer, ImageView& dst dst_region, src_region, filter, operation); return; } + ASSERT(src.format == dst.format); if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) { - if (!device.IsBlitDepthStencilSupported()) { + const auto format = src.format; + const auto can_blit_depth_stencil = [this, format] { + switch (format) { + case VideoCore::Surface::PixelFormat::D24_UNORM_S8_UINT: + case VideoCore::Surface::PixelFormat::S8_UINT_D24_UNORM: + return device.IsBlitDepth24Stencil8Supported(); + case VideoCore::Surface::PixelFormat::D32_FLOAT_S8_UINT: + return device.IsBlitDepth32Stencil8Supported(); + default: + UNREACHABLE(); + } + }(); + if (!can_blit_depth_stencil) { UNIMPLEMENTED_IF(is_src_msaa || is_dst_msaa); blit_image_helper.BlitDepthStencil(dst_framebuffer, src.DepthView(), src.StencilView(), dst_region, src_region, filter, operation); return; } } - ASSERT(src.format == dst.format); ASSERT(!(is_dst_msaa && !is_src_msaa)); ASSERT(operation == Fermi2D::Operation::SrcCopy); @@ -1159,6 +1201,9 @@ void TextureCacheRuntime::ConvertImage(Framebuffer* dst, ImageView& dst_view, Im if (src_view.format == PixelFormat::D24_UNORM_S8_UINT) { return blit_image_helper.ConvertS8D24ToABGR8(dst, src_view); } + if (src_view.format == PixelFormat::D32_FLOAT) { + return blit_image_helper.ConvertD32FToABGR8(dst, src_view); + } break; case PixelFormat::R32_FLOAT: if (src_view.format == PixelFormat::D32_FLOAT) { @@ -1278,7 +1323,11 @@ void TextureCacheRuntime::CopyImage(Image& dst, Image& src, void TextureCacheRuntime::CopyImageMSAA(Image& dst, Image& src, std::span<const VideoCommon::ImageCopy> copies) { - UNIMPLEMENTED_MSG("Copying images with different samples is not implemented in Vulkan."); + const bool msaa_to_non_msaa = src.info.num_samples > 1 && dst.info.num_samples == 1; + if (msaa_copy_pass) { + return msaa_copy_pass->CopyImage(dst, src, copies, msaa_to_non_msaa); + } + UNIMPLEMENTED_MSG("Copying images with different samples is not supported."); } u64 
TextureCacheRuntime::GetDeviceLocalMemory() const { @@ -1326,39 +1375,15 @@ Image::Image(TextureCacheRuntime& runtime_, const ImageInfo& info_, GPUVAddr gpu if (runtime->device.HasDebuggingToolAttached()) { original_image.SetObjectNameEXT(VideoCommon::Name(*this).c_str()); } - static constexpr VkImageViewUsageCreateInfo storage_image_view_usage_create_info{ - .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, - .pNext = nullptr, - .usage = VK_IMAGE_USAGE_STORAGE_BIT, - }; current_image = *original_image; + storage_image_views.resize(info.resources.levels); if (IsPixelFormatASTC(info.format) && !runtime->device.IsOptimalAstcSupported() && Settings::values.astc_recompression.GetValue() == Settings::AstcRecompression::Uncompressed) { const auto& device = runtime->device.GetLogical(); - storage_image_views.reserve(info.resources.levels); for (s32 level = 0; level < info.resources.levels; ++level) { - storage_image_views.push_back(device.CreateImageView(VkImageViewCreateInfo{ - .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, - .pNext = &storage_image_view_usage_create_info, - .flags = 0, - .image = *original_image, - .viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY, - .format = VK_FORMAT_A8B8G8R8_UNORM_PACK32, - .components{ - .r = VK_COMPONENT_SWIZZLE_IDENTITY, - .g = VK_COMPONENT_SWIZZLE_IDENTITY, - .b = VK_COMPONENT_SWIZZLE_IDENTITY, - .a = VK_COMPONENT_SWIZZLE_IDENTITY, - }, - .subresourceRange{ - .aspectMask = aspect_mask, - .baseMipLevel = static_cast<u32>(level), - .levelCount = 1, - .baseArrayLayer = 0, - .layerCount = VK_REMAINING_ARRAY_LAYERS, - }, - })); + storage_image_views[level] = + MakeStorageView(device, level, *original_image, VK_FORMAT_A8B8G8R8_UNORM_PACK32); } } } @@ -1489,6 +1514,17 @@ void Image::DownloadMemory(const StagingBufferRef& map, std::span<const BufferIm DownloadMemory(buffers, offsets, copies); } +VkImageView Image::StorageImageView(s32 level) noexcept { + auto& view = storage_image_views[level]; + if (!view) { + const auto format_info = + MaxwellToVK::SurfaceFormat(runtime->device, FormatType::Optimal, true, info.format); + view = + MakeStorageView(runtime->device.GetLogical(), level, current_image, format_info.format); + } + return *view; +} + bool Image::IsRescaled() const noexcept { return True(flags & ImageFlagBits::Rescaled); } @@ -1626,8 +1662,8 @@ bool Image::NeedsScaleHelper() const { return true; } static constexpr auto OPTIMAL_FORMAT = FormatType::Optimal; - const PixelFormat format = StorageFormat(info.format); - const auto vk_format = MaxwellToVK::SurfaceFormat(device, OPTIMAL_FORMAT, false, format).format; + const auto vk_format = + MaxwellToVK::SurfaceFormat(device, OPTIMAL_FORMAT, false, info.format).format; const auto blit_usage = VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT; const bool needs_blit_helper = !device.IsFormatSupported(vk_format, blit_usage, OPTIMAL_FORMAT); return needs_blit_helper; @@ -1649,7 +1685,8 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI }; if (!info.IsRenderTarget()) { swizzle = info.Swizzle(); - TryTransformSwizzleIfNeeded(format, swizzle, device->MustEmulateBGR565()); + TryTransformSwizzleIfNeeded(format, swizzle, device->MustEmulateBGR565(), + !device->IsExt4444FormatsSupported()); if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != 0) { std::ranges::transform(swizzle, swizzle.begin(), ConvertGreenRed); } diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h index 
565ce19a9..d6c5a15cc 100644 --- a/src/video_core/renderer_vulkan/vk_texture_cache.h +++ b/src/video_core/renderer_vulkan/vk_texture_cache.h @@ -117,6 +117,7 @@ public: BlitImageHelper& blit_image_helper; RenderPassCache& render_pass_cache; std::optional<ASTCDecoderPass> astc_decoder_pass; + std::unique_ptr<MSAACopyPass> msaa_copy_pass; const Settings::ResolutionScalingInfo& resolution; std::array<std::vector<VkFormat>, VideoCore::Surface::MaxPixelFormat> view_formats; @@ -161,15 +162,13 @@ public: return aspect_mask; } - [[nodiscard]] VkImageView StorageImageView(s32 level) const noexcept { - return *storage_image_views[level]; - } - /// Returns true when the image is already initialized and mark it as initialized [[nodiscard]] bool ExchangeInitialization() noexcept { return std::exchange(initialized, true); } + VkImageView StorageImageView(s32 level) noexcept; + bool IsRescaled() const noexcept; bool ScaleUp(bool ignore = false); diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp index e16cd5e73..5b3c7aa5a 100644 --- a/src/video_core/surface.cpp +++ b/src/video_core/surface.cpp @@ -85,6 +85,8 @@ PixelFormat PixelFormatFromDepthFormat(Tegra::DepthFormat format) { return PixelFormat::S8_UINT; case Tegra::DepthFormat::Z32_FLOAT_X24S8_UINT: return PixelFormat::D32_FLOAT_S8_UINT; + case Tegra::DepthFormat::X8Z24_UNORM: + return PixelFormat::X8_D24_UNORM; default: UNIMPLEMENTED_MSG("Unimplemented format={}", format); return PixelFormat::S8_UINT_D24_UNORM; @@ -202,6 +204,7 @@ PixelFormat PixelFormatFromRenderTargetFormat(Tegra::RenderTargetFormat format) PixelFormat PixelFormatFromGPUPixelFormat(Service::android::PixelFormat format) { switch (format) { case Service::android::PixelFormat::Rgba8888: + case Service::android::PixelFormat::Rgbx8888: return PixelFormat::A8B8G8R8_UNORM; case Service::android::PixelFormat::Rgb565: return PixelFormat::R5G6B5_UNORM; diff --git a/src/video_core/surface.h b/src/video_core/surface.h index 9b9c4d9bc..a5e8e2f62 100644 --- a/src/video_core/surface.h +++ b/src/video_core/surface.h @@ -115,6 +115,7 @@ enum class PixelFormat { // Depth formats D32_FLOAT = MaxColorFormat, D16_UNORM, + X8_D24_UNORM, MaxDepthFormat, @@ -251,6 +252,7 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_WIDTH_TABLE = {{ 1, // E5B9G9R9_FLOAT 1, // D32_FLOAT 1, // D16_UNORM + 1, // X8_D24_UNORM 1, // S8_UINT 1, // D24_UNORM_S8_UINT 1, // S8_UINT_D24_UNORM @@ -360,6 +362,7 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_HEIGHT_TABLE = {{ 1, // E5B9G9R9_FLOAT 1, // D32_FLOAT 1, // D16_UNORM + 1, // X8_D24_UNORM 1, // S8_UINT 1, // D24_UNORM_S8_UINT 1, // S8_UINT_D24_UNORM @@ -469,6 +472,7 @@ constexpr std::array<u8, MaxPixelFormat> BITS_PER_BLOCK_TABLE = {{ 32, // E5B9G9R9_FLOAT 32, // D32_FLOAT 16, // D16_UNORM + 32, // X8_D24_UNORM 8, // S8_UINT 32, // D24_UNORM_S8_UINT 32, // S8_UINT_D24_UNORM diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp index 11ced6c38..8c774f512 100644 --- a/src/video_core/texture_cache/format_lookup_table.cpp +++ b/src/video_core/texture_cache/format_lookup_table.cpp @@ -138,8 +138,16 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red, return PixelFormat::E5B9G9R9_FLOAT; case Hash(TextureFormat::Z32, FLOAT): return PixelFormat::D32_FLOAT; + case Hash(TextureFormat::Z32, FLOAT, UINT, UINT, UINT, LINEAR): + return PixelFormat::D32_FLOAT; case Hash(TextureFormat::Z16, UNORM): return PixelFormat::D16_UNORM; + case Hash(TextureFormat::Z16, UNORM, 
UINT, UINT, UINT, LINEAR): + return PixelFormat::D16_UNORM; + case Hash(TextureFormat::X8Z24, UNORM): + return PixelFormat::X8_D24_UNORM; + case Hash(TextureFormat::X8Z24, UNORM, UINT, UINT, UINT, LINEAR): + return PixelFormat::X8_D24_UNORM; case Hash(TextureFormat::Z24S8, UINT, UNORM, UNORM, UNORM, LINEAR): return PixelFormat::S8_UINT_D24_UNORM; case Hash(TextureFormat::Z24S8, UINT, UNORM, UINT, UINT, LINEAR): diff --git a/src/video_core/texture_cache/formatter.h b/src/video_core/texture_cache/formatter.h index 9ee57a076..cabbfcb2d 100644 --- a/src/video_core/texture_cache/formatter.h +++ b/src/video_core/texture_cache/formatter.h @@ -211,6 +211,8 @@ struct fmt::formatter<VideoCore::Surface::PixelFormat> : fmt::formatter<fmt::str return "D32_FLOAT"; case PixelFormat::D16_UNORM: return "D16_UNORM"; + case PixelFormat::X8_D24_UNORM: + return "X8_D24_UNORM"; case PixelFormat::S8_UINT: return "S8_UINT"; case PixelFormat::D24_UNORM_S8_UINT: diff --git a/src/video_core/texture_cache/image_base.h b/src/video_core/texture_cache/image_base.h index 55d49d017..0587d7b72 100644 --- a/src/video_core/texture_cache/image_base.h +++ b/src/video_core/texture_cache/image_base.h @@ -41,7 +41,7 @@ enum class ImageFlagBits : u32 { IsRescalable = 1 << 15, AsynchronousDecode = 1 << 16, - IsDecoding = 1 << 17, ///< Is currently being decoded asynchornously. + IsDecoding = 1 << 17, ///< Is currently being decoded asynchronously. }; DECLARE_ENUM_FLAG_OPERATORS(ImageFlagBits) diff --git a/src/video_core/texture_cache/image_view_base.cpp b/src/video_core/texture_cache/image_view_base.cpp index 0c5f4450d..18b9250f9 100644 --- a/src/video_core/texture_cache/image_view_base.cpp +++ b/src/video_core/texture_cache/image_view_base.cpp @@ -85,6 +85,7 @@ bool ImageViewBase::SupportsAnisotropy() const noexcept { // Depth formats case PixelFormat::D32_FLOAT: case PixelFormat::D16_UNORM: + case PixelFormat::X8_D24_UNORM: // Stencil formats case PixelFormat::S8_UINT: // DepthStencil formats diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp index 0f8ef4277..2e8160db0 100644 --- a/src/video_core/texture_cache/util.cpp +++ b/src/video_core/texture_cache/util.cpp @@ -1194,7 +1194,7 @@ std::optional<SubresourceBase> FindSubresource(const ImageInfo& candidate, const return std::nullopt; } } else { - // Format comaptibility is not relaxed, ensure we are creating a view on a compatible format + // Format compatibility is not relaxed, ensure we are creating a view on a compatible format if (!IsViewCompatible(existing.format, candidate.format, broken_views, native_bgr)) { return std::nullopt; } diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp index 617417040..876cec2e8 100644 --- a/src/video_core/vulkan_common/vulkan_device.cpp +++ b/src/video_core/vulkan_common/vulkan_device.cpp @@ -76,12 +76,20 @@ constexpr std::array VK_FORMAT_R32G32B32_SFLOAT{ VK_FORMAT_UNDEFINED, }; +constexpr std::array VK_FORMAT_A4B4G4R4_UNORM_PACK16{ + VK_FORMAT_R4G4B4A4_UNORM_PACK16, + VK_FORMAT_UNDEFINED, +}; + } // namespace Alternatives enum class NvidiaArchitecture { - AmpereOrNewer, + KeplerOrOlder, + Maxwell, + Pascal, + Volta, Turing, - VoltaOrOlder, + AmpereOrNewer, }; template <typename T> @@ -110,6 +118,8 @@ constexpr const VkFormat* GetFormatAlternatives(VkFormat format) { return Alternatives::R8G8B8_SSCALED.data(); case VK_FORMAT_R32G32B32_SFLOAT: return Alternatives::VK_FORMAT_R32G32B32_SFLOAT.data(); + case VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT: + 
return Alternatives::VK_FORMAT_A4B4G4R4_UNORM_PACK16.data(); default: return nullptr; } @@ -193,6 +203,7 @@ std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties(vk::Physica VK_FORMAT_BC7_UNORM_BLOCK, VK_FORMAT_D16_UNORM, VK_FORMAT_D16_UNORM_S8_UINT, + VK_FORMAT_X8_D24_UNORM_PACK32, VK_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D32_SFLOAT, VK_FORMAT_D32_SFLOAT_S8_UINT, @@ -238,6 +249,7 @@ std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties(vk::Physica VK_FORMAT_R32_SINT, VK_FORMAT_R32_UINT, VK_FORMAT_R4G4B4A4_UNORM_PACK16, + VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT, VK_FORMAT_R4G4_UNORM_PACK8, VK_FORMAT_R5G5B5A1_UNORM_PACK16, VK_FORMAT_R5G6B5_UNORM_PACK16, @@ -313,13 +325,38 @@ NvidiaArchitecture GetNvidiaArchitecture(vk::PhysicalDevice physical, physical.GetProperties2(physical_properties); if (shading_rate_props.primitiveFragmentShadingRateWithMultipleViewports) { // Only Ampere and newer support this feature + // TODO: Find a way to differentiate Ampere and Ada return NvidiaArchitecture::AmpereOrNewer; } - } - if (exts.contains(VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME)) { return NvidiaArchitecture::Turing; } - return NvidiaArchitecture::VoltaOrOlder; + + if (exts.contains(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME)) { + VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT advanced_blending_props{}; + advanced_blending_props.sType = + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT; + VkPhysicalDeviceProperties2 physical_properties{}; + physical_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2; + physical_properties.pNext = &advanced_blending_props; + physical.GetProperties2(physical_properties); + if (advanced_blending_props.advancedBlendMaxColorAttachments == 1) { + return NvidiaArchitecture::Maxwell; + } + + if (exts.contains(VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME)) { + VkPhysicalDeviceConservativeRasterizationPropertiesEXT conservative_raster_props{}; + conservative_raster_props.sType = + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT; + physical_properties.pNext = &conservative_raster_props; + physical.GetProperties2(physical_properties); + if (conservative_raster_props.degenerateLinesRasterized) { + return NvidiaArchitecture::Volta; + } + return NvidiaArchitecture::Pascal; + } + } + + return NvidiaArchitecture::KeplerOrOlder; } std::vector<const char*> ExtensionListForVulkan( @@ -420,7 +457,8 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR first_next = &diagnostics_nv; } - is_blit_depth_stencil_supported = TestDepthStencilBlits(); + is_blit_depth24_stencil8_supported = TestDepthStencilBlits(VK_FORMAT_D24_UNORM_S8_UINT); + is_blit_depth32_stencil8_supported = TestDepthStencilBlits(VK_FORMAT_D32_SFLOAT_S8_UINT); is_optimal_astc_supported = ComputeIsOptimalAstcSupported(); is_warp_potentially_bigger = !extensions.subgroup_size_control || properties.subgroup_size_control.maxSubgroupSize > GuestWarpSize; @@ -495,19 +533,14 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR if (is_nvidia) { const u32 nv_major_version = (properties.properties.driverVersion >> 22) & 0x3ff; const auto arch = GetNvidiaArchitecture(physical, supported_extensions); - switch (arch) { - case NvidiaArchitecture::AmpereOrNewer: + if (arch >= NvidiaArchitecture::AmpereOrNewer) { LOG_WARNING(Render_Vulkan, "Ampere and newer have broken float16 math"); features.shader_float16_int8.shaderFloat16 = false; - break; - case NvidiaArchitecture::Turing: - 
break; - case NvidiaArchitecture::VoltaOrOlder: + } else if (arch <= NvidiaArchitecture::Volta) { if (nv_major_version < 527) { LOG_WARNING(Render_Vulkan, "Volta and older have broken VK_KHR_push_descriptor"); RemoveExtension(extensions.push_descriptor, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } - break; } if (nv_major_version >= 510) { LOG_WARNING(Render_Vulkan, "NVIDIA Drivers >= 510 do not support MSAA image blits"); @@ -652,7 +685,15 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR "ANV drivers 22.3.0 to 23.1.0 have broken VK_KHR_push_descriptor"); RemoveExtension(extensions.push_descriptor, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } + } else if (extensions.push_descriptor && is_nvidia) { + const auto arch = GetNvidiaArchitecture(physical, supported_extensions); + if (arch <= NvidiaArchitecture::Pascal) { + LOG_WARNING(Render_Vulkan, + "Pascal and older architectures have broken VK_KHR_push_descriptor"); + RemoveExtension(extensions.push_descriptor, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); + } } + if (is_mvk) { LOG_WARNING(Render_Vulkan, "MVK driver breaks when using more than 16 vertex attributes/bindings"); @@ -774,14 +815,13 @@ bool Device::ComputeIsOptimalAstcSupported() const { return true; } -bool Device::TestDepthStencilBlits() const { +bool Device::TestDepthStencilBlits(VkFormat format) const { static constexpr VkFormatFeatureFlags required_features = VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT; const auto test_features = [](VkFormatProperties props) { return (props.optimalTilingFeatures & required_features) == required_features; }; - return test_features(format_properties.at(VK_FORMAT_D32_SFLOAT_S8_UINT)) && - test_features(format_properties.at(VK_FORMAT_D24_UNORM_S8_UINT)); + return test_features(format_properties.at(format)); } bool Device::IsFormatSupported(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage, @@ -1051,6 +1091,13 @@ void Device::RemoveUnsuitableExtensions() { RemoveExtensionFeatureIfUnsuitable(extensions.custom_border_color, features.custom_border_color, VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME); + // VK_EXT_depth_bias_control + extensions.depth_bias_control = + features.depth_bias_control.depthBiasControl && + features.depth_bias_control.leastRepresentableValueForceUnormRepresentation; + RemoveExtensionFeatureIfUnsuitable(extensions.depth_bias_control, features.depth_bias_control, + VK_EXT_DEPTH_BIAS_CONTROL_EXTENSION_NAME); + // VK_EXT_depth_clip_control extensions.depth_clip_control = features.depth_clip_control.depthClipControl; RemoveExtensionFeatureIfUnsuitable(extensions.depth_clip_control, features.depth_clip_control, diff --git a/src/video_core/vulkan_common/vulkan_device.h b/src/video_core/vulkan_common/vulkan_device.h index 488fdd313..282a2925d 100644 --- a/src/video_core/vulkan_common/vulkan_device.h +++ b/src/video_core/vulkan_common/vulkan_device.h @@ -41,10 +41,12 @@ VK_DEFINE_HANDLE(VmaAllocator) // Define all features which may be used by the implementation and require an extension here. 
#define FOR_EACH_VK_FEATURE_EXT(FEATURE) \ FEATURE(EXT, CustomBorderColor, CUSTOM_BORDER_COLOR, custom_border_color) \ + FEATURE(EXT, DepthBiasControl, DEPTH_BIAS_CONTROL, depth_bias_control) \ FEATURE(EXT, DepthClipControl, DEPTH_CLIP_CONTROL, depth_clip_control) \ FEATURE(EXT, ExtendedDynamicState, EXTENDED_DYNAMIC_STATE, extended_dynamic_state) \ FEATURE(EXT, ExtendedDynamicState2, EXTENDED_DYNAMIC_STATE_2, extended_dynamic_state2) \ FEATURE(EXT, ExtendedDynamicState3, EXTENDED_DYNAMIC_STATE_3, extended_dynamic_state3) \ + FEATURE(EXT, 4444Formats, 4444_FORMATS, format_a4b4g4r4) \ FEATURE(EXT, IndexTypeUint8, INDEX_TYPE_UINT8, index_type_uint8) \ FEATURE(EXT, LineRasterization, LINE_RASTERIZATION, line_rasterization) \ FEATURE(EXT, PrimitiveTopologyListRestart, PRIMITIVE_TOPOLOGY_LIST_RESTART, \ @@ -60,6 +62,7 @@ VK_DEFINE_HANDLE(VmaAllocator) // Define miscellaneous extensions which may be used by the implementation here. #define FOR_EACH_VK_EXTENSION(EXTENSION) \ + EXTENSION(EXT, CONDITIONAL_RENDERING, conditional_rendering) \ EXTENSION(EXT, CONSERVATIVE_RASTERIZATION, conservative_rasterization) \ EXTENSION(EXT, DEPTH_RANGE_UNRESTRICTED, depth_range_unrestricted) \ EXTENSION(EXT, MEMORY_BUDGET, memory_budget) \ @@ -92,11 +95,14 @@ VK_DEFINE_HANDLE(VmaAllocator) // Define extensions where the absence of the extension may result in a degraded experience. #define FOR_EACH_VK_RECOMMENDED_EXTENSION(EXTENSION_NAME) \ + EXTENSION_NAME(VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME) \ EXTENSION_NAME(VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME) \ + EXTENSION_NAME(VK_EXT_DEPTH_BIAS_CONTROL_EXTENSION_NAME) \ EXTENSION_NAME(VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME) \ EXTENSION_NAME(VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME) \ EXTENSION_NAME(VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME) \ EXTENSION_NAME(VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME) \ + EXTENSION_NAME(VK_EXT_4444_FORMATS_EXTENSION_NAME) \ EXTENSION_NAME(VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME) \ EXTENSION_NAME(VK_EXT_ROBUSTNESS_2_EXTENSION_NAME) \ EXTENSION_NAME(VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME) \ @@ -143,7 +149,11 @@ VK_DEFINE_HANDLE(VmaAllocator) // Define features where the absence of the feature may result in a degraded experience. #define FOR_EACH_VK_RECOMMENDED_FEATURE(FEATURE_NAME) \ FEATURE_NAME(custom_border_color, customBorderColors) \ + FEATURE_NAME(depth_bias_control, depthBiasControl) \ + FEATURE_NAME(depth_bias_control, leastRepresentableValueForceUnormRepresentation) \ + FEATURE_NAME(depth_bias_control, depthBiasExact) \ FEATURE_NAME(extended_dynamic_state, extendedDynamicState) \ + FEATURE_NAME(format_a4b4g4r4, formatA4B4G4R4) \ FEATURE_NAME(index_type_uint8, indexTypeUint8) \ FEATURE_NAME(primitive_topology_list_restart, primitiveTopologyListRestart) \ FEATURE_NAME(provoking_vertex, provokingVertexLast) \ @@ -304,7 +314,7 @@ public: return GetDriverID() != VK_DRIVER_ID_QUALCOMM_PROPRIETARY; } - /// Returns true if the device suppors float64 natively. + /// Returns true if the device supports float64 natively. bool IsFloat64Supported() const { return features.features.shaderFloat64; } @@ -319,6 +329,11 @@ public: return features.shader_float16_int8.shaderInt8; } + /// Returns true if the device supports binding multisample images as storage images. + bool IsStorageImageMultisampleSupported() const { + return features.features.shaderStorageImageMultisample; + } + /// Returns true if the device warp size can potentially be bigger than guest's warp size. 
bool IsWarpSizePotentiallyBiggerThanGuest() const { return is_warp_potentially_bigger; @@ -359,9 +374,14 @@ public: return features.features.depthBounds; } - /// Returns true when blitting from and to depth stencil images is supported. - bool IsBlitDepthStencilSupported() const { - return is_blit_depth_stencil_supported; + /// Returns true when blitting from and to D24S8 images is supported. + bool IsBlitDepth24Stencil8Supported() const { + return is_blit_depth24_stencil8_supported; + } + + /// Returns true when blitting from and to D32S8 images is supported. + bool IsBlitDepth32Stencil8Supported() const { + return is_blit_depth32_stencil8_supported; } /// Returns true if the device supports VK_NV_viewport_swizzle. @@ -449,6 +469,11 @@ public: return extensions.depth_clip_control; } + /// Returns true if the device supports VK_EXT_depth_bias_control. + bool IsExtDepthBiasControlSupported() const { + return extensions.depth_bias_control; + } + /// Returns true if the device supports VK_EXT_shader_viewport_index_layer. bool IsExtShaderViewportIndexLayerSupported() const { return extensions.shader_viewport_index_layer; @@ -488,6 +513,11 @@ public: return extensions.extended_dynamic_state3; } + /// Returns true if the device supports VK_EXT_4444_formats. + bool IsExt4444FormatsSupported() const { + return features.format_a4b4g4r4.formatA4B4G4R4; + } + /// Returns true if the device supports VK_EXT_extended_dynamic_state3. bool IsExtExtendedDynamicState3BlendingSupported() const { return dynamic_state3_blending; @@ -528,6 +558,10 @@ public: return extensions.shader_atomic_int64; } + bool IsExtConditionalRendering() const { + return extensions.conditional_rendering; + } + bool HasTimelineSemaphore() const; /// Returns the minimum supported version of SPIR-V. @@ -600,6 +634,10 @@ public: return features.robustness2.nullDescriptor; } + bool HasExactDepthBiasControl() const { + return features.depth_bias_control.depthBiasExact; + } + u32 GetMaxVertexInputAttributes() const { return properties.properties.limits.maxVertexInputAttributes; } @@ -666,7 +704,7 @@ private: bool ComputeIsOptimalAstcSupported() const; /// Returns true if the device natively supports blitting depth stencil images. - bool TestDepthStencilBlits() const; + bool TestDepthStencilBlits(VkFormat format) const; private: VkInstance instance; ///< Vulkan instance. @@ -730,25 +768,26 @@ private: VkPhysicalDeviceProperties2 properties2{}; // Misc features - bool is_optimal_astc_supported{}; ///< Support for all guest ASTC formats. - bool is_blit_depth_stencil_supported{}; ///< Support for blitting from and to depth stencil. - bool is_warp_potentially_bigger{}; ///< Host warp size can be bigger than guest. - bool is_integrated{}; ///< Is GPU an iGPU. - bool is_virtual{}; ///< Is GPU a virtual GPU. - bool is_non_gpu{}; ///< Is SoftwareRasterizer, FPGA, non-GPU device. - bool has_broken_compute{}; ///< Compute shaders can cause crashes - bool has_broken_cube_compatibility{}; ///< Has broken cube compatibility bit - bool has_renderdoc{}; ///< Has RenderDoc attached - bool has_nsight_graphics{}; ///< Has Nsight Graphics attached - bool supports_d24_depth{}; ///< Supports D24 depth buffers. - bool cant_blit_msaa{}; ///< Does not support MSAA<->MSAA blitting. - bool must_emulate_scaled_formats{}; ///< Requires scaled vertex format emulation - bool must_emulate_bgr565{}; ///< Emulates BGR565 by swizzling RGB565 format. - bool dynamic_state3_blending{}; ///< Has all blending features of dynamic_state3. 
- bool dynamic_state3_enables{}; ///< Has all enables features of dynamic_state3. - bool supports_conditional_barriers{}; ///< Allows barriers in conditional control flow. - u64 device_access_memory{}; ///< Total size of device local memory in bytes. - u32 sets_per_pool{}; ///< Sets per Description Pool + bool is_optimal_astc_supported{}; ///< Support for all guest ASTC formats. + bool is_blit_depth24_stencil8_supported{}; ///< Support for blitting from and to D24S8. + bool is_blit_depth32_stencil8_supported{}; ///< Support for blitting from and to D32S8. + bool is_warp_potentially_bigger{}; ///< Host warp size can be bigger than guest. + bool is_integrated{}; ///< Is GPU an iGPU. + bool is_virtual{}; ///< Is GPU a virtual GPU. + bool is_non_gpu{}; ///< Is SoftwareRasterizer, FPGA, non-GPU device. + bool has_broken_compute{}; ///< Compute shaders can cause crashes + bool has_broken_cube_compatibility{}; ///< Has broken cube compatibility bit + bool has_renderdoc{}; ///< Has RenderDoc attached + bool has_nsight_graphics{}; ///< Has Nsight Graphics attached + bool supports_d24_depth{}; ///< Supports D24 depth buffers. + bool cant_blit_msaa{}; ///< Does not support MSAA<->MSAA blitting. + bool must_emulate_scaled_formats{}; ///< Requires scaled vertex format emulation + bool must_emulate_bgr565{}; ///< Emulates BGR565 by swizzling RGB565 format. + bool dynamic_state3_blending{}; ///< Has all blending features of dynamic_state3. + bool dynamic_state3_enables{}; ///< Has all enables features of dynamic_state3. + bool supports_conditional_barriers{}; ///< Allows barriers in conditional control flow. + u64 device_access_memory{}; ///< Total size of device local memory in bytes. + u32 sets_per_pool{}; ///< Sets per Description Pool // Telemetry parameters std::set<std::string, std::less<>> supported_extensions; ///< Reported Vulkan extensions. diff --git a/src/video_core/vulkan_common/vulkan_memory_allocator.cpp b/src/video_core/vulkan_common/vulkan_memory_allocator.cpp index 3ef381a38..82767fdf0 100644 --- a/src/video_core/vulkan_common/vulkan_memory_allocator.cpp +++ b/src/video_core/vulkan_common/vulkan_memory_allocator.cpp @@ -9,6 +9,7 @@ #include "common/alignment.h" #include "common/assert.h" #include "common/common_types.h" +#include "common/literals.h" #include "common/logging/log.h" #include "common/polyfill_ranges.h" #include "video_core/vulkan_common/vma.h" @@ -69,8 +70,7 @@ struct Range { case MemoryUsage::Download: return VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; case MemoryUsage::DeviceLocal: - return VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | - VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT; + return {}; } return {}; } @@ -212,7 +212,20 @@ MemoryAllocator::MemoryAllocator(const Device& device_) : device{device_}, allocator{device.GetAllocator()}, properties{device_.GetPhysical().GetMemoryProperties().memoryProperties}, buffer_image_granularity{ - device_.GetPhysical().GetProperties().limits.bufferImageGranularity} {} + device_.GetPhysical().GetProperties().limits.bufferImageGranularity} { + // GPUs not supporting rebar may only have a region with less than 256MB host visible/device + // local memory. In that case, opening 2 RenderDoc captures side-by-side is not possible due to + // the heap running out of memory. With RenderDoc attached and only a small host/device region, + // only allow the stream buffer in this memory heap. 
+ if (device.HasDebuggingToolAttached()) { + using namespace Common::Literals; + ForEachDeviceLocalHostVisibleHeap(device, [this](size_t index, VkMemoryHeap& heap) { + if (heap.size <= 256_MiB) { + valid_memory_types &= ~(1u << index); + } + }); + } +} MemoryAllocator::~MemoryAllocator() = default; @@ -244,7 +257,7 @@ vk::Buffer MemoryAllocator::CreateBuffer(const VkBufferCreateInfo& ci, MemoryUsa .usage = MemoryUsageVma(usage), .requiredFlags = 0, .preferredFlags = MemoryUsagePreferedVmaFlags(usage), - .memoryTypeBits = 0, + .memoryTypeBits = usage == MemoryUsage::Stream ? 0u : valid_memory_types, .pool = VK_NULL_HANDLE, .pUserData = nullptr, .priority = 0.f, diff --git a/src/video_core/vulkan_common/vulkan_memory_allocator.h b/src/video_core/vulkan_common/vulkan_memory_allocator.h index f449bc8d0..38a182bcb 100644 --- a/src/video_core/vulkan_common/vulkan_memory_allocator.h +++ b/src/video_core/vulkan_common/vulkan_memory_allocator.h @@ -7,6 +7,7 @@ #include <span> #include <vector> #include "common/common_types.h" +#include "video_core/vulkan_common/vulkan_device.h" #include "video_core/vulkan_common/vulkan_wrapper.h" VK_DEFINE_HANDLE(VmaAllocator) @@ -26,6 +27,18 @@ enum class MemoryUsage { Stream, ///< Requests device local host visible buffer, falling back host memory. }; +template <typename F> +void ForEachDeviceLocalHostVisibleHeap(const Device& device, F&& f) { + auto memory_props = device.GetPhysical().GetMemoryProperties().memoryProperties; + for (size_t i = 0; i < memory_props.memoryTypeCount; i++) { + auto& memory_type = memory_props.memoryTypes[i]; + if ((memory_type.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) && + (memory_type.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) { + f(memory_type.heapIndex, memory_props.memoryHeaps[memory_type.heapIndex]); + } + } +} + /// Ownership handle of a memory commitment. /// Points to a subregion of a memory allocation. class MemoryCommit { @@ -124,6 +137,7 @@ private: std::vector<std::unique_ptr<MemoryAllocation>> allocations; ///< Current allocations. 
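A usage sketch for the ForEachDeviceLocalHostVisibleHeap helper above, mirroring how the staging pool sizes its stream buffer (device is an already-constructed Vulkan::Device; <algorithm> assumed for std::max):

    // Find the largest device-local, host-visible heap: the BAR/ReBAR window
    // on discrete GPUs, or simply main memory on integrated ones.
    VkDeviceSize largest = 0;
    ForEachDeviceLocalHostVisibleHeap(device, [&](size_t /*heap_index*/, VkMemoryHeap& heap) {
        largest = std::max(largest, heap.size);
    });

The allocator constructor uses the same walk to clear bits from valid_memory_types, so that with a debugger attached only MemoryUsage::Stream allocations (which pass memoryTypeBits = 0, i.e. "any type", to VMA) may still land in a small BAR heap.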
VkDeviceSize buffer_image_granularity; // The granularity for adjacent offsets between buffers // and optimal images + u32 valid_memory_types{~0u}; }; } // namespace Vulkan diff --git a/src/video_core/vulkan_common/vulkan_wrapper.cpp b/src/video_core/vulkan_common/vulkan_wrapper.cpp index c3f388d89..2f3254a97 100644 --- a/src/video_core/vulkan_common/vulkan_wrapper.cpp +++ b/src/video_core/vulkan_common/vulkan_wrapper.cpp @@ -75,6 +75,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept { X(vkBeginCommandBuffer); X(vkBindBufferMemory); X(vkBindImageMemory); + X(vkCmdBeginConditionalRenderingEXT); X(vkCmdBeginQuery); X(vkCmdBeginRenderPass); X(vkCmdBeginTransformFeedbackEXT); @@ -91,6 +92,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept { X(vkCmdCopyBufferToImage); X(vkCmdCopyImage); X(vkCmdCopyImageToBuffer); + X(vkCmdCopyQueryPoolResults); X(vkCmdDispatch); X(vkCmdDispatchIndirect); X(vkCmdDraw); @@ -99,6 +101,8 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept { X(vkCmdDrawIndexedIndirect); X(vkCmdDrawIndirectCount); X(vkCmdDrawIndexedIndirectCount); + X(vkCmdDrawIndirectByteCountEXT); + X(vkCmdEndConditionalRenderingEXT); X(vkCmdEndQuery); X(vkCmdEndRenderPass); X(vkCmdEndTransformFeedbackEXT); @@ -109,6 +113,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept { X(vkCmdPushDescriptorSetWithTemplateKHR); X(vkCmdSetBlendConstants); X(vkCmdSetDepthBias); + X(vkCmdSetDepthBias2EXT); X(vkCmdSetDepthBounds); X(vkCmdSetEvent); X(vkCmdSetScissor); diff --git a/src/video_core/vulkan_common/vulkan_wrapper.h b/src/video_core/vulkan_common/vulkan_wrapper.h index 049fa8038..0487cd3b6 100644 --- a/src/video_core/vulkan_common/vulkan_wrapper.h +++ b/src/video_core/vulkan_common/vulkan_wrapper.h @@ -117,6 +117,9 @@ public: virtual ~Exception() = default; const char* what() const noexcept override; + VkResult GetResult() const noexcept { + return result; + } private: VkResult result; @@ -185,6 +188,7 @@ struct DeviceDispatch : InstanceDispatch { PFN_vkBeginCommandBuffer vkBeginCommandBuffer{}; PFN_vkBindBufferMemory vkBindBufferMemory{}; PFN_vkBindImageMemory vkBindImageMemory{}; + PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT{}; PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT{}; PFN_vkCmdBeginQuery vkCmdBeginQuery{}; PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass{}; @@ -202,6 +206,7 @@ struct DeviceDispatch : InstanceDispatch { PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage{}; PFN_vkCmdCopyImage vkCmdCopyImage{}; PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer{}; + PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults{}; PFN_vkCmdDispatch vkCmdDispatch{}; PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect{}; PFN_vkCmdDraw vkCmdDraw{}; @@ -210,6 +215,8 @@ struct DeviceDispatch : InstanceDispatch { PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect{}; PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount{}; PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount{}; + PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT{}; + PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT{}; PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT{}; PFN_vkCmdEndQuery vkCmdEndQuery{}; PFN_vkCmdEndRenderPass vkCmdEndRenderPass{}; @@ -222,6 +229,7 @@ struct DeviceDispatch : InstanceDispatch { PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants{}; PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT{}; PFN_vkCmdSetDepthBias vkCmdSetDepthBias{}; + PFN_vkCmdSetDepthBias2EXT vkCmdSetDepthBias2EXT{}; 
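The X() entries added to Load() above populate this dispatch table at device creation; each one presumably amounts to a vkGetDeviceProcAddr lookup, along the lines of:

    // Illustrative expansion for one of the new entry points (the real code
    // goes through the X() macro, not hand-written casts):
    dld.vkCmdSetDepthBias2EXT = reinterpret_cast<PFN_vkCmdSetDepthBias2EXT>(
        vkGetDeviceProcAddr(device, "vkCmdSetDepthBias2EXT"));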
PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds{}; PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT{}; PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT{}; @@ -1182,6 +1190,13 @@ public: count_offset, draw_count, stride); } + void DrawIndirectByteCountEXT(u32 instance_count, u32 first_instance, VkBuffer counter_buffer, + VkDeviceSize counter_buffer_offset, u32 counter_offset, + u32 stride) { + dld->vkCmdDrawIndirectByteCountEXT(handle, instance_count, first_instance, counter_buffer, + counter_buffer_offset, counter_offset, stride); + } + void ClearAttachments(Span<VkClearAttachment> attachments, Span<VkClearRect> rects) const noexcept { dld->vkCmdClearAttachments(handle, attachments.size(), attachments.data(), rects.size(), @@ -1270,6 +1285,13 @@ public: regions.data()); } + void CopyQueryPoolResults(VkQueryPool query_pool, u32 first_query, u32 query_count, + VkBuffer dst_buffer, VkDeviceSize dst_offset, VkDeviceSize stride, + VkQueryResultFlags flags) const noexcept { + dld->vkCmdCopyQueryPoolResults(handle, query_pool, first_query, query_count, dst_buffer, + dst_offset, stride, flags); + } + void FillBuffer(VkBuffer dst_buffer, VkDeviceSize dst_offset, VkDeviceSize size, u32 data) const noexcept { dld->vkCmdFillBuffer(handle, dst_buffer, dst_offset, size, data); @@ -1315,6 +1337,18 @@ public: dld->vkCmdSetDepthBias(handle, constant_factor, clamp, slope_factor); } + void SetDepthBias(float constant_factor, float clamp, float slope_factor, + VkDepthBiasRepresentationInfoEXT* extra) const noexcept { + VkDepthBiasInfoEXT info{ + .sType = VK_STRUCTURE_TYPE_DEPTH_BIAS_INFO_EXT, + .pNext = extra, + .depthBiasConstantFactor = constant_factor, + .depthBiasClamp = clamp, + .depthBiasSlopeFactor = slope_factor, + }; + dld->vkCmdSetDepthBias2EXT(handle, &info); + } + void SetDepthBounds(float min_depth_bounds, float max_depth_bounds) const noexcept { dld->vkCmdSetDepthBounds(handle, min_depth_bounds, max_depth_bounds); } @@ -1448,6 +1482,15 @@ public: counter_buffers, counter_buffer_offsets); } + void BeginConditionalRenderingEXT( + const VkConditionalRenderingBeginInfoEXT& info) const noexcept { + dld->vkCmdBeginConditionalRenderingEXT(handle, &info); + } + + void EndConditionalRenderingEXT() const noexcept { + dld->vkCmdEndConditionalRenderingEXT(handle); + } + void BeginDebugUtilsLabelEXT(const char* label, std::span<float, 4> color) const noexcept { const VkDebugUtilsLabelEXT label_info{ .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT, |
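With the CopyQueryPoolResults wrapper above, the reworked query cache can resolve query results entirely on the GPU instead of the removed HostCounter::BlockingQuery CPU round-trip. A usage sketch with placeholder handles:

    // Copy `count` 64-bit results into a device buffer; WAIT_BIT makes the
    // copy wait for query availability on the GPU timeline, not on the CPU.
    cmdbuf.CopyQueryPoolResults(query_pool, /*first_query=*/0, count, dst_buffer,
                                /*dst_offset=*/0, /*stride=*/sizeof(u64),
                                VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);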