Diffstat (limited to 'src/video_core')
21 files changed, 501 insertions, 107 deletions
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index a0009a36f..308d013d6 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -246,10 +246,14 @@ add_library(video_core STATIC
     texture_cache/util.h
     textures/astc.h
     textures/astc.cpp
+    textures/bcn.cpp
+    textures/bcn.h
     textures/decoders.cpp
     textures/decoders.h
     textures/texture.cpp
     textures/texture.h
+    textures/workers.cpp
+    textures/workers.h
     transform_feedback.cpp
     transform_feedback.h
     video_core.cpp
@@ -275,7 +279,7 @@ add_library(video_core STATIC
 create_target_directory_groups(video_core)
 
 target_link_libraries(video_core PUBLIC common core)
-target_link_libraries(video_core PUBLIC glad shader_recompiler)
+target_link_libraries(video_core PUBLIC glad shader_recompiler stb)
 
 if (YUZU_USE_BUNDLED_FFMPEG AND NOT WIN32)
     add_dependencies(video_core ffmpeg-build)
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 98756e4da..65494097b 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -30,8 +30,8 @@ BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
     }
 
     const s64 device_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
-    const s64 min_spacing_expected = device_memory - 1_GiB - 512_MiB;
-    const s64 min_spacing_critical = device_memory - 1_GiB;
+    const s64 min_spacing_expected = device_memory - 1_GiB;
+    const s64 min_spacing_critical = device_memory - 512_MiB;
     const s64 mem_threshold = std::min(device_memory, TARGET_THRESHOLD);
     const s64 min_vacancy_expected = (6 * mem_threshold) / 10;
     const s64 min_vacancy_critical = (3 * mem_threshold) / 10;
@@ -1664,7 +1664,7 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
         // cbufs, which do not store the sizes adjacent to the addresses, so use the fully
         // mapped buffer size for now.
         const u32 memory_layout_size = static_cast<u32>(gpu_memory->GetMemoryLayoutSize(gpu_addr));
-        return memory_layout_size;
+        return std::min(memory_layout_size, static_cast<u32>(8_MiB));
     }();
     const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
     if (!cpu_addr || size == 0) {
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index 31118886f..1e0823836 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -233,6 +233,8 @@ void ApplySwizzle(GLuint handle, PixelFormat format, std::array<SwizzleSource, 4
                                     const VideoCommon::ImageInfo& info) {
     if (IsPixelFormatASTC(info.format) && info.size.depth == 1 && !runtime.HasNativeASTC()) {
         return Settings::values.accelerate_astc.GetValue() &&
+               Settings::values.astc_recompression.GetValue() ==
+                   Settings::AstcRecompression::Uncompressed &&
                !Settings::values.async_astc.GetValue();
     }
     // Disable other accelerated uploads for now as they don't implement swizzled uploads
@@ -437,6 +439,19 @@ OGLTexture MakeImage(const VideoCommon::ImageInfo& info, GLenum gl_internal_form
     return GL_R32UI;
 }
 
+[[nodiscard]] GLenum SelectAstcFormat(PixelFormat format, bool is_srgb) {
+    switch (Settings::values.astc_recompression.GetValue()) {
+    case Settings::AstcRecompression::Bc1:
+        return is_srgb ? GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT : GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
+        break;
+    case Settings::AstcRecompression::Bc3:
+        return is_srgb ? GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT : GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
+        break;
+    default:
+        return is_srgb ? GL_SRGB8_ALPHA8 : GL_RGBA8;
+    }
+}
+
 } // Anonymous namespace
 
 ImageBufferMap::~ImageBufferMap() {
@@ -739,9 +754,16 @@ Image::Image(TextureCacheRuntime& runtime_, const VideoCommon::ImageInfo& info_,
     if (IsConverted(runtime->device, info.format, info.type)) {
         flags |= ImageFlagBits::Converted;
         flags |= ImageFlagBits::CostlyLoad;
-        gl_internal_format = IsPixelFormatSRGB(info.format) ? GL_SRGB8_ALPHA8 : GL_RGBA8;
+
+        const bool is_srgb = IsPixelFormatSRGB(info.format);
+        gl_internal_format = is_srgb ? GL_SRGB8_ALPHA8 : GL_RGBA8;
         gl_format = GL_RGBA;
         gl_type = GL_UNSIGNED_INT_8_8_8_8_REV;
+
+        if (IsPixelFormatASTC(info.format)) {
+            gl_internal_format = SelectAstcFormat(info.format, is_srgb);
+            gl_format = GL_NONE;
+        }
     } else {
         const auto& tuple = MaxwellToGL::GetFormatTuple(info.format);
         gl_internal_format = tuple.internal_format;
@@ -1130,7 +1152,12 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
       views{runtime.null_image_views} {
     const Device& device = runtime.device;
     if (True(image.flags & ImageFlagBits::Converted)) {
-        internal_format = IsPixelFormatSRGB(info.format) ? GL_SRGB8_ALPHA8 : GL_RGBA8;
+        const bool is_srgb = IsPixelFormatSRGB(info.format);
+        internal_format = is_srgb ? GL_SRGB8_ALPHA8 : GL_RGBA8;
+
+        if (IsPixelFormatASTC(info.format)) {
+            internal_format = SelectAstcFormat(info.format, is_srgb);
+        }
     } else {
         internal_format = MaxwellToGL::GetFormatTuple(format).internal_format;
     }
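Note: SelectAstcFormat trades image quality for memory. Instead of keeping CPU-decoded ASTC as 32-bit RGBA, the decoded pixels are re-encoded to BC1 (8 bytes per 4x4 block) or BC3 (16 bytes per 4x4 block). A back-of-the-envelope comparison for a hypothetical 1024x1024 level — the figures follow from the block layouts, not from yuzu code:

    #include <cstddef>

    // Footprint of one 1024x1024 mip level under each representation.
    constexpr std::size_t width = 1024, height = 1024;
    constexpr std::size_t rgba8_bytes = width * height * 4;            // 4 MiB, old fallback
    constexpr std::size_t bc1_bytes = (width / 4) * (height / 4) * 8;  // 512 KiB, 8 B per 4x4 block
    constexpr std::size_t bc3_bytes = (width / 4) * (height / 4) * 16; // 1 MiB, 16 B per 4x4 block
    static_assert(rgba8_bytes == 8 * bc1_bytes && rgba8_bytes == 4 * bc3_bytes);

BC1 keeps only a 1-bit alpha, which is why the runtime applies an alpha threshold when encoding it; BC3 preserves an interpolated 8-bit alpha at twice the size.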
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h
index 1190999a8..3e9b3302b 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.h
+++ b/src/video_core/renderer_opengl/gl_texture_cache.h
@@ -144,6 +144,10 @@ public:
         return state_tracker;
     }
 
+    void BarrierFeedbackLoop() const noexcept {
+        // OpenGL does not require a barrier for attachment feedback loops.
+    }
+
 private:
     struct StagingBuffers {
         explicit StagingBuffers(GLenum storage_flags_, GLenum map_flags_);
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 8853cf0f7..b75d7220d 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -6,6 +6,7 @@
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
+#include "common/settings.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/renderer_vulkan/maxwell_to_vk.h"
 #include "video_core/surface.h"
@@ -237,14 +238,25 @@ FormatInfo SurfaceFormat(const Device& device, FormatType format_type, bool with
                          PixelFormat pixel_format) {
     ASSERT(static_cast<size_t>(pixel_format) < std::size(tex_format_tuples));
     FormatTuple tuple = tex_format_tuples[static_cast<size_t>(pixel_format)];
-    // Use A8B8G8R8_UNORM on hardware that doesn't support ASTC natively
+    // Transcode on hardware that doesn't support ASTC natively
     if (!device.IsOptimalAstcSupported() && VideoCore::Surface::IsPixelFormatASTC(pixel_format)) {
         const bool is_srgb = with_srgb && VideoCore::Surface::IsPixelFormatSRGB(pixel_format);
-        if (is_srgb) {
-            tuple.format = VK_FORMAT_A8B8G8R8_SRGB_PACK32;
-        } else {
-            tuple.format = VK_FORMAT_A8B8G8R8_UNORM_PACK32;
-            tuple.usage |= Storage;
+
+        switch (Settings::values.astc_recompression.GetValue()) {
+        case Settings::AstcRecompression::Uncompressed:
+            if (is_srgb) {
+                tuple.format = VK_FORMAT_A8B8G8R8_SRGB_PACK32;
+            } else {
+                tuple.format = VK_FORMAT_A8B8G8R8_UNORM_PACK32;
+                tuple.usage |= Storage;
+            }
+            break;
+        case Settings::AstcRecompression::Bc1:
+            tuple.format = is_srgb ? VK_FORMAT_BC1_RGBA_SRGB_BLOCK : VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
+            break;
+        case Settings::AstcRecompression::Bc3:
+            tuple.format = is_srgb ? VK_FORMAT_BC3_SRGB_BLOCK : VK_FORMAT_BC3_UNORM_BLOCK;
+            break;
         }
     }
     const bool attachable = (tuple.usage & Attachable) != 0;
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index f1bcd5cd6..506b78f08 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -481,12 +481,13 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
     if constexpr (Spec::enabled_stages[4]) {
         prepare_stage(4);
     }
+    texture_cache.UpdateRenderTargets(false);
+    texture_cache.CheckFeedbackLoop(views);
     ConfigureDraw(rescaling, render_area);
 }
 
 void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling,
                                      const RenderAreaPushConstant& render_area) {
-    texture_cache.UpdateRenderTargets(false);
     scheduler.RequestRenderpass(texture_cache.GetFramebuffer());
 
     if (!is_built.load(std::memory_order::relaxed)) {
diff --git a/src/video_core/renderer_vulkan/vk_master_semaphore.cpp b/src/video_core/renderer_vulkan/vk_master_semaphore.cpp
index 47c74e4d8..8b65aeaeb 100644
--- a/src/video_core/renderer_vulkan/vk_master_semaphore.cpp
+++ b/src/video_core/renderer_vulkan/vk_master_semaphore.cpp
@@ -10,11 +10,16 @@
 
 namespace Vulkan {
 
+constexpr u64 FENCE_RESERVE_SIZE = 8;
+
 MasterSemaphore::MasterSemaphore(const Device& device_) : device(device_) {
     if (!device.HasTimelineSemaphore()) {
         static constexpr VkFenceCreateInfo fence_ci{
             .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, .pNext = nullptr, .flags = 0};
-        fence = device.GetLogical().CreateFence(fence_ci);
+        free_queue.resize(FENCE_RESERVE_SIZE);
+        std::ranges::generate(free_queue,
+                              [&] { return device.GetLogical().CreateFence(fence_ci); });
+        wait_thread = std::jthread([this](std::stop_token token) { WaitThread(token); });
         return;
     }
@@ -167,16 +172,53 @@ VkResult MasterSemaphore::SubmitQueueFence(vk::CommandBuffer& cmdbuf, VkSemaphor
         .pSignalSemaphores = &signal_semaphore,
     };
 
+    auto fence = GetFreeFence();
     auto result = device.GetGraphicsQueue().Submit(submit_info, *fence);
 
     if (result == VK_SUCCESS) {
+        std::scoped_lock lock{wait_mutex};
+        wait_queue.emplace(host_tick, std::move(fence));
+        wait_cv.notify_one();
+    }
+
+    return result;
+}
+
+void MasterSemaphore::WaitThread(std::stop_token token) {
+    while (!token.stop_requested()) {
+        u64 host_tick;
+        vk::Fence fence;
+        {
+            std::unique_lock lock{wait_mutex};
+            Common::CondvarWait(wait_cv, lock, token, [this] { return !wait_queue.empty(); });
+            if (token.stop_requested()) {
+                return;
+            }
+            std::tie(host_tick, fence) = std::move(wait_queue.front());
+            wait_queue.pop();
+        }
+
+        fence.Wait();
         fence.Reset();
         gpu_tick.store(host_tick);
         gpu_tick.notify_all();
+
+        std::scoped_lock lock{free_mutex};
+        free_queue.push_front(std::move(fence));
     }
+}
 
-    return result;
+vk::Fence MasterSemaphore::GetFreeFence() {
+    std::scoped_lock lock{free_mutex};
+    if (free_queue.empty()) {
+        static constexpr VkFenceCreateInfo fence_ci{
+            .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, .pNext = nullptr, .flags = 0};
+        return device.GetLogical().CreateFence(fence_ci);
+    }
+
+    auto fence = std::move(free_queue.back());
+    free_queue.pop_back();
+    return fence;
 }
 
 } // namespace Vulkan
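Note: the master semaphore change moves the blocking fence wait off the submission thread: submission pulls a fence from a free list, and a dedicated waiter thread waits, publishes the completed tick, and recycles the fence. Reduced to a self-contained sketch of the same producer/consumer shape — Fence is a stand-in for vk::Fence, and the tick publication is elided:

    #include <condition_variable>
    #include <cstdint>
    #include <deque>
    #include <mutex>
    #include <queue>
    #include <thread>
    #include <utility>

    struct Fence { // stand-in for vk::Fence
        void Wait() {}
        void Reset() {}
    };

    class FenceWaiter {
    public:
        FenceWaiter() : waiter{[this](std::stop_token token) { WaitThread(token); }} {}

        // Reuse a recycled fence if one is available, otherwise create a new one.
        Fence Acquire() {
            std::scoped_lock lock{free_mutex};
            if (free_list.empty()) {
                return Fence{};
            }
            Fence fence = std::move(free_list.back());
            free_list.pop_back();
            return fence;
        }

        // Called right after queue submission; hands the fence to the waiter thread.
        void OnSubmit(std::uint64_t tick, Fence fence) {
            std::scoped_lock lock{wait_mutex};
            wait_queue.emplace(tick, std::move(fence));
            wait_cv.notify_one();
        }

    private:
        void WaitThread(std::stop_token token) {
            while (!token.stop_requested()) {
                std::pair<std::uint64_t, Fence> item;
                {
                    std::unique_lock lock{wait_mutex};
                    if (!wait_cv.wait(lock, token, [this] { return !wait_queue.empty(); })) {
                        return; // stop requested while waiting
                    }
                    item = std::move(wait_queue.front());
                    wait_queue.pop();
                }
                item.second.Wait();  // the blocking wait happens off the submit thread
                item.second.Reset();
                // ...publish item.first as the newest completed tick here...
                std::scoped_lock lock{free_mutex};
                free_list.push_front(std::move(item.second));
            }
        }

        std::mutex wait_mutex;
        std::mutex free_mutex;
        std::condition_variable_any wait_cv;
        std::queue<std::pair<std::uint64_t, Fence>> wait_queue;
        std::deque<Fence> free_list;
        std::jthread waiter; // declared last so it starts after the members above
    };

Two separate mutexes keep the hot submission path (wait_mutex) from contending with fence recycling (free_mutex), which matches the wait_queue/free_queue split in the actual change.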
diff --git a/src/video_core/renderer_vulkan/vk_master_semaphore.h b/src/video_core/renderer_vulkan/vk_master_semaphore.h
index f2f61f781..1e7c90215 100644
--- a/src/video_core/renderer_vulkan/vk_master_semaphore.h
+++ b/src/video_core/renderer_vulkan/vk_master_semaphore.h
@@ -5,8 +5,10 @@
 
 #include <atomic>
 #include <condition_variable>
+#include <deque>
 #include <mutex>
 #include <thread>
+#include <queue>
 
 #include "common/common_types.h"
 #include "common/polyfill_thread.h"
@@ -17,6 +19,8 @@ namespace Vulkan {
 class Device;
 
 class MasterSemaphore {
+    using Waitable = std::pair<u64, vk::Fence>;
+
 public:
     explicit MasterSemaphore(const Device& device);
     ~MasterSemaphore();
@@ -57,13 +61,22 @@ private:
     VkResult SubmitQueueFence(vk::CommandBuffer& cmdbuf, VkSemaphore signal_semaphore,
                               VkSemaphore wait_semaphore, u64 host_tick);
 
+    void WaitThread(std::stop_token token);
+
+    vk::Fence GetFreeFence();
+
 private:
     const Device& device;             ///< Device.
-    vk::Fence fence;                  ///< Fence.
     vk::Semaphore semaphore;          ///< Timeline semaphore.
     std::atomic<u64> gpu_tick{0};     ///< Current known GPU tick.
     std::atomic<u64> current_tick{1}; ///< Current logical tick.
+    std::mutex wait_mutex;
+    std::mutex free_mutex;
+    std::condition_variable_any wait_cv;
+    std::queue<Waitable> wait_queue;  ///< Queue for the fences to be waited on by the wait thread.
+    std::deque<vk::Fence> free_queue; ///< Holds available fences for submission.
     std::jthread debug_thread;        ///< Debug thread to workaround validation layer bugs.
+    std::jthread wait_thread;         ///< Helper thread that waits for submitted fences.
 };
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 4d0481f2a..8711e2a87 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -861,6 +861,10 @@ VkBuffer TextureCacheRuntime::GetTemporaryBuffer(size_t needed_size) {
     return *buffers[level];
 }
 
+void TextureCacheRuntime::BarrierFeedbackLoop() {
+    scheduler.RequestOutsideRenderPassOperationContext();
+}
+
 void TextureCacheRuntime::ReinterpretImage(Image& dst, Image& src,
                                            std::span<const VideoCommon::ImageCopy> copies) {
     std::vector<VkBufferImageCopy> vk_in_copies(copies.size());
@@ -1268,7 +1272,9 @@ Image::Image(TextureCacheRuntime& runtime_, const ImageInfo& info_, GPUVAddr gpu
     if (IsPixelFormatASTC(info.format) && !runtime->device.IsOptimalAstcSupported()) {
         if (Settings::values.async_astc.GetValue()) {
             flags |= VideoCommon::ImageFlagBits::AsynchronousDecode;
-        } else if (Settings::values.accelerate_astc.GetValue() && info.size.depth == 1) {
+        } else if (Settings::values.astc_recompression.GetValue() ==
+                       Settings::AstcRecompression::Uncompressed &&
+                   Settings::values.accelerate_astc.GetValue() && info.size.depth == 1) {
             flags |= VideoCommon::ImageFlagBits::AcceleratedUpload;
         }
         flags |= VideoCommon::ImageFlagBits::Converted;
@@ -1283,7 +1289,9 @@ Image::Image(TextureCacheRuntime& runtime_, const ImageInfo& info_, GPUVAddr gpu
         .usage = VK_IMAGE_USAGE_STORAGE_BIT,
     };
     current_image = *original_image;
-    if (IsPixelFormatASTC(info.format) && !runtime->device.IsOptimalAstcSupported()) {
+    if (IsPixelFormatASTC(info.format) && !runtime->device.IsOptimalAstcSupported() &&
+        Settings::values.astc_recompression.GetValue() ==
+            Settings::AstcRecompression::Uncompressed) {
        const auto& device = runtime->device.GetLogical();
        storage_image_views.reserve(info.resources.levels);
        for (s32 level = 0; level < info.resources.levels; ++level) {
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 4166b3d20..0f7a5ffd4 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -103,6 +103,8 @@ public:
     [[nodiscard]] VkBuffer GetTemporaryBuffer(size_t needed_size);
 
+    void BarrierFeedbackLoop();
+
     const Device& device;
     Scheduler& scheduler;
     MemoryAllocator& memory_allocator;
diff --git a/src/video_core/texture_cache/image_base.cpp b/src/video_core/texture_cache/image_base.cpp
index 91512022f..d79594ce5 100644
--- a/src/video_core/texture_cache/image_base.cpp
+++ b/src/video_core/texture_cache/image_base.cpp
@@ -155,7 +155,7 @@ void ImageBase::CheckAliasState() {
     flags &= ~ImageFlagBits::Alias;
 }
 
-void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_id) {
+bool AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_id) {
     static constexpr auto OPTIONS = RelaxedOptions::Size | RelaxedOptions::Format;
     ASSERT(lhs.info.type == rhs.info.type);
     std::optional<SubresourceBase> base;
@@ -169,7 +169,7 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
     }
     if (!base) {
         LOG_ERROR(HW_GPU, "Image alias should have been flipped");
-        return;
+        return false;
     }
     const PixelFormat lhs_format = lhs.info.format;
     const PixelFormat rhs_format = rhs.info.format;
@@ -248,12 +248,13 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
     }
     ASSERT(lhs_alias.copies.empty() == rhs_alias.copies.empty());
     if (lhs_alias.copies.empty()) {
-        return;
+        return false;
     }
     lhs.aliased_images.push_back(std::move(lhs_alias));
     rhs.aliased_images.push_back(std::move(rhs_alias));
     lhs.flags &= ~ImageFlagBits::IsRescalable;
     rhs.flags &= ~ImageFlagBits::IsRescalable;
+    return true;
 }
 
 } // namespace VideoCommon
diff --git a/src/video_core/texture_cache/image_base.h b/src/video_core/texture_cache/image_base.h
index 329396bb6..1b8a17ee8 100644
--- a/src/video_core/texture_cache/image_base.h
+++ b/src/video_core/texture_cache/image_base.h
@@ -142,6 +142,6 @@ struct ImageAllocBase {
     std::vector<ImageId> images;
 };
 
-void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_id);
+bool AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_id);
 
 } // namespace VideoCommon
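Note: AddImageAlias can now report that no copies were recorded (flipped subresources or an empty copy list). Callers that keep side tables keyed by the alias they just created should commit their bookkeeping only on success — the pattern JoinImages adopts below. A generic sketch with stand-in types (ImageStub and alias_indices are illustrative, not yuzu types):

    #include <cstddef>
    #include <unordered_map>
    #include <vector>

    struct ImageStub {
        std::vector<int> aliased_images; // stand-in for the AliasedImage list
    };

    // Stand-in for the bool-returning AddImageAlias; assume it may fail.
    bool AddImageAlias(ImageStub& lhs, ImageStub& rhs, int lhs_id, int rhs_id) {
        lhs.aliased_images.push_back(rhs_id);
        rhs.aliased_images.push_back(lhs_id);
        return true;
    }

    void Record(ImageStub& new_image, ImageStub& overlap, int new_id, int overlap_id,
                std::unordered_map<int, std::size_t>& alias_indices) {
        // Capture the slot the alias will occupy *before* the call, and only
        // commit the index if copies were actually added.
        const std::size_t alias_index = new_image.aliased_images.size();
        if (AddImageAlias(new_image, overlap, new_id, overlap_id)) {
            alias_indices.emplace(overlap_id, alias_index);
        }
    }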
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index b24086fce..2cf082c5d 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -49,8 +49,8 @@ TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface&
 
     if constexpr (HAS_DEVICE_MEMORY_INFO) {
         const s64 device_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
-        const s64 min_spacing_expected = device_memory - 1_GiB - 512_MiB;
-        const s64 min_spacing_critical = device_memory - 1_GiB;
+        const s64 min_spacing_expected = device_memory - 1_GiB;
+        const s64 min_spacing_critical = device_memory - 512_MiB;
         const s64 mem_threshold = std::min(device_memory, TARGET_THRESHOLD);
         const s64 min_vacancy_expected = (6 * mem_threshold) / 10;
         const s64 min_vacancy_critical = (3 * mem_threshold) / 10;
@@ -86,10 +86,12 @@ void TextureCache<P>::RunGarbageCollector() {
             // used by the async decoder thread.
             return false;
         }
+        if (!aggressive_mode && True(image.flags & ImageFlagBits::CostlyLoad)) {
+            return false;
+        }
         const bool must_download =
             image.IsSafeDownload() && False(image.flags & ImageFlagBits::BadOverlap);
-        if (!high_priority_mode &&
-            (must_download || True(image.flags & ImageFlagBits::CostlyLoad))) {
+        if (!high_priority_mode && must_download) {
             return false;
         }
         if (must_download) {
@@ -137,7 +139,6 @@ void TextureCache<P>::TickFrame() {
     TickAsyncDecode();
 
     runtime.TickFrame();
-    critical_gc = 0;
     ++frame_tick;
 
     if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
@@ -184,6 +185,42 @@ void TextureCache<P>::FillComputeImageViews(std::span<ImageViewInOut> views) {
 }
 
 template <class P>
+void TextureCache<P>::CheckFeedbackLoop(std::span<const ImageViewInOut> views) {
+    const bool requires_barrier = [&] {
+        for (const auto& view : views) {
+            if (!view.id) {
+                continue;
+            }
+            auto& image_view = slot_image_views[view.id];
+
+            // Check color targets
+            for (const auto& ct_view_id : render_targets.color_buffer_ids) {
+                if (ct_view_id) {
+                    auto& ct_view = slot_image_views[ct_view_id];
+                    if (image_view.image_id == ct_view.image_id) {
+                        return true;
+                    }
+                }
+            }
+
+            // Check zeta target
+            if (render_targets.depth_buffer_id) {
+                auto& zt_view = slot_image_views[render_targets.depth_buffer_id];
+                if (image_view.image_id == zt_view.image_id) {
+                    return true;
+                }
+            }
+        }
+
+        return false;
+    }();
+
+    if (requires_barrier) {
+        runtime.BarrierFeedbackLoop();
+    }
+}
+
+template <class P>
 typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) {
     if (index > channel_state->graphics_sampler_table.Limit()) {
         LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index);
@@ -1274,17 +1311,18 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
     const size_t size_bytes = CalculateGuestSizeInBytes(new_info);
     const bool broken_views = runtime.HasBrokenTextureViewFormats();
     const bool native_bgr = runtime.HasNativeBgr();
-    boost::container::small_vector<ImageId, 4> overlap_ids;
-    std::unordered_set<ImageId> overlaps_found;
-    boost::container::small_vector<ImageId, 4> left_aliased_ids;
-    boost::container::small_vector<ImageId, 4> right_aliased_ids;
-    std::unordered_set<ImageId> ignore_textures;
-    boost::container::small_vector<ImageId, 4> bad_overlap_ids;
-    boost::container::small_vector<ImageId, 4> all_siblings;
+    join_overlap_ids.clear();
+    join_overlaps_found.clear();
+    join_left_aliased_ids.clear();
+    join_right_aliased_ids.clear();
+    join_ignore_textures.clear();
+    join_bad_overlap_ids.clear();
+    join_copies_to_do.clear();
+    join_alias_indices.clear();
     const bool this_is_linear = info.type == ImageType::Linear;
     const auto region_check = [&](ImageId overlap_id, ImageBase& overlap) {
         if (True(overlap.flags & ImageFlagBits::Remapped)) {
-            ignore_textures.insert(overlap_id);
+            join_ignore_textures.insert(overlap_id);
             return;
         }
         const bool overlap_is_linear = overlap.info.type == ImageType::Linear;
@@ -1294,11 +1332,11 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
         if (this_is_linear && overlap_is_linear) {
             if (info.pitch == overlap.info.pitch && gpu_addr == overlap.gpu_addr) {
                 // Alias linear images with the same pitch
-                left_aliased_ids.push_back(overlap_id);
+                join_left_aliased_ids.push_back(overlap_id);
             }
             return;
         }
-        overlaps_found.insert(overlap_id);
+        join_overlaps_found.insert(overlap_id);
         static constexpr bool strict_size = true;
         const std::optional<OverlapResult> solution = ResolveOverlap(
             new_info, gpu_addr, cpu_addr, overlap, strict_size, broken_views, native_bgr);
@@ -1306,33 +1344,33 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
             gpu_addr = solution->gpu_addr;
             cpu_addr = solution->cpu_addr;
             new_info.resources = solution->resources;
-            overlap_ids.push_back(overlap_id);
-            all_siblings.push_back(overlap_id);
+            join_overlap_ids.push_back(overlap_id);
+            join_copies_to_do.emplace_back(JoinCopy{false, overlap_id});
             return;
         }
         static constexpr auto options = RelaxedOptions::Size | RelaxedOptions::Format;
         const ImageBase new_image_base(new_info, gpu_addr, cpu_addr);
         if (IsSubresource(new_info, overlap, gpu_addr, options, broken_views, native_bgr)) {
-            left_aliased_ids.push_back(overlap_id);
+            join_left_aliased_ids.push_back(overlap_id);
             overlap.flags |= ImageFlagBits::Alias;
-            all_siblings.push_back(overlap_id);
+            join_copies_to_do.emplace_back(JoinCopy{true, overlap_id});
         } else if (IsSubresource(overlap.info, new_image_base, overlap.gpu_addr, options,
                                  broken_views, native_bgr)) {
-            right_aliased_ids.push_back(overlap_id);
+            join_right_aliased_ids.push_back(overlap_id);
             overlap.flags |= ImageFlagBits::Alias;
-            all_siblings.push_back(overlap_id);
+            join_copies_to_do.emplace_back(JoinCopy{true, overlap_id});
         } else {
-            bad_overlap_ids.push_back(overlap_id);
+            join_bad_overlap_ids.push_back(overlap_id);
         }
     };
     ForEachImageInRegion(cpu_addr, size_bytes, region_check);
     const auto region_check_gpu = [&](ImageId overlap_id, ImageBase& overlap) {
-        if (!overlaps_found.contains(overlap_id)) {
+        if (!join_overlaps_found.contains(overlap_id)) {
             if (True(overlap.flags & ImageFlagBits::Remapped)) {
-                ignore_textures.insert(overlap_id);
+                join_ignore_textures.insert(overlap_id);
             }
             if (overlap.gpu_addr == gpu_addr && overlap.guest_size_bytes == size_bytes) {
-                ignore_textures.insert(overlap_id);
+                join_ignore_textures.insert(overlap_id);
             }
         }
     };
@@ -1340,11 +1378,11 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
 
     bool can_rescale = info.rescaleable;
     bool any_rescaled = false;
-    for (const ImageId sibling_id : all_siblings) {
+    for (const auto& copy : join_copies_to_do) {
         if (!can_rescale) {
             break;
         }
-        Image& sibling = slot_images[sibling_id];
+        Image& sibling = slot_images[copy.id];
         can_rescale &= ImageCanRescale(sibling);
         any_rescaled |= True(sibling.flags & ImageFlagBits::Rescaled);
     }
@@ -1352,13 +1390,13 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
     can_rescale &= any_rescaled;
 
     if (can_rescale) {
-        for (const ImageId sibling_id : all_siblings) {
-            Image& sibling = slot_images[sibling_id];
+        for (const auto& copy : join_copies_to_do) {
+            Image& sibling = slot_images[copy.id];
             ScaleUp(sibling);
         }
     } else {
-        for (const ImageId sibling_id : all_siblings) {
-            Image& sibling = slot_images[sibling_id];
+        for (const auto& copy : join_copies_to_do) {
+            Image& sibling = slot_images[copy.id];
             ScaleDown(sibling);
         }
     }
@@ -1370,7 +1408,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
         new_image.flags |= ImageFlagBits::Sparse;
     }
 
-    for (const ImageId overlap_id : ignore_textures) {
+    for (const ImageId overlap_id : join_ignore_textures) {
         Image& overlap = slot_images[overlap_id];
         if (True(overlap.flags & ImageFlagBits::GpuModified)) {
             UNIMPLEMENTED();
@@ -1391,14 +1429,60 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
         ScaleDown(new_image);
     }
 
-    std::ranges::sort(overlap_ids, [this](const ImageId lhs, const ImageId rhs) {
-        const ImageBase& lhs_image = slot_images[lhs];
-        const ImageBase& rhs_image = slot_images[rhs];
+    std::ranges::sort(join_copies_to_do, [this](const JoinCopy& lhs, const JoinCopy& rhs) {
+        const ImageBase& lhs_image = slot_images[lhs.id];
+        const ImageBase& rhs_image = slot_images[rhs.id];
         return lhs_image.modification_tick < rhs_image.modification_tick;
     });
 
-    for (const ImageId overlap_id : overlap_ids) {
-        Image& overlap = slot_images[overlap_id];
+    ImageBase& new_image_base = new_image;
+    for (const ImageId aliased_id : join_right_aliased_ids) {
+        ImageBase& aliased = slot_images[aliased_id];
+        size_t alias_index = new_image_base.aliased_images.size();
+        if (!AddImageAlias(new_image_base, aliased, new_image_id, aliased_id)) {
+            continue;
+        }
+        join_alias_indices.emplace(aliased_id, alias_index);
+        new_image.flags |= ImageFlagBits::Alias;
+    }
+    for (const ImageId aliased_id : join_left_aliased_ids) {
+        ImageBase& aliased = slot_images[aliased_id];
+        size_t alias_index = new_image_base.aliased_images.size();
+        if (!AddImageAlias(aliased, new_image_base, aliased_id, new_image_id)) {
+            continue;
+        }
+        join_alias_indices.emplace(aliased_id, alias_index);
+        new_image.flags |= ImageFlagBits::Alias;
+    }
+    for (const ImageId aliased_id : join_bad_overlap_ids) {
+        ImageBase& aliased = slot_images[aliased_id];
+        aliased.overlapping_images.push_back(new_image_id);
+        new_image.overlapping_images.push_back(aliased_id);
+        if (aliased.info.resources.levels == 1 && aliased.info.block.depth == 0 &&
+            aliased.overlapping_images.size() > 1) {
+            aliased.flags |= ImageFlagBits::BadOverlap;
+        }
+        if (new_image.info.resources.levels == 1 && new_image.info.block.depth == 0 &&
+            new_image.overlapping_images.size() > 1) {
+            new_image.flags |= ImageFlagBits::BadOverlap;
+        }
+    }
+
+    for (const auto& copy_object : join_copies_to_do) {
+        Image& overlap = slot_images[copy_object.id];
+        if (copy_object.is_alias) {
+            if (!overlap.IsSafeDownload()) {
+                continue;
+            }
+            const auto alias_pointer = join_alias_indices.find(copy_object.id);
+            if (alias_pointer == join_alias_indices.end()) {
+                continue;
+            }
+            const AliasedImage& aliased = new_image.aliased_images[alias_pointer->second];
+            CopyImage(new_image_id, aliased.id, aliased.copies);
+            new_image.modification_tick = overlap.modification_tick;
+            continue;
+        }
         if (True(overlap.flags & ImageFlagBits::GpuModified)) {
             new_image.flags |= ImageFlagBits::GpuModified;
             const auto& resolution = Settings::values.resolution_info;
@@ -1411,35 +1495,15 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
         } else {
             runtime.CopyImage(new_image, overlap, std::move(copies));
         }
+        new_image.modification_tick = overlap.modification_tick;
         }
         if (True(overlap.flags & ImageFlagBits::Tracked)) {
-            UntrackImage(overlap, overlap_id);
-        }
-        UnregisterImage(overlap_id);
-        DeleteImage(overlap_id);
-    }
-    ImageBase& new_image_base = new_image;
-    for (const ImageId aliased_id : right_aliased_ids) {
-        ImageBase& aliased = slot_images[aliased_id];
-        AddImageAlias(new_image_base, aliased, new_image_id, aliased_id);
-        new_image.flags |= ImageFlagBits::Alias;
-    }
-    for (const ImageId aliased_id : left_aliased_ids) {
-        ImageBase& aliased = slot_images[aliased_id];
-        AddImageAlias(aliased, new_image_base, aliased_id, new_image_id);
-        new_image.flags |= ImageFlagBits::Alias;
-    }
-    for (const ImageId aliased_id : bad_overlap_ids) {
-        ImageBase& aliased = slot_images[aliased_id];
-        aliased.overlapping_images.push_back(new_image_id);
-        new_image.overlapping_images.push_back(aliased_id);
-        if (aliased.info.resources.levels == 1 && aliased.overlapping_images.size() > 1) {
-            aliased.flags |= ImageFlagBits::BadOverlap;
-        }
-        if (new_image.info.resources.levels == 1 && new_image.overlapping_images.size() > 1) {
-            new_image.flags |= ImageFlagBits::BadOverlap;
+            UntrackImage(overlap, copy_object.id);
         }
+        UnregisterImage(copy_object.id);
+        DeleteImage(copy_object.id);
     }
+
     RegisterImage(new_image_id);
     return new_image_id;
 }
@@ -1469,7 +1533,7 @@ std::optional<typename TextureCache<P>::BlitImages> TextureCache<P>::GetBlitImag
     if (!copy.must_accelerate) {
         do {
             if (!src_id && !dst_id) {
-                break;
+                return std::nullopt;
             }
             if (src_id && True(slot_images[src_id].flags & ImageFlagBits::GpuModified)) {
                 break;
@@ -1847,10 +1911,6 @@ void TextureCache<P>::RegisterImage(ImageId image_id) {
         tentative_size = EstimatedDecompressedSize(tentative_size, image.info.format);
     }
     total_used_memory += Common::AlignUp(tentative_size, 1024);
-    if (total_used_memory > critical_memory && critical_gc < GC_EMERGENCY_COUNTS) {
-        RunGarbageCollector();
-        critical_gc++;
-    }
     image.lru_index = lru_cache.Insert(image_id, frame_tick);
     ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, image_id](u64 page) {
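Note: JoinImages now funnels plain overlaps and aliased siblings through a single join_copies_to_do list sorted by modification_tick, so copies are applied oldest first and the most recently written data ends up on top. The ordering requirement in isolation (illustrative types; tick_of is a hypothetical lookup table):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct JoinCopy {
        bool is_alias;
        std::uint32_t id;
    };

    // Apply copies in ascending tick order so a newer source overwrites an
    // older one in the joined image.
    void SortByTick(std::vector<JoinCopy>& copies, const std::vector<std::uint64_t>& tick_of) {
        std::ranges::sort(copies, [&](const JoinCopy& lhs, const JoinCopy& rhs) {
            return tick_of[lhs.id] < tick_of[rhs.id];
        });
    }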
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 0720494e5..3bfa92154 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -10,7 +10,9 @@
 #include <span>
 #include <type_traits>
 #include <unordered_map>
+#include <unordered_set>
 #include <vector>
+#include <boost/container/small_vector.hpp>
 #include <queue>
 
 #include "common/common_types.h"
@@ -148,6 +150,9 @@ public:
     /// Fill image_view_ids with the compute images in indices
     void FillComputeImageViews(std::span<ImageViewInOut> views);
 
+    /// Handle feedback loops during draws.
+    void CheckFeedbackLoop(std::span<const ImageViewInOut> views);
+
     /// Get the sampler from the graphics descriptor table in the specified index
     Sampler* GetGraphicsSampler(u32 index);
 
@@ -424,7 +429,6 @@ private:
     u64 minimum_memory;
     u64 expected_memory;
     u64 critical_memory;
-    size_t critical_gc;
 
     struct BufferDownload {
         GPUVAddr address;
@@ -474,6 +478,20 @@ private:
 
     Common::ThreadWorker texture_decode_worker{1, "TextureDecoder"};
     std::vector<std::unique_ptr<AsyncDecodeContext>> async_decodes;
+
+    // Join caching
+    boost::container::small_vector<ImageId, 4> join_overlap_ids;
+    std::unordered_set<ImageId> join_overlaps_found;
+    boost::container::small_vector<ImageId, 4> join_left_aliased_ids;
+    boost::container::small_vector<ImageId, 4> join_right_aliased_ids;
+    std::unordered_set<ImageId> join_ignore_textures;
+    boost::container::small_vector<ImageId, 4> join_bad_overlap_ids;
+    struct JoinCopy {
+        bool is_alias;
+        ImageId id;
+    };
+    boost::container::small_vector<JoinCopy, 4> join_copies_to_do;
+    std::unordered_map<ImageId, size_t> join_alias_indices;
 };
 
 } // namespace VideoCommon
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index f1071aa23..95a5b47d8 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -18,6 +18,8 @@
 #include "common/bit_util.h"
 #include "common/common_types.h"
 #include "common/div_ceil.h"
+#include "common/scratch_buffer.h"
+#include "common/settings.h"
 #include "video_core/compatible_formats.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/memory_manager.h"
@@ -28,6 +30,7 @@
 #include "video_core/texture_cache/samples_helper.h"
 #include "video_core/texture_cache/util.h"
 #include "video_core/textures/astc.h"
+#include "video_core/textures/bcn.h"
 #include "video_core/textures/decoders.h"
 
 namespace VideoCommon {
@@ -120,7 +123,9 @@ template <u32 GOB_EXTENT>
     return {
         .width = AdjustMipBlockSize<GOB_SIZE_X>(num_tiles.width, block_size.width, level),
         .height = AdjustMipBlockSize<GOB_SIZE_Y>(num_tiles.height, block_size.height, level),
-        .depth = AdjustMipBlockSize<GOB_SIZE_Z>(num_tiles.depth, block_size.depth, level),
+        .depth = level == 0
+                     ? block_size.depth
+                     : AdjustMipBlockSize<GOB_SIZE_Z>(num_tiles.depth, block_size.depth, level),
     };
 }
@@ -162,6 +167,13 @@ template <u32 GOB_EXTENT>
 }
 
 [[nodiscard]] constexpr Extent3D TileShift(const LevelInfo& info, u32 level) {
+    if (level == 0) {
+        return Extent3D{
+            .width = info.block.width,
+            .height = info.block.height,
+            .depth = info.block.depth,
+        };
+    }
     const Extent3D blocks = NumLevelBlocks(info, level);
     return Extent3D{
         .width = AdjustTileSize(info.block.width, GOB_SIZE_X, blocks.width),
@@ -585,6 +597,21 @@ u32 CalculateConvertedSizeBytes(const ImageInfo& info) noexcept {
         return info.size.width * BytesPerBlock(info.format);
     }
     static constexpr Extent2D TILE_SIZE{1, 1};
+    if (IsPixelFormatASTC(info.format) && Settings::values.astc_recompression.GetValue() !=
+                                              Settings::AstcRecompression::Uncompressed) {
+        const u32 bpp_div =
+            Settings::values.astc_recompression.GetValue() == Settings::AstcRecompression::Bc1 ? 2
+                                                                                               : 1;
+        // NumBlocksPerLayer doesn't account for this correctly, so we have to do it manually.
+        u32 output_size = 0;
+        for (s32 i = 0; i < info.resources.levels; i++) {
+            const auto mip_size = AdjustMipSize(info.size, i);
+            const u32 plane_dim =
+                Common::AlignUp(mip_size.width, 4U) * Common::AlignUp(mip_size.height, 4U);
+            output_size += (plane_dim * info.size.depth * info.resources.layers) / bpp_div;
+        }
+        return output_size;
+    }
     return NumBlocksPerLayer(info, TILE_SIZE) * info.resources.layers * CONVERTED_BYTES_PER_BLOCK;
 }
@@ -885,6 +912,7 @@ BufferCopy UploadBufferCopy(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr,
 void ConvertImage(std::span<const u8> input, const ImageInfo& info, std::span<u8> output,
                   std::span<BufferImageCopy> copies) {
     u32 output_offset = 0;
+    Common::ScratchBuffer<u8> decode_scratch;
 
     const Extent2D tile_size = DefaultBlockSize(info.format);
     for (BufferImageCopy& copy : copies) {
@@ -895,22 +923,58 @@ void ConvertImage(std::span<const u8> input, const ImageInfo& info, std::span<u8
         ASSERT(copy.image_extent == mip_size);
         ASSERT(copy.buffer_row_length == Common::AlignUp(mip_size.width, tile_size.width));
         ASSERT(copy.buffer_image_height == Common::AlignUp(mip_size.height, tile_size.height));
-        if (IsPixelFormatASTC(info.format)) {
+
+        const auto input_offset = input.subspan(copy.buffer_offset);
+        copy.buffer_offset = output_offset;
+        copy.buffer_row_length = mip_size.width;
+        copy.buffer_image_height = mip_size.height;
+
+        const auto recompression_setting = Settings::values.astc_recompression.GetValue();
+        const bool astc = IsPixelFormatASTC(info.format);
+
+        if (astc && recompression_setting == Settings::AstcRecompression::Uncompressed) {
             Tegra::Texture::ASTC::Decompress(
-                input.subspan(copy.buffer_offset), copy.image_extent.width,
-                copy.image_extent.height,
+                input_offset, copy.image_extent.width, copy.image_extent.height,
                 copy.image_subresource.num_layers * copy.image_extent.depth, tile_size.width,
                 tile_size.height, output.subspan(output_offset));
+
+            output_offset += copy.image_extent.width * copy.image_extent.height *
+                             copy.image_subresource.num_layers * CONVERTED_BYTES_PER_BLOCK;
+        } else if (astc) {
+            // BC1 uses 0.5 bytes per texel
+            // BC3 uses 1 byte per texel
+            const auto compress = recompression_setting == Settings::AstcRecompression::Bc1
                                      ? Tegra::Texture::BCN::CompressBC1
                                      : Tegra::Texture::BCN::CompressBC3;
+            const auto bpp_div = recompression_setting == Settings::AstcRecompression::Bc1 ? 2 : 1;
+
+            const u32 plane_dim = copy.image_extent.width * copy.image_extent.height;
+            const u32 level_size = plane_dim * copy.image_extent.depth *
+                                   copy.image_subresource.num_layers * CONVERTED_BYTES_PER_BLOCK;
+            decode_scratch.resize_destructive(level_size);
+
+            Tegra::Texture::ASTC::Decompress(
+                input_offset, copy.image_extent.width, copy.image_extent.height,
+                copy.image_subresource.num_layers * copy.image_extent.depth, tile_size.width,
+                tile_size.height, decode_scratch);
+
+            compress(decode_scratch, copy.image_extent.width, copy.image_extent.height,
+                     copy.image_subresource.num_layers * copy.image_extent.depth,
+                     output.subspan(output_offset));
+
+            const u32 aligned_plane_dim = Common::AlignUp(copy.image_extent.width, 4) *
                                          Common::AlignUp(copy.image_extent.height, 4);
+
+            copy.buffer_size = (aligned_plane_dim * copy.image_extent.depth *
                                copy.image_subresource.num_layers) /
                               bpp_div;
+            output_offset += static_cast<u32>(copy.buffer_size);
         } else {
-            DecompressBC4(input.subspan(copy.buffer_offset), copy.image_extent,
-                          output.subspan(output_offset));
-        }
-        copy.buffer_offset = output_offset;
-        copy.buffer_row_length = mip_size.width;
-        copy.buffer_image_height = mip_size.height;
+            DecompressBC4(input_offset, copy.image_extent, output.subspan(output_offset));
 
-        output_offset += copy.image_extent.width * copy.image_extent.height *
-                         copy.image_subresource.num_layers * CONVERTED_BYTES_PER_BLOCK;
+            output_offset += copy.image_extent.width * copy.image_extent.height *
+                             copy.image_subresource.num_layers * CONVERTED_BYTES_PER_BLOCK;
+        }
     }
 }
@@ -1233,7 +1297,9 @@ u32 MapSizeBytes(const ImageBase& image) {
 static_assert(CalculateLevelSize(LevelInfo{{1920, 1080, 1}, {0, 2, 0}, {1, 1}, 2, 0}, 0) ==
               0x7f8000);
-static_assert(CalculateLevelSize(LevelInfo{{32, 32, 1}, {0, 0, 4}, {1, 1}, 4, 0}, 0) == 0x4000);
+static_assert(CalculateLevelSize(LevelInfo{{32, 32, 1}, {0, 0, 4}, {1, 1}, 4, 0}, 0) == 0x40000);
+
+static_assert(CalculateLevelSize(LevelInfo{{128, 8, 1}, {0, 4, 0}, {1, 1}, 4, 0}, 0) == 0x40000);
 
 static_assert(CalculateLevelOffset(PixelFormat::R8_SINT, {1920, 1080, 1}, {0, 2, 0}, 0, 7) ==
               0x2afc00);
diff --git a/src/video_core/textures/astc.cpp b/src/video_core/textures/astc.cpp
index a68bc0d77..fef0be31d 100644
--- a/src/video_core/textures/astc.cpp
+++ b/src/video_core/textures/astc.cpp
@@ -16,8 +16,8 @@
 #include "common/alignment.h"
 #include "common/common_types.h"
 #include "common/polyfill_ranges.h"
-#include "common/thread_worker.h"
 #include "video_core/textures/astc.h"
+#include "video_core/textures/workers.h"
 
 class InputBitStream {
 public:
@@ -1656,8 +1656,7 @@ void Decompress(std::span<const uint8_t> data, uint32_t width, uint32_t height,
     const u32 rows = Common::DivideUp(height, block_height);
     const u32 cols = Common::DivideUp(width, block_width);
 
-    static Common::ThreadWorker workers{std::max(std::thread::hardware_concurrency(), 2U) / 2,
-                                        "ASTCDecompress"};
+    Common::ThreadWorker& workers{GetThreadWorkers()};
 
     for (u32 z = 0; z < depth; ++z) {
         const u32 depth_offset = z * height * width * 4;
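Note: the recompressed-size branch of CalculateConvertedSizeBytes above aligns each mip to whole 4x4 blocks before applying the BC1 divisor (bpp_div == 2). Worked numbers for a hypothetical 256x256, 3-level, single-layer ASTC image recompressed to BC1:

    #include <algorithm>
    #include <cstdint>

    // Mirrors the BC1 branch of CalculateConvertedSizeBytes (bpp_div == 2).
    constexpr std::uint32_t AlignUp4(std::uint32_t v) {
        return (v + 3) & ~std::uint32_t{3};
    }
    constexpr std::uint32_t Bc1ConvertedSize(std::uint32_t width, std::uint32_t height,
                                             std::uint32_t levels) {
        std::uint32_t total = 0;
        for (std::uint32_t i = 0; i < levels; ++i) {
            const std::uint32_t mip_w = std::max<std::uint32_t>(width >> i, 1);
            const std::uint32_t mip_h = std::max<std::uint32_t>(height >> i, 1);
            total += AlignUp4(mip_w) * AlignUp4(mip_h) / 2; // 0.5 bytes per texel
        }
        return total;
    }
    // 256x256 -> 32768 B, 128x128 -> 8192 B, 64x64 -> 2048 B
    static_assert(Bc1ConvertedSize(256, 256, 3) == 43008);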
diff --git a/src/video_core/textures/bcn.cpp b/src/video_core/textures/bcn.cpp
new file mode 100644
index 000000000..671212a49
--- /dev/null
+++ b/src/video_core/textures/bcn.cpp
@@ -0,0 +1,87 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <stb_dxt.h>
+#include <string.h>
+
+#include "common/alignment.h"
+#include "video_core/textures/bcn.h"
+#include "video_core/textures/workers.h"
+
+namespace Tegra::Texture::BCN {
+
+using BCNCompressor = void(u8* block_output, const u8* block_input, bool any_alpha);
+
+template <u32 BytesPerBlock, bool ThresholdAlpha = false>
+void CompressBCN(std::span<const uint8_t> data, uint32_t width, uint32_t height, uint32_t depth,
+                 std::span<uint8_t> output, BCNCompressor f) {
+    constexpr u8 alpha_threshold = 128;
+    constexpr u32 bytes_per_px = 4;
+    const u32 plane_dim = width * height;
+
+    Common::ThreadWorker& workers{GetThreadWorkers()};
+
+    for (u32 z = 0; z < depth; z++) {
+        for (u32 y = 0; y < height; y += 4) {
+            auto compress_row = [z, y, width, height, plane_dim, f, data, output]() {
+                for (u32 x = 0; x < width; x += 4) {
+                    // Gather 4x4 block of RGBA texels
+                    u8 input_colors[4][4][4];
+                    bool any_alpha = false;
+
+                    for (u32 j = 0; j < 4; j++) {
+                        for (u32 i = 0; i < 4; i++) {
+                            const size_t coord =
+                                (z * plane_dim + (y + j) * width + (x + i)) * bytes_per_px;
+
+                            if ((x + i < width) && (y + j < height)) {
+                                if constexpr (ThresholdAlpha) {
+                                    if (data[coord + 3] >= alpha_threshold) {
+                                        input_colors[j][i][0] = data[coord + 0];
+                                        input_colors[j][i][1] = data[coord + 1];
+                                        input_colors[j][i][2] = data[coord + 2];
+                                        input_colors[j][i][3] = 255;
+                                    } else {
+                                        any_alpha = true;
+                                        memset(input_colors[j][i], 0, bytes_per_px);
+                                    }
+                                } else {
+                                    memcpy(input_colors[j][i], &data[coord], bytes_per_px);
+                                }
+                            } else {
+                                memset(input_colors[j][i], 0, bytes_per_px);
+                            }
+                        }
+                    }
+
+                    const u32 bytes_per_row = BytesPerBlock * Common::DivideUp(width, 4U);
+                    const u32 bytes_per_plane = bytes_per_row * Common::DivideUp(height, 4U);
+                    f(output.data() + z * bytes_per_plane + (y / 4) * bytes_per_row +
+                          (x / 4) * BytesPerBlock,
+                      reinterpret_cast<u8*>(input_colors), any_alpha);
+                }
+            };
+            workers.QueueWork(std::move(compress_row));
+        }
+        workers.WaitForRequests();
+    }
+}
+
+void CompressBC1(std::span<const uint8_t> data, uint32_t width, uint32_t height, uint32_t depth,
+                 std::span<uint8_t> output) {
+    CompressBCN<8, true>(data, width, height, depth, output,
+                         [](u8* block_output, const u8* block_input, bool any_alpha) {
+                             stb_compress_bc1_block(block_output, block_input, any_alpha,
+                                                    STB_DXT_NORMAL);
+                         });
+}
+
+void CompressBC3(std::span<const uint8_t> data, uint32_t width, uint32_t height, uint32_t depth,
+                 std::span<uint8_t> output) {
+    CompressBCN<16, false>(data, width, height, depth, output,
+                           [](u8* block_output, const u8* block_input, bool any_alpha) {
+                               stb_compress_bc3_block(block_output, block_input, STB_DXT_NORMAL);
+                           });
+}
+
+} // namespace Tegra::Texture::BCN
diff --git a/src/video_core/textures/bcn.h b/src/video_core/textures/bcn.h
new file mode 100644
index 000000000..6464af885
--- /dev/null
+++ b/src/video_core/textures/bcn.h
@@ -0,0 +1,17 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <span>
+#include <stdint.h>
+
+namespace Tegra::Texture::BCN {
+
+void CompressBC1(std::span<const uint8_t> data, uint32_t width, uint32_t height, uint32_t depth,
+                 std::span<uint8_t> output);
+
+void CompressBC3(std::span<const uint8_t> data, uint32_t width, uint32_t height, uint32_t depth,
+                 std::span<uint8_t> output);
+
+} // namespace Tegra::Texture::BCN
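Note: the new BCN entry points take tightly packed RGBA8 texels and emit packed blocks, zero-padding partial edge blocks. A minimal, hypothetical call site (buffer sizes follow the 8-bytes-per-4x4-block BC1 layout):

    #include <cstdint>
    #include <vector>

    #include "video_core/textures/bcn.h"

    void RecompressLevel() {
        constexpr uint32_t width = 64, height = 64, depth = 1;
        std::vector<uint8_t> rgba(width * height * depth * 4);            // decoded ASTC texels
        std::vector<uint8_t> bc1((width / 4) * (height / 4) * depth * 8); // 8 B per 4x4 block
        Tegra::Texture::BCN::CompressBC1(rgba, width, height, depth, bc1);
    }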
+#include "video_core/textures/workers.h" + +namespace Tegra::Texture { + +Common::ThreadWorker& GetThreadWorkers() { + static Common::ThreadWorker workers{std::max(std::thread::hardware_concurrency(), 2U) / 2, + "ImageTranscode"}; + + return workers; +} + +} // namespace Tegra::Texture diff --git a/src/video_core/textures/workers.h b/src/video_core/textures/workers.h new file mode 100644 index 000000000..008dd05b3 --- /dev/null +++ b/src/video_core/textures/workers.h @@ -0,0 +1,12 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "common/thread_worker.h" + +namespace Tegra::Texture { + +Common::ThreadWorker& GetThreadWorkers(); + +} diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp index c03f4a56b..aea677cb3 100644 --- a/src/video_core/vulkan_common/vulkan_device.cpp +++ b/src/video_core/vulkan_common/vulkan_device.cpp @@ -1002,6 +1002,11 @@ u64 Device::GetDeviceMemoryUsage() const { } void Device::CollectPhysicalMemoryInfo() { + // Account for resolution scaling in memory limits + const size_t normal_memory = 6_GiB; + const size_t scaler_memory = 1_GiB * Settings::values.resolution_info.ScaleUp(1); + + // Calculate limits using memory budget VkPhysicalDeviceMemoryBudgetPropertiesEXT budget{}; budget.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT; const auto mem_info = @@ -1031,11 +1036,12 @@ void Device::CollectPhysicalMemoryInfo() { if (!is_integrated) { const u64 reserve_memory = std::min<u64>(device_access_memory / 8, 1_GiB); device_access_memory -= reserve_memory; + device_access_memory = std::min<u64>(device_access_memory, normal_memory + scaler_memory); return; } const s64 available_memory = static_cast<s64>(device_access_memory - device_initial_usage); device_access_memory = static_cast<u64>(std::max<s64>( - std::min<s64>(available_memory - 8_GiB, 4_GiB), static_cast<s64>(local_memory))); + std::min<s64>(available_memory - 8_GiB, 4_GiB), std::min<s64>(local_memory, 4_GiB))); } void Device::CollectToolingInfo() { |