From 5cffa342884df531d911555f7b3db9d2f6d1d1f0 Mon Sep 17 00:00:00 2001
From: lat9nq <22451773+lat9nq@users.noreply.github.com>
Date: Wed, 3 May 2023 20:42:33 -0400
Subject: settings,video_core: Consolidate ASTC decoding options

Just puts them all neatly into one place.
---
 src/video_core/renderer_opengl/gl_texture_cache.cpp |  8 ++++----
 src/video_core/renderer_vulkan/vk_texture_cache.cpp | 19 +++++++++++++------
 2 files changed, 17 insertions(+), 10 deletions(-)

(limited to 'src/video_core')

diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index 3b446be07..38ae12d8e 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -232,10 +232,9 @@ void ApplySwizzle(GLuint handle, PixelFormat format, std::array

diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ ... @@
       ...ViewFormats(info.format))), aspect_mask(ImageAspectMask(info.format)) {
     if (IsPixelFormatASTC(info.format) && !runtime->device.IsOptimalAstcSupported()) {
-        if (Settings::values.async_astc.GetValue()) {
+        switch (Settings::values.accelerate_astc.GetValue()) {
+        case Settings::AstcDecodeMode::GPU:
+            if (Settings::values.astc_recompression.GetValue() ==
+                    Settings::AstcRecompression::Uncompressed &&
+                info.size.depth == 1) {
+                flags |= VideoCommon::ImageFlagBits::AcceleratedUpload;
+            }
+            break;
+        case Settings::AstcDecodeMode::CPUAsynchronous:
             flags |= VideoCommon::ImageFlagBits::AsynchronousDecode;
-        } else if (Settings::values.astc_recompression.GetValue() ==
-                       Settings::AstcRecompression::Uncompressed &&
-                   Settings::values.accelerate_astc.GetValue() && info.size.depth == 1) {
-            flags |= VideoCommon::ImageFlagBits::AcceleratedUpload;
+            break;
+        default:
+            break;
         }
         flags |= VideoCommon::ImageFlagBits::Converted;
         flags |= VideoCommon::ImageFlagBits::CostlyLoad;
--
cgit v1.2.3
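To see the shape of the consolidated option in isolation, here is a minimal, self-contained sketch of the selection logic this patch introduces. The enum and flag types are hypothetical stand-ins for yuzu's Settings and VideoCommon types, not the real API; enumerator spellings follow this commit (a later patch in this series renames them).

#include <cstdint>
#include <iostream>

// Hypothetical stand-ins, for illustration only.
enum class AstcDecodeMode : std::uint32_t { CPU, GPU, CPUAsynchronous };
enum class AstcRecompression : std::uint32_t { Uncompressed, Bc1, Bc3 };

struct ImageFlags {
    bool accelerated_upload = false;
    bool asynchronous_decode = false;
    bool converted = false;
};

// Mirrors the consolidated selection: one switch over the decode mode
// instead of consulting separate async_astc/accelerate_astc booleans.
ImageFlags SelectAstcPath(AstcDecodeMode mode, AstcRecompression recompression,
                          std::uint32_t depth) {
    ImageFlags flags{};
    switch (mode) {
    case AstcDecodeMode::GPU:
        // Accelerated upload only applies to uncompressed 2D targets here.
        if (recompression == AstcRecompression::Uncompressed && depth == 1) {
            flags.accelerated_upload = true;
        }
        break;
    case AstcDecodeMode::CPUAsynchronous:
        flags.asynchronous_decode = true;
        break;
    default:
        break; // Synchronous CPU decode needs no extra flags.
    }
    flags.converted = true; // ASTC is always converted when unsupported natively.
    return flags;
}

int main() {
    const ImageFlags flags =
        SelectAstcPath(AstcDecodeMode::GPU, AstcRecompression::Uncompressed, 1);
    std::cout << "accelerated upload: " << flags.accelerated_upload << '\n';
}

The point of the consolidation is visible in the sketch: every decode strategy is decided in one switch, so adding a mode means adding one case rather than threading a new boolean through several conditions.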
From a007ac6b9ccc23861f5a5c6967d535220ed794b0 Mon Sep 17 00:00:00 2001
From: lat9nq <22451773+lat9nq@users.noreply.github.com>
Date: Sun, 7 May 2023 09:48:26 -0400
Subject: configure_graphics_advance: Generate UI at runtime

We can iterate through the AdvancedGraphics settings and generate the UI
during runtime. This doesn't help runtime efficiency, but it helps a ton
in reducing the amount of work a developer needs in order to add a new
setting.
---
 src/video_core/textures/texture.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'src/video_core')

diff --git a/src/video_core/textures/texture.cpp b/src/video_core/textures/texture.cpp
index d8b88d9bc..39c08b5ae 100644
--- a/src/video_core/textures/texture.cpp
+++ b/src/video_core/textures/texture.cpp
@@ -72,12 +72,12 @@ float TSCEntry::MaxAnisotropy() const noexcept {
     }
     const auto anisotropic_settings = Settings::values.max_anisotropy.GetValue();
     s32 added_anisotropic{};
-    if (anisotropic_settings == 0) {
+    if (anisotropic_settings == Settings::AnisotropyMode::Automatic) {
         added_anisotropic = Settings::values.resolution_info.up_scale >>
                             Settings::values.resolution_info.down_shift;
         added_anisotropic = std::max(added_anisotropic - 1, 0);
     } else {
-        added_anisotropic = Settings::values.max_anisotropy.GetValue() - 1U;
+        added_anisotropic = static_cast<s32>(Settings::values.max_anisotropy.GetValue()) - 1U;
     }
     return static_cast<float>(1U << (max_anisotropy + added_anisotropic));
 }
--
cgit v1.2.3
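The runtime-generated UI this commit message describes can be modeled with a small registry of type-erased settings. The sketch below is an assumption-laden illustration: BasicSetting and Setting<T> are hypothetical stand-ins, and plain stdout stands in for the Qt widgets yuzu's frontend actually creates.

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Hypothetical minimal model of a runtime-iterable settings registry.
class BasicSetting {
public:
    virtual ~BasicSetting() = default;
    virtual std::string GetLabel() const = 0;
    virtual std::string ToString() const = 0;
};

template <typename T>
class Setting : public BasicSetting {
public:
    Setting(std::string label, T value) : label_{std::move(label)}, value_{value} {}
    std::string GetLabel() const override { return label_; }
    std::string ToString() const override { return std::to_string(value_); }

private:
    std::string label_;
    T value_;
};

int main() {
    // The registry the frontend iterates; registering a setting here is the
    // only step needed for it to appear in the generated UI.
    std::vector<std::unique_ptr<BasicSetting>> advanced_graphics;
    advanced_graphics.push_back(std::make_unique<Setting<bool>>("Use async shaders", true));
    advanced_graphics.push_back(std::make_unique<Setting<int>>("Anisotropic filtering", 16));

    for (const auto& setting : advanced_graphics) {
        // In a Qt frontend this loop would create a checkbox or combobox
        // per entry; here it just prints the label/value pair.
        std::cout << setting->GetLabel() << " = " << setting->ToString() << '\n';
    }
}

The design trade-off is the one the commit message names: building widgets by iterating a registry costs a little at startup but removes the per-setting frontend boilerplate a developer would otherwise write by hand.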
From d146dd9d123a999e40307a93403239b81b04bffc Mon Sep 17 00:00:00 2001
From: lat9nq <22451773+lat9nq@users.noreply.github.com>
Date: Wed, 7 Jun 2023 01:52:23 -0400
Subject: settings,general: Rename non-conforming enums
---
 src/video_core/host1x/codecs/codec.cpp                  |  2 +-
 src/video_core/host1x/codecs/h264.cpp                   |  2 +-
 src/video_core/renderer_opengl/gl_compute_pipeline.cpp  |  6 +++---
 src/video_core/renderer_opengl/gl_device.cpp            |  8 ++++----
 src/video_core/renderer_opengl/gl_graphics_pipeline.cpp |  6 +++---
 src/video_core/renderer_opengl/gl_shader_cache.cpp      | 12 ++++++------
 src/video_core/renderer_opengl/gl_texture_cache.cpp     |  4 ++--
 src/video_core/renderer_vulkan/vk_swapchain.cpp         | 12 ++++++------
 src/video_core/renderer_vulkan/vk_texture_cache.cpp     |  6 +++---
 9 files changed, 29 insertions(+), 29 deletions(-)

(limited to 'src/video_core')

diff --git a/src/video_core/host1x/codecs/codec.cpp b/src/video_core/host1x/codecs/codec.cpp
index da07a556f..220cce28a 100644
--- a/src/video_core/host1x/codecs/codec.cpp
+++ b/src/video_core/host1x/codecs/codec.cpp
@@ -247,7 +247,7 @@ void Codec::Initialize() {
     av_codec = avcodec_find_decoder(codec);
     InitializeAvCodecContext();
-    if (Settings::values.nvdec_emulation.GetValue() == Settings::NvdecEmulation::GPU) {
+    if (Settings::values.nvdec_emulation.GetValue() == Settings::NvdecEmulation::Gpu) {
         InitializeGpuDecoder();
     }
     if (const int res = avcodec_open2(av_codec_ctx, av_codec, nullptr); res < 0) {
diff --git a/src/video_core/host1x/codecs/h264.cpp b/src/video_core/host1x/codecs/h264.cpp
index 862904e39..ece79b1e2 100644
--- a/src/video_core/host1x/codecs/h264.cpp
+++ b/src/video_core/host1x/codecs/h264.cpp
@@ -84,7 +84,7 @@ std::span<const u8> H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters
     // TODO (ameerj): Where do we get this number, it seems to be particular for each stream
     const auto nvdec_decoding = Settings::values.nvdec_emulation.GetValue();
-    const bool uses_gpu_decoding = nvdec_decoding == Settings::NvdecEmulation::GPU;
+    const bool uses_gpu_decoding = nvdec_decoding == Settings::NvdecEmulation::Gpu;
     const u32 max_num_ref_frames = uses_gpu_decoding ? 6u : 16u;
     writer.WriteUe(max_num_ref_frames);
     writer.WriteBit(false);
diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
index f9ca55c36..d70501860 100644
--- a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
+++ b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
@@ -34,13 +34,13 @@ ComputePipeline::ComputePipeline(const Device& device, TextureCache& texture_cac
     : texture_cache{texture_cache_}, buffer_cache{buffer_cache_},
       program_manager{program_manager_}, info{info_} {
     switch (device.GetShaderBackend()) {
-    case Settings::ShaderBackend::GLSL:
+    case Settings::ShaderBackend::Glsl:
         source_program = CreateProgram(code, GL_COMPUTE_SHADER);
         break;
-    case Settings::ShaderBackend::GLASM:
+    case Settings::ShaderBackend::Glasm:
         assembly_program = CompileProgram(code, GL_COMPUTE_PROGRAM_NV);
         break;
-    case Settings::ShaderBackend::SPIRV:
+    case Settings::ShaderBackend::SpirV:
         source_program = CreateProgram(code_v, GL_COMPUTE_SHADER);
         break;
     }
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 33e63c17d..ee140c9c2 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -177,15 +177,15 @@ Device::Device(Core::Frontend::EmuWindow& emu_window) {
     has_fast_buffer_sub_data = is_nvidia && !disable_fast_buffer_sub_data;
     shader_backend = Settings::values.shader_backend.GetValue();
-    use_assembly_shaders = shader_backend == Settings::ShaderBackend::GLASM &&
+    use_assembly_shaders = shader_backend == Settings::ShaderBackend::Glasm &&
                            GLAD_GL_NV_gpu_program5 && GLAD_GL_NV_compute_program5 &&
                            GLAD_GL_NV_transform_feedback && GLAD_GL_NV_transform_feedback2;
-    if (shader_backend == Settings::ShaderBackend::GLASM && !use_assembly_shaders) {
+    if (shader_backend == Settings::ShaderBackend::Glasm && !use_assembly_shaders) {
         LOG_ERROR(Render_OpenGL, "Assembly shaders enabled but not supported");
-        shader_backend = Settings::ShaderBackend::GLSL;
+        shader_backend = Settings::ShaderBackend::Glsl;
     }
-    if (shader_backend == Settings::ShaderBackend::GLSL && is_nvidia) {
+    if (shader_backend == Settings::ShaderBackend::Glsl && is_nvidia) {
         const std::string_view driver_version = version.substr(13);
         const int version_major =
             std::atoi(driver_version.substr(0, driver_version.find(".")).data());
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
index 71f720c63..f822fa856 100644
--- a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
+++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
@@ -236,18 +236,18 @@ GraphicsPipeline::GraphicsPipeline(const Device& device, TextureCache& texture_c
                    force_context_flush](ShaderContext::Context*) mutable {
         for (size_t stage = 0; stage < 5; ++stage) {
             switch (backend) {
-            case Settings::ShaderBackend::GLSL:
+            case Settings::ShaderBackend::Glsl:
                 if (!sources_[stage].empty()) {
                     source_programs[stage] = CreateProgram(sources_[stage], Stage(stage));
                 }
                 break;
-            case Settings::ShaderBackend::GLASM:
+            case Settings::ShaderBackend::Glasm:
                 if (!sources_[stage].empty()) {
                     assembly_programs[stage] =
                         CompileProgram(sources_[stage], AssemblyStage(stage));
                 }
                 break;
-            case Settings::ShaderBackend::SPIRV:
+            case Settings::ShaderBackend::SpirV:
                 if (!sources_spirv_[stage].empty()) {
                     source_programs[stage] = CreateProgram(sources_spirv_[stage], Stage(stage));
                 }
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 7e1d7f92e..618cb6354 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -522,14 +522,14 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline(
         const auto runtime_info{
             MakeRuntimeInfo(key, program, previous_program, glasm_use_storage_buffers, use_glasm)};
         switch (device.GetShaderBackend()) {
-        case Settings::ShaderBackend::GLSL:
+        case Settings::ShaderBackend::Glsl:
            ConvertLegacyToGeneric(program, runtime_info);
            sources[stage_index] = EmitGLSL(profile, runtime_info, program, binding);
            break;
-        case Settings::ShaderBackend::GLASM:
+        case Settings::ShaderBackend::Glasm:
            sources[stage_index] = EmitGLASM(profile, runtime_info, program, binding);
            break;
-        case Settings::ShaderBackend::SPIRV:
+        case Settings::ShaderBackend::SpirV:
            ConvertLegacyToGeneric(program, runtime_info);
            sources_spirv[stage_index] = EmitSPIRV(profile, runtime_info, program, binding);
            break;
@@ -582,13 +582,13 @@ std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline(
     std::string code{};
     std::vector<u32> code_spirv;
     switch (device.GetShaderBackend()) {
-    case Settings::ShaderBackend::GLSL:
+    case Settings::ShaderBackend::Glsl:
         code = EmitGLSL(profile, program);
         break;
-    case Settings::ShaderBackend::GLASM:
+    case Settings::ShaderBackend::Glasm:
         code = EmitGLASM(profile, info, program);
         break;
-    case Settings::ShaderBackend::SPIRV:
+    case Settings::ShaderBackend::SpirV:
         code_spirv = EmitSPIRV(profile, program);
         break;
     }
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index 38ae12d8e..9cafd2983 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -232,7 +232,7 @@ void ApplySwizzle(GLuint handle, PixelFormat format, std::array

diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ ... @@
     if (IsPixelFormatASTC(info.format) && !runtime->device.IsOptimalAstcSupported()) {
         switch (Settings::values.accelerate_astc.GetValue()) {
-        case Settings::AstcDecodeMode::GPU:
+        case Settings::AstcDecodeMode::Gpu:
             if (Settings::values.astc_recompression.GetValue() ==
                     Settings::AstcRecompression::Uncompressed &&
                 info.size.depth == 1) {
                 flags |= VideoCommon::ImageFlagBits::AcceleratedUpload;
             }
             break;
-        case Settings::AstcDecodeMode::CPUAsynchronous:
+        case Settings::AstcDecodeMode::CpuAsynchronous:
             flags |= VideoCommon::ImageFlagBits::AsynchronousDecode;
             break;
         default:
--
cgit v1.2.3
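The renamed enumerators thread through every dispatch switch shown above. The stand-alone sketch below illustrates that pattern with a hypothetical ShaderBackend enum mirroring the renamed spellings (Glsl/Glasm/SpirV rather than GLSL/GLASM/SPIRV); it is not yuzu's actual header, and the emitter names are shown only as strings.

#include <iostream>
#include <string_view>

// Hypothetical mirror of the renamed enum, following the CamelCase
// convention this commit adopts project-wide.
enum class ShaderBackend : unsigned {
    Glsl,
    Glasm,
    SpirV,
};

// The dispatch shape used at each compile site: one switch selecting the
// emitter for the active backend, with no default so a new enumerator
// triggers a -Wswitch warning until every site is updated.
std::string_view EmitterName(ShaderBackend backend) {
    switch (backend) {
    case ShaderBackend::Glsl:
        return "EmitGLSL";
    case ShaderBackend::Glasm:
        return "EmitGLASM";
    case ShaderBackend::SpirV:
        return "EmitSPIRV";
    }
    return "unknown"; // unreachable for valid enumerators
}

int main() {
    std::cout << EmitterName(ShaderBackend::Glasm) << '\n';
}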
From 8366736b67d6febe278b6599badf4e945599bc30 Mon Sep 17 00:00:00 2001
From: lat9nq <22451773+lat9nq@users.noreply.github.com>
Date: Tue, 20 Jun 2023 23:29:07 -0400
Subject: settings,opengl,yuzu-qt: Fix AA, Filter maximums

The new enum macros don't support setting values directly. For LastAA
and LastFilter, this means we need a simpler approach to loop around
the toggle in the frontend...
---
 src/video_core/renderer_opengl/renderer_opengl.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'src/video_core')

diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index 2a74c1d05..6b8d4e554 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -473,7 +473,7 @@ void RendererOpenGL::DrawScreen(const Layout::FramebufferLayout& layout) {
     glBindTextureUnit(0, screen_info.display_texture);
     auto anti_aliasing = Settings::values.anti_aliasing.GetValue();
-    if (anti_aliasing > Settings::AntiAliasing::LastAA) {
+    if (anti_aliasing >= Settings::AntiAliasing::MaxEnum) {
         LOG_ERROR(Render_OpenGL, "Invalid antialiasing option selected {}", anti_aliasing);
         anti_aliasing = Settings::AntiAliasing::None;
         Settings::values.anti_aliasing.SetValue(anti_aliasing);
--
cgit v1.2.3
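The MaxEnum sentinel used in the fix above also enables the "loop around the toggle" the commit message mentions. Here is a hedged sketch of both uses, with a hypothetical AntiAliasing enum standing in for the macro-generated one; the enumerator set is assumed, not quoted from yuzu.

#include <iostream>

// Hypothetical mirror of the macro-generated enum: it ends with a MaxEnum
// sentinel instead of an explicitly assigned LastAA value.
enum class AntiAliasing : unsigned {
    None,
    Fxaa,
    Smaa,
    MaxEnum, // generated sentinel; never a valid selection
};

// Validation as in DrawScreen: clamp anything at or past the sentinel.
AntiAliasing Sanitize(AntiAliasing aa) {
    if (aa >= AntiAliasing::MaxEnum) {
        std::cerr << "Invalid antialiasing option selected\n";
        return AntiAliasing::None;
    }
    return aa;
}

// The simpler frontend toggle: advance and wrap at the sentinel rather
// than comparing against a hand-maintained LastAA value.
AntiAliasing NextAntiAliasing(AntiAliasing aa) {
    const unsigned next = static_cast<unsigned>(aa) + 1;
    return next >= static_cast<unsigned>(AntiAliasing::MaxEnum)
               ? AntiAliasing::None
               : static_cast<AntiAliasing>(next);
}

int main() {
    AntiAliasing aa = AntiAliasing::Smaa;
    aa = NextAntiAliasing(aa); // wraps back to None
    std::cout << static_cast<unsigned>(Sanitize(aa)) << '\n';
}

Because MaxEnum is always the count of valid enumerators, both the validity check and the wrap-around stay correct when a new mode is inserted, which is exactly why the hand-set LastAA/LastFilter values could be dropped.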