-rw-r--r-- | dist/languages/.tx/config | 5
-rw-r--r-- | src/core/hle/service/friend/friend.cpp | 13
-rw-r--r-- | src/video_core/CMakeLists.txt | 4
-rw-r--r-- | src/video_core/host1x/codecs/codec.cpp | 329
-rw-r--r-- | src/video_core/host1x/codecs/codec.h | 39
-rw-r--r-- | src/video_core/host1x/codecs/h264.cpp | 4
-rw-r--r-- | src/video_core/host1x/codecs/h264.h | 1
-rw-r--r-- | src/video_core/host1x/ffmpeg/ffmpeg.cpp | 419
-rw-r--r-- | src/video_core/host1x/ffmpeg/ffmpeg.h | 213
-rw-r--r-- | src/video_core/host1x/nvdec.cpp | 2
-rw-r--r-- | src/video_core/host1x/nvdec.h | 2
-rw-r--r-- | src/video_core/host1x/vic.cpp | 62
-rw-r--r-- | src/video_core/host1x/vic.h | 4
-rw-r--r-- | src/yuzu/CMakeLists.txt | 51
-rw-r--r-- | src/yuzu/configuration/shared_translation.cpp | 517
-rw-r--r-- | src/yuzu/configuration/shared_widget.cpp | 4
-rw-r--r-- | src/yuzu/game_list_worker.cpp | 2
17 files changed, 1035 insertions, 636 deletions
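The bulk of this change replaces the raw libav calls previously in Codec with the new FFmpeg::DecodeApi and FFmpeg::Frame wrappers added under src/video_core/host1x/ffmpeg/. As orientation before the full diff, here is a minimal sketch of the decode flow those wrappers expose, using only the signatures introduced below; the helper name and the bitstream/queue parameters are placeholders for illustration, not code from the tree.

```cpp
// Hypothetical driver function (not part of the diff) showing the call order that
// the reworked Codec::Decode() follows with the new wrappers.
#include <memory>
#include <queue>
#include <span>

#include "video_core/host1x/ffmpeg/ffmpeg.h"

bool DecodeOneFrame(FFmpeg::DecodeApi& decode_api, std::span<const u8> bitstream,
                    size_t configuration_size,
                    std::queue<std::unique_ptr<FFmpeg::Frame>>& frames) {
    // decode_api.Initialize(codec) must have succeeded once beforehand; it opens the
    // AVCodecContext and, when NVDEC emulation is set to GPU, a hardware device.

    // Feed one assembled bitstream packet to the decoder.
    if (!decode_api.SendPacket(bitstream, configuration_size)) {
        return false;
    }

    // Collect whatever frames the decoder (and, for interlaced content, the yadif
    // deinterlace filter) produced.
    decode_api.ReceiveFrames(frames);
    return !frames.empty();
}
```

Codec::Decode() in the diff below follows the same order: assemble the per-codec bitstream, call SendPacket, then ReceiveFrames into its frame queue, which it caps at 10 entries.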
diff --git a/dist/languages/.tx/config b/dist/languages/.tx/config index 30e76b925..cca7b3d67 100644 --- a/dist/languages/.tx/config +++ b/dist/languages/.tx/config @@ -6,3 +6,8 @@ file_filter = <lang>.ts source_file = en.ts source_lang = en type = QT + +[o:yuzu-emulator:p:yuzu:r:yuzu-android] +file_filter = ../../src/android/app/src/main/res/values-<lang>/strings.xml +source_file = ../../src/android/app/src/main/res/values/strings.xml +type = ANDROID diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp index 9d05f9801..0507b14e7 100644 --- a/src/core/hle/service/friend/friend.cpp +++ b/src/core/hle/service/friend/friend.cpp @@ -32,7 +32,7 @@ public: {10200, nullptr, "SendFriendRequestForApplication"}, {10211, nullptr, "AddFacedFriendRequestForApplication"}, {10400, &IFriendService::GetBlockedUserListIds, "GetBlockedUserListIds"}, - {10420, nullptr, "IsBlockedUserListCacheAvailable"}, + {10420, &IFriendService::CheckBlockedUserListAvailability, "CheckBlockedUserListAvailability"}, {10421, nullptr, "EnsureBlockedUserListAvailable"}, {10500, nullptr, "GetProfileList"}, {10600, nullptr, "DeclareOpenOnlinePlaySession"}, @@ -206,6 +206,17 @@ private: rb.Push(true); } + void CheckBlockedUserListAvailability(HLERequestContext& ctx) { + IPC::RequestParser rp{ctx}; + const auto uuid{rp.PopRaw<Common::UUID>()}; + + LOG_WARNING(Service_Friend, "(STUBBED) called, uuid=0x{}", uuid.RawString()); + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(ResultSuccess); + rb.Push(true); + } + KernelHelpers::ServiceContext service_context; Kernel::KEvent* completion_event; diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt index cf9266d54..b65b9f2a2 100644 --- a/src/video_core/CMakeLists.txt +++ b/src/video_core/CMakeLists.txt @@ -4,7 +4,7 @@ add_subdirectory(host_shaders) if(LIBVA_FOUND) - set_source_files_properties(host1x/codecs/codec.cpp + set_source_files_properties(host1x/ffmpeg/ffmpeg.cpp PROPERTIES COMPILE_DEFINITIONS LIBVA_FOUND=1) list(APPEND FFmpeg_LIBRARIES ${LIBVA_LIBRARIES}) endif() @@ -66,6 +66,8 @@ add_library(video_core STATIC host1x/codecs/vp9.cpp host1x/codecs/vp9.h host1x/codecs/vp9_types.h + host1x/ffmpeg/ffmpeg.cpp + host1x/ffmpeg/ffmpeg.h host1x/control.cpp host1x/control.h host1x/host1x.cpp diff --git a/src/video_core/host1x/codecs/codec.cpp b/src/video_core/host1x/codecs/codec.cpp index dbcf508e5..1030db681 100644 --- a/src/video_core/host1x/codecs/codec.cpp +++ b/src/video_core/host1x/codecs/codec.cpp @@ -1,11 +1,7 @@ // SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later -#include <algorithm> -#include <fstream> -#include <vector> #include "common/assert.h" -#include "common/scope_exit.h" #include "common/settings.h" #include "video_core/host1x/codecs/codec.h" #include "video_core/host1x/codecs/h264.h" @@ -14,242 +10,17 @@ #include "video_core/host1x/host1x.h" #include "video_core/memory_manager.h" -extern "C" { -#include <libavfilter/buffersink.h> -#include <libavfilter/buffersrc.h> -#include <libavutil/opt.h> -#ifdef LIBVA_FOUND -// for querying VAAPI driver information -#include <libavutil/hwcontext_vaapi.h> -#endif -} - namespace Tegra { -namespace { -constexpr AVPixelFormat PREFERRED_GPU_FMT = AV_PIX_FMT_NV12; -constexpr AVPixelFormat PREFERRED_CPU_FMT = AV_PIX_FMT_YUV420P; -constexpr std::array PREFERRED_GPU_DECODERS = { - AV_HWDEVICE_TYPE_CUDA, -#ifdef _WIN32 - AV_HWDEVICE_TYPE_D3D11VA, - AV_HWDEVICE_TYPE_DXVA2, -#elif defined(__unix__) - AV_HWDEVICE_TYPE_VAAPI, - 
AV_HWDEVICE_TYPE_VDPAU, -#endif - // last resort for Linux Flatpak (w/ NVIDIA) - AV_HWDEVICE_TYPE_VULKAN, -}; - -void AVPacketDeleter(AVPacket* ptr) { - av_packet_free(&ptr); -} - -using AVPacketPtr = std::unique_ptr<AVPacket, decltype(&AVPacketDeleter)>; - -AVPixelFormat GetGpuFormat(AVCodecContext* av_codec_ctx, const AVPixelFormat* pix_fmts) { - for (const AVPixelFormat* p = pix_fmts; *p != AV_PIX_FMT_NONE; ++p) { - if (*p == av_codec_ctx->pix_fmt) { - return av_codec_ctx->pix_fmt; - } - } - LOG_INFO(Service_NVDRV, "Could not find compatible GPU AV format, falling back to CPU"); - av_buffer_unref(&av_codec_ctx->hw_device_ctx); - av_codec_ctx->pix_fmt = PREFERRED_CPU_FMT; - return PREFERRED_CPU_FMT; -} - -// List all the currently available hwcontext in ffmpeg -std::vector<AVHWDeviceType> ListSupportedContexts() { - std::vector<AVHWDeviceType> contexts{}; - AVHWDeviceType current_device_type = AV_HWDEVICE_TYPE_NONE; - do { - current_device_type = av_hwdevice_iterate_types(current_device_type); - contexts.push_back(current_device_type); - } while (current_device_type != AV_HWDEVICE_TYPE_NONE); - return contexts; -} - -} // namespace - -void AVFrameDeleter(AVFrame* ptr) { - av_frame_free(&ptr); -} Codec::Codec(Host1x::Host1x& host1x_, const Host1x::NvdecCommon::NvdecRegisters& regs) : host1x(host1x_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(host1x)), vp8_decoder(std::make_unique<Decoder::VP8>(host1x)), vp9_decoder(std::make_unique<Decoder::VP9>(host1x)) {} -Codec::~Codec() { - if (!initialized) { - return; - } - // Free libav memory - avcodec_free_context(&av_codec_ctx); - av_buffer_unref(&av_gpu_decoder); - - if (filters_initialized) { - avfilter_graph_free(&av_filter_graph); - } -} - -bool Codec::CreateGpuAvDevice() { - static constexpr auto HW_CONFIG_METHOD = AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX; - static const auto supported_contexts = ListSupportedContexts(); - for (const auto& type : PREFERRED_GPU_DECODERS) { - if (std::none_of(supported_contexts.begin(), supported_contexts.end(), - [&type](const auto& context) { return context == type; })) { - LOG_DEBUG(Service_NVDRV, "{} explicitly unsupported", av_hwdevice_get_type_name(type)); - continue; - } - // Avoid memory leak from not cleaning up after av_hwdevice_ctx_create - av_buffer_unref(&av_gpu_decoder); - const int hwdevice_res = av_hwdevice_ctx_create(&av_gpu_decoder, type, nullptr, nullptr, 0); - if (hwdevice_res < 0) { - LOG_DEBUG(Service_NVDRV, "{} av_hwdevice_ctx_create failed {}", - av_hwdevice_get_type_name(type), hwdevice_res); - continue; - } -#ifdef LIBVA_FOUND - if (type == AV_HWDEVICE_TYPE_VAAPI) { - // we need to determine if this is an impersonated VAAPI driver - AVHWDeviceContext* hwctx = - static_cast<AVHWDeviceContext*>(static_cast<void*>(av_gpu_decoder->data)); - AVVAAPIDeviceContext* vactx = static_cast<AVVAAPIDeviceContext*>(hwctx->hwctx); - const char* vendor_name = vaQueryVendorString(vactx->display); - if (strstr(vendor_name, "VDPAU backend")) { - // VDPAU impersonated VAAPI impl's are super buggy, we need to skip them - LOG_DEBUG(Service_NVDRV, "Skipping vdapu impersonated VAAPI driver"); - continue; - } else { - // according to some user testing, certain vaapi driver (Intel?) 
could be buggy - // so let's log the driver name which may help the developers/supporters - LOG_DEBUG(Service_NVDRV, "Using VAAPI driver: {}", vendor_name); - } - } -#endif - for (int i = 0;; i++) { - const AVCodecHWConfig* config = avcodec_get_hw_config(av_codec, i); - if (!config) { - LOG_DEBUG(Service_NVDRV, "{} decoder does not support device type {}.", - av_codec->name, av_hwdevice_get_type_name(type)); - break; - } - if ((config->methods & HW_CONFIG_METHOD) != 0 && config->device_type == type) { - LOG_INFO(Service_NVDRV, "Using {} GPU decoder", av_hwdevice_get_type_name(type)); - av_codec_ctx->pix_fmt = config->pix_fmt; - return true; - } - } - } - return false; -} - -void Codec::InitializeAvCodecContext() { - av_codec_ctx = avcodec_alloc_context3(av_codec); - av_opt_set(av_codec_ctx->priv_data, "tune", "zerolatency", 0); - av_codec_ctx->thread_count = 0; - av_codec_ctx->thread_type &= ~FF_THREAD_FRAME; -} - -void Codec::InitializeGpuDecoder() { - if (!CreateGpuAvDevice()) { - av_buffer_unref(&av_gpu_decoder); - return; - } - auto* hw_device_ctx = av_buffer_ref(av_gpu_decoder); - ASSERT_MSG(hw_device_ctx, "av_buffer_ref failed"); - av_codec_ctx->hw_device_ctx = hw_device_ctx; - av_codec_ctx->get_format = GetGpuFormat; -} - -void Codec::InitializeAvFilters(AVFrame* frame) { - const AVFilter* buffer_src = avfilter_get_by_name("buffer"); - const AVFilter* buffer_sink = avfilter_get_by_name("buffersink"); - AVFilterInOut* inputs = avfilter_inout_alloc(); - AVFilterInOut* outputs = avfilter_inout_alloc(); - SCOPE_EXIT({ - avfilter_inout_free(&inputs); - avfilter_inout_free(&outputs); - }); - - // Don't know how to get the accurate time_base but it doesn't matter for yadif filter - // so just use 1/1 to make buffer filter happy - std::string args = fmt::format("video_size={}x{}:pix_fmt={}:time_base=1/1", frame->width, - frame->height, frame->format); - - av_filter_graph = avfilter_graph_alloc(); - int ret = avfilter_graph_create_filter(&av_filter_src_ctx, buffer_src, "in", args.c_str(), - nullptr, av_filter_graph); - if (ret < 0) { - LOG_ERROR(Service_NVDRV, "avfilter_graph_create_filter source error: {}", ret); - return; - } - - ret = avfilter_graph_create_filter(&av_filter_sink_ctx, buffer_sink, "out", nullptr, nullptr, - av_filter_graph); - if (ret < 0) { - LOG_ERROR(Service_NVDRV, "avfilter_graph_create_filter sink error: {}", ret); - return; - } - - inputs->name = av_strdup("out"); - inputs->filter_ctx = av_filter_sink_ctx; - inputs->pad_idx = 0; - inputs->next = nullptr; - - outputs->name = av_strdup("in"); - outputs->filter_ctx = av_filter_src_ctx; - outputs->pad_idx = 0; - outputs->next = nullptr; - - const char* description = "yadif=1:-1:0"; - ret = avfilter_graph_parse_ptr(av_filter_graph, description, &inputs, &outputs, nullptr); - if (ret < 0) { - LOG_ERROR(Service_NVDRV, "avfilter_graph_parse_ptr error: {}", ret); - return; - } - - ret = avfilter_graph_config(av_filter_graph, nullptr); - if (ret < 0) { - LOG_ERROR(Service_NVDRV, "avfilter_graph_config error: {}", ret); - return; - } - - filters_initialized = true; -} +Codec::~Codec() = default; void Codec::Initialize() { - const AVCodecID codec = [&] { - switch (current_codec) { - case Host1x::NvdecCommon::VideoCodec::H264: - return AV_CODEC_ID_H264; - case Host1x::NvdecCommon::VideoCodec::VP8: - return AV_CODEC_ID_VP8; - case Host1x::NvdecCommon::VideoCodec::VP9: - return AV_CODEC_ID_VP9; - default: - UNIMPLEMENTED_MSG("Unknown codec {}", current_codec); - return AV_CODEC_ID_NONE; - } - }(); - av_codec = 
avcodec_find_decoder(codec); - - InitializeAvCodecContext(); - if (Settings::values.nvdec_emulation.GetValue() == Settings::NvdecEmulation::Gpu) { - InitializeGpuDecoder(); - } - if (const int res = avcodec_open2(av_codec_ctx, av_codec, nullptr); res < 0) { - LOG_ERROR(Service_NVDRV, "avcodec_open2() Failed with result {}", res); - avcodec_free_context(&av_codec_ctx); - av_buffer_unref(&av_gpu_decoder); - return; - } - if (!av_codec_ctx->hw_device_ctx) { - LOG_INFO(Service_NVDRV, "Using FFmpeg software decoding"); - } - initialized = true; + initialized = decode_api.Initialize(current_codec); } void Codec::SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec) { @@ -264,14 +35,18 @@ void Codec::Decode() { if (is_first_frame) { Initialize(); } + if (!initialized) { return; } + + // Assemble bitstream. bool vp9_hidden_frame = false; - const auto& frame_data = [&]() { + size_t configuration_size = 0; + const auto packet_data = [&]() { switch (current_codec) { case Tegra::Host1x::NvdecCommon::VideoCodec::H264: - return h264_decoder->ComposeFrame(state, is_first_frame); + return h264_decoder->ComposeFrame(state, &configuration_size, is_first_frame); case Tegra::Host1x::NvdecCommon::VideoCodec::VP8: return vp8_decoder->ComposeFrame(state); case Tegra::Host1x::NvdecCommon::VideoCodec::VP9: @@ -283,89 +58,35 @@ void Codec::Decode() { return std::span<const u8>{}; } }(); - AVPacketPtr packet{av_packet_alloc(), AVPacketDeleter}; - if (!packet) { - LOG_ERROR(Service_NVDRV, "av_packet_alloc failed"); - return; - } - packet->data = const_cast<u8*>(frame_data.data()); - packet->size = static_cast<s32>(frame_data.size()); - if (const int res = avcodec_send_packet(av_codec_ctx, packet.get()); res != 0) { - LOG_DEBUG(Service_NVDRV, "avcodec_send_packet error {}", res); + + // Send assembled bitstream to decoder. + if (!decode_api.SendPacket(packet_data, configuration_size)) { return; } - // Only receive/store visible frames + + // Only receive/store visible frames. 
if (vp9_hidden_frame) { return; } - AVFramePtr initial_frame{av_frame_alloc(), AVFrameDeleter}; - AVFramePtr final_frame{nullptr, AVFrameDeleter}; - ASSERT_MSG(initial_frame, "av_frame_alloc initial_frame failed"); - if (const int ret = avcodec_receive_frame(av_codec_ctx, initial_frame.get()); ret) { - LOG_DEBUG(Service_NVDRV, "avcodec_receive_frame error {}", ret); - return; - } - if (initial_frame->width == 0 || initial_frame->height == 0) { - LOG_WARNING(Service_NVDRV, "Zero width or height in frame"); - return; - } - bool is_interlaced = initial_frame->interlaced_frame != 0; - if (av_codec_ctx->hw_device_ctx) { - final_frame = AVFramePtr{av_frame_alloc(), AVFrameDeleter}; - ASSERT_MSG(final_frame, "av_frame_alloc final_frame failed"); - // Can't use AV_PIX_FMT_YUV420P and share code with software decoding in vic.cpp - // because Intel drivers crash unless using AV_PIX_FMT_NV12 - final_frame->format = PREFERRED_GPU_FMT; - const int ret = av_hwframe_transfer_data(final_frame.get(), initial_frame.get(), 0); - ASSERT_MSG(!ret, "av_hwframe_transfer_data error {}", ret); - } else { - final_frame = std::move(initial_frame); - } - if (final_frame->format != PREFERRED_CPU_FMT && final_frame->format != PREFERRED_GPU_FMT) { - UNIMPLEMENTED_MSG("Unexpected video format: {}", final_frame->format); - return; - } - if (!is_interlaced) { - av_frames.push(std::move(final_frame)); - } else { - if (!filters_initialized) { - InitializeAvFilters(final_frame.get()); - } - if (const int ret = av_buffersrc_add_frame_flags(av_filter_src_ctx, final_frame.get(), - AV_BUFFERSRC_FLAG_KEEP_REF); - ret) { - LOG_DEBUG(Service_NVDRV, "av_buffersrc_add_frame_flags error {}", ret); - return; - } - while (true) { - auto filter_frame = AVFramePtr{av_frame_alloc(), AVFrameDeleter}; - int ret = av_buffersink_get_frame(av_filter_sink_ctx, filter_frame.get()); + // Receive output frames from decoder. + decode_api.ReceiveFrames(frames); - if (ret == AVERROR(EAGAIN) || ret == AVERROR(AVERROR_EOF)) - break; - if (ret < 0) { - LOG_DEBUG(Service_NVDRV, "av_buffersink_get_frame error {}", ret); - return; - } - - av_frames.push(std::move(filter_frame)); - } - } - while (av_frames.size() > 10) { - LOG_TRACE(Service_NVDRV, "av_frames.push overflow dropped frame"); - av_frames.pop(); + while (frames.size() > 10) { + LOG_DEBUG(HW_GPU, "ReceiveFrames overflow, dropped frame"); + frames.pop(); } } -AVFramePtr Codec::GetCurrentFrame() { +std::unique_ptr<FFmpeg::Frame> Codec::GetCurrentFrame() { // Sometimes VIC will request more frames than have been decoded. - // in this case, return a nullptr and don't overwrite previous frame data - if (av_frames.empty()) { - return AVFramePtr{nullptr, AVFrameDeleter}; + // in this case, return a blank frame and don't overwrite previous data. 
+ if (frames.empty()) { + return {}; } - AVFramePtr frame = std::move(av_frames.front()); - av_frames.pop(); + + auto frame = std::move(frames.front()); + frames.pop(); return frame; } diff --git a/src/video_core/host1x/codecs/codec.h b/src/video_core/host1x/codecs/codec.h index 06fe00a4b..f700ae129 100644 --- a/src/video_core/host1x/codecs/codec.h +++ b/src/video_core/host1x/codecs/codec.h @@ -4,28 +4,15 @@ #pragma once #include <memory> +#include <optional> #include <string_view> #include <queue> #include "common/common_types.h" +#include "video_core/host1x/ffmpeg/ffmpeg.h" #include "video_core/host1x/nvdec_common.h" -extern "C" { -#if defined(__GNUC__) || defined(__clang__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif -#include <libavcodec/avcodec.h> -#include <libavfilter/avfilter.h> -#if defined(__GNUC__) || defined(__clang__) -#pragma GCC diagnostic pop -#endif -} - namespace Tegra { -void AVFrameDeleter(AVFrame* ptr); -using AVFramePtr = std::unique_ptr<AVFrame, decltype(&AVFrameDeleter)>; - namespace Decoder { class H264; class VP8; @@ -51,7 +38,7 @@ public: void Decode(); /// Returns next decoded frame - [[nodiscard]] AVFramePtr GetCurrentFrame(); + [[nodiscard]] std::unique_ptr<FFmpeg::Frame> GetCurrentFrame(); /// Returns the value of current_codec [[nodiscard]] Host1x::NvdecCommon::VideoCodec GetCurrentCodec() const; @@ -60,25 +47,9 @@ public: [[nodiscard]] std::string_view GetCurrentCodecName() const; private: - void InitializeAvCodecContext(); - - void InitializeAvFilters(AVFrame* frame); - - void InitializeGpuDecoder(); - - bool CreateGpuAvDevice(); - bool initialized{}; - bool filters_initialized{}; Host1x::NvdecCommon::VideoCodec current_codec{Host1x::NvdecCommon::VideoCodec::None}; - - const AVCodec* av_codec{nullptr}; - AVCodecContext* av_codec_ctx{nullptr}; - AVBufferRef* av_gpu_decoder{nullptr}; - - AVFilterContext* av_filter_src_ctx{nullptr}; - AVFilterContext* av_filter_sink_ctx{nullptr}; - AVFilterGraph* av_filter_graph{nullptr}; + FFmpeg::DecodeApi decode_api; Host1x::Host1x& host1x; const Host1x::NvdecCommon::NvdecRegisters& state; @@ -86,7 +57,7 @@ private: std::unique_ptr<Decoder::VP8> vp8_decoder; std::unique_ptr<Decoder::VP9> vp9_decoder; - std::queue<AVFramePtr> av_frames{}; + std::queue<std::unique_ptr<FFmpeg::Frame>> frames{}; }; } // namespace Tegra diff --git a/src/video_core/host1x/codecs/h264.cpp b/src/video_core/host1x/codecs/h264.cpp index ece79b1e2..309a7f1d5 100644 --- a/src/video_core/host1x/codecs/h264.cpp +++ b/src/video_core/host1x/codecs/h264.cpp @@ -30,7 +30,7 @@ H264::H264(Host1x::Host1x& host1x_) : host1x{host1x_} {} H264::~H264() = default; std::span<const u8> H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state, - bool is_first_frame) { + size_t* out_configuration_size, bool is_first_frame) { H264DecoderContext context; host1x.MemoryManager().ReadBlock(state.picture_info_offset, &context, sizeof(H264DecoderContext)); @@ -39,6 +39,7 @@ std::span<const u8> H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters if (!is_first_frame && frame_number != 0) { frame.resize_destructive(context.stream_len); host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size()); + *out_configuration_size = 0; return frame; } @@ -157,6 +158,7 @@ std::span<const u8> H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters frame.resize(encoded_header.size() + context.stream_len); std::memcpy(frame.data(), encoded_header.data(), encoded_header.size()); + 
*out_configuration_size = encoded_header.size(); host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data() + encoded_header.size(), context.stream_len); diff --git a/src/video_core/host1x/codecs/h264.h b/src/video_core/host1x/codecs/h264.h index d6b556322..1deaf4632 100644 --- a/src/video_core/host1x/codecs/h264.h +++ b/src/video_core/host1x/codecs/h264.h @@ -67,6 +67,7 @@ public: /// Compose the H264 frame for FFmpeg decoding [[nodiscard]] std::span<const u8> ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state, + size_t* out_configuration_size, bool is_first_frame = false); private: diff --git a/src/video_core/host1x/ffmpeg/ffmpeg.cpp b/src/video_core/host1x/ffmpeg/ffmpeg.cpp new file mode 100644 index 000000000..dcd07e6d2 --- /dev/null +++ b/src/video_core/host1x/ffmpeg/ffmpeg.cpp @@ -0,0 +1,419 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "common/assert.h" +#include "common/logging/log.h" +#include "common/scope_exit.h" +#include "common/settings.h" +#include "video_core/host1x/ffmpeg/ffmpeg.h" + +extern "C" { +#ifdef LIBVA_FOUND +// for querying VAAPI driver information +#include <libavutil/hwcontext_vaapi.h> +#endif +} + +namespace FFmpeg { + +namespace { + +constexpr AVPixelFormat PreferredGpuFormat = AV_PIX_FMT_NV12; +constexpr AVPixelFormat PreferredCpuFormat = AV_PIX_FMT_YUV420P; +constexpr std::array PreferredGpuDecoders = { + AV_HWDEVICE_TYPE_CUDA, +#ifdef _WIN32 + AV_HWDEVICE_TYPE_D3D11VA, + AV_HWDEVICE_TYPE_DXVA2, +#elif defined(__unix__) + AV_HWDEVICE_TYPE_VAAPI, + AV_HWDEVICE_TYPE_VDPAU, +#endif + // last resort for Linux Flatpak (w/ NVIDIA) + AV_HWDEVICE_TYPE_VULKAN, +}; + +AVPixelFormat GetGpuFormat(AVCodecContext* codec_context, const AVPixelFormat* pix_fmts) { + for (const AVPixelFormat* p = pix_fmts; *p != AV_PIX_FMT_NONE; ++p) { + if (*p == codec_context->pix_fmt) { + return codec_context->pix_fmt; + } + } + + LOG_INFO(HW_GPU, "Could not find compatible GPU AV format, falling back to CPU"); + av_buffer_unref(&codec_context->hw_device_ctx); + + codec_context->pix_fmt = PreferredCpuFormat; + return codec_context->pix_fmt; +} + +std::string AVError(int errnum) { + char errbuf[AV_ERROR_MAX_STRING_SIZE] = {}; + av_make_error_string(errbuf, sizeof(errbuf) - 1, errnum); + return errbuf; +} + +} // namespace + +Packet::Packet(std::span<const u8> data) { + m_packet = av_packet_alloc(); + m_packet->data = const_cast<u8*>(data.data()); + m_packet->size = static_cast<s32>(data.size()); +} + +Packet::~Packet() { + av_packet_free(&m_packet); +} + +Frame::Frame() { + m_frame = av_frame_alloc(); +} + +Frame::~Frame() { + av_frame_free(&m_frame); +} + +Decoder::Decoder(Tegra::Host1x::NvdecCommon::VideoCodec codec) { + const AVCodecID av_codec = [&] { + switch (codec) { + case Tegra::Host1x::NvdecCommon::VideoCodec::H264: + return AV_CODEC_ID_H264; + case Tegra::Host1x::NvdecCommon::VideoCodec::VP8: + return AV_CODEC_ID_VP8; + case Tegra::Host1x::NvdecCommon::VideoCodec::VP9: + return AV_CODEC_ID_VP9; + default: + UNIMPLEMENTED_MSG("Unknown codec {}", codec); + return AV_CODEC_ID_NONE; + } + }(); + + m_codec = avcodec_find_decoder(av_codec); +} + +bool Decoder::SupportsDecodingOnDevice(AVPixelFormat* out_pix_fmt, AVHWDeviceType type) const { + for (int i = 0;; i++) { + const AVCodecHWConfig* config = avcodec_get_hw_config(m_codec, i); + if (!config) { + LOG_DEBUG(HW_GPU, "{} decoder does not support device type {}", m_codec->name, + av_hwdevice_get_type_name(type)); + break; + } 
+ if ((config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) != 0 && + config->device_type == type) { + LOG_INFO(HW_GPU, "Using {} GPU decoder", av_hwdevice_get_type_name(type)); + *out_pix_fmt = config->pix_fmt; + return true; + } + } + + return false; +} + +std::vector<AVHWDeviceType> HardwareContext::GetSupportedDeviceTypes() { + std::vector<AVHWDeviceType> types; + AVHWDeviceType current_device_type = AV_HWDEVICE_TYPE_NONE; + + while (true) { + current_device_type = av_hwdevice_iterate_types(current_device_type); + if (current_device_type == AV_HWDEVICE_TYPE_NONE) { + return types; + } + + types.push_back(current_device_type); + } +} + +HardwareContext::~HardwareContext() { + av_buffer_unref(&m_gpu_decoder); +} + +bool HardwareContext::InitializeForDecoder(DecoderContext& decoder_context, + const Decoder& decoder) { + const auto supported_types = GetSupportedDeviceTypes(); + for (const auto type : PreferredGpuDecoders) { + AVPixelFormat hw_pix_fmt; + + if (std::ranges::find(supported_types, type) == supported_types.end()) { + LOG_DEBUG(HW_GPU, "{} explicitly unsupported", av_hwdevice_get_type_name(type)); + continue; + } + + if (!this->InitializeWithType(type)) { + continue; + } + + if (decoder.SupportsDecodingOnDevice(&hw_pix_fmt, type)) { + decoder_context.InitializeHardwareDecoder(*this, hw_pix_fmt); + return true; + } + } + + return false; +} + +bool HardwareContext::InitializeWithType(AVHWDeviceType type) { + av_buffer_unref(&m_gpu_decoder); + + if (const int ret = av_hwdevice_ctx_create(&m_gpu_decoder, type, nullptr, nullptr, 0); + ret < 0) { + LOG_DEBUG(HW_GPU, "av_hwdevice_ctx_create({}) failed: {}", av_hwdevice_get_type_name(type), + AVError(ret)); + return false; + } + +#ifdef LIBVA_FOUND + if (type == AV_HWDEVICE_TYPE_VAAPI) { + // We need to determine if this is an impersonated VAAPI driver. + auto* hwctx = reinterpret_cast<AVHWDeviceContext*>(m_gpu_decoder->data); + auto* vactx = static_cast<AVVAAPIDeviceContext*>(hwctx->hwctx); + const char* vendor_name = vaQueryVendorString(vactx->display); + if (strstr(vendor_name, "VDPAU backend")) { + // VDPAU impersonated VAAPI impls are super buggy, we need to skip them. + LOG_DEBUG(HW_GPU, "Skipping VDPAU impersonated VAAPI driver"); + return false; + } else { + // According to some user testing, certain VAAPI drivers (Intel?) could be buggy. + // Log the driver name just in case. 
+ LOG_DEBUG(HW_GPU, "Using VAAPI driver: {}", vendor_name); + } + } +#endif + + return true; +} + +DecoderContext::DecoderContext(const Decoder& decoder) { + m_codec_context = avcodec_alloc_context3(decoder.GetCodec()); + av_opt_set(m_codec_context->priv_data, "tune", "zerolatency", 0); + m_codec_context->thread_count = 0; + m_codec_context->thread_type &= ~FF_THREAD_FRAME; +} + +DecoderContext::~DecoderContext() { + av_buffer_unref(&m_codec_context->hw_device_ctx); + avcodec_free_context(&m_codec_context); +} + +void DecoderContext::InitializeHardwareDecoder(const HardwareContext& context, + AVPixelFormat hw_pix_fmt) { + m_codec_context->hw_device_ctx = av_buffer_ref(context.GetBufferRef()); + m_codec_context->get_format = GetGpuFormat; + m_codec_context->pix_fmt = hw_pix_fmt; +} + +bool DecoderContext::OpenContext(const Decoder& decoder) { + if (const int ret = avcodec_open2(m_codec_context, decoder.GetCodec(), nullptr); ret < 0) { + LOG_ERROR(HW_GPU, "avcodec_open2 error: {}", AVError(ret)); + return false; + } + + if (!m_codec_context->hw_device_ctx) { + LOG_INFO(HW_GPU, "Using FFmpeg software decoding"); + } + + return true; +} + +bool DecoderContext::SendPacket(const Packet& packet) { + if (const int ret = avcodec_send_packet(m_codec_context, packet.GetPacket()); ret < 0) { + LOG_ERROR(HW_GPU, "avcodec_send_packet error: {}", AVError(ret)); + return false; + } + + return true; +} + +std::unique_ptr<Frame> DecoderContext::ReceiveFrame(bool* out_is_interlaced) { + auto dst_frame = std::make_unique<Frame>(); + + const auto ReceiveImpl = [&](AVFrame* frame) { + if (const int ret = avcodec_receive_frame(m_codec_context, frame); ret < 0) { + LOG_ERROR(HW_GPU, "avcodec_receive_frame error: {}", AVError(ret)); + return false; + } + + *out_is_interlaced = frame->interlaced_frame != 0; + return true; + }; + + if (m_codec_context->hw_device_ctx) { + // If we have a hardware context, make a separate frame here to receive the + // hardware result before sending it to the output. + Frame intermediate_frame; + + if (!ReceiveImpl(intermediate_frame.GetFrame())) { + return {}; + } + + dst_frame->SetFormat(PreferredGpuFormat); + if (const int ret = + av_hwframe_transfer_data(dst_frame->GetFrame(), intermediate_frame.GetFrame(), 0); + ret < 0) { + LOG_ERROR(HW_GPU, "av_hwframe_transfer_data error: {}", AVError(ret)); + return {}; + } + } else { + // Otherwise, decode the frame as normal. 
+ if (!ReceiveImpl(dst_frame->GetFrame())) { + return {}; + } + } + + return dst_frame; +} + +DeinterlaceFilter::DeinterlaceFilter(const Frame& frame) { + const AVFilter* buffer_src = avfilter_get_by_name("buffer"); + const AVFilter* buffer_sink = avfilter_get_by_name("buffersink"); + AVFilterInOut* inputs = avfilter_inout_alloc(); + AVFilterInOut* outputs = avfilter_inout_alloc(); + SCOPE_EXIT({ + avfilter_inout_free(&inputs); + avfilter_inout_free(&outputs); + }); + + // Don't know how to get the accurate time_base but it doesn't matter for yadif filter + // so just use 1/1 to make buffer filter happy + std::string args = fmt::format("video_size={}x{}:pix_fmt={}:time_base=1/1", frame.GetWidth(), + frame.GetHeight(), static_cast<int>(frame.GetPixelFormat())); + + m_filter_graph = avfilter_graph_alloc(); + int ret = avfilter_graph_create_filter(&m_source_context, buffer_src, "in", args.c_str(), + nullptr, m_filter_graph); + if (ret < 0) { + LOG_ERROR(HW_GPU, "avfilter_graph_create_filter source error: {}", AVError(ret)); + return; + } + + ret = avfilter_graph_create_filter(&m_sink_context, buffer_sink, "out", nullptr, nullptr, + m_filter_graph); + if (ret < 0) { + LOG_ERROR(HW_GPU, "avfilter_graph_create_filter sink error: {}", AVError(ret)); + return; + } + + inputs->name = av_strdup("out"); + inputs->filter_ctx = m_sink_context; + inputs->pad_idx = 0; + inputs->next = nullptr; + + outputs->name = av_strdup("in"); + outputs->filter_ctx = m_source_context; + outputs->pad_idx = 0; + outputs->next = nullptr; + + const char* description = "yadif=1:-1:0"; + ret = avfilter_graph_parse_ptr(m_filter_graph, description, &inputs, &outputs, nullptr); + if (ret < 0) { + LOG_ERROR(HW_GPU, "avfilter_graph_parse_ptr error: {}", AVError(ret)); + return; + } + + ret = avfilter_graph_config(m_filter_graph, nullptr); + if (ret < 0) { + LOG_ERROR(HW_GPU, "avfilter_graph_config error: {}", AVError(ret)); + return; + } + + m_initialized = true; +} + +bool DeinterlaceFilter::AddSourceFrame(const Frame& frame) { + if (const int ret = av_buffersrc_add_frame_flags(m_source_context, frame.GetFrame(), + AV_BUFFERSRC_FLAG_KEEP_REF); + ret < 0) { + LOG_ERROR(HW_GPU, "av_buffersrc_add_frame_flags error: {}", AVError(ret)); + return false; + } + + return true; +} + +std::unique_ptr<Frame> DeinterlaceFilter::DrainSinkFrame() { + auto dst_frame = std::make_unique<Frame>(); + const int ret = av_buffersink_get_frame(m_sink_context, dst_frame->GetFrame()); + + if (ret == AVERROR(EAGAIN) || ret == AVERROR(AVERROR_EOF)) { + return {}; + } + + if (ret < 0) { + LOG_ERROR(HW_GPU, "av_buffersink_get_frame error: {}", AVError(ret)); + return {}; + } + + return dst_frame; +} + +DeinterlaceFilter::~DeinterlaceFilter() { + avfilter_graph_free(&m_filter_graph); +} + +void DecodeApi::Reset() { + m_deinterlace_filter.reset(); + m_hardware_context.reset(); + m_decoder_context.reset(); + m_decoder.reset(); +} + +bool DecodeApi::Initialize(Tegra::Host1x::NvdecCommon::VideoCodec codec) { + this->Reset(); + m_decoder.emplace(codec); + m_decoder_context.emplace(*m_decoder); + + // Enable GPU decoding if requested. + if (Settings::values.nvdec_emulation.GetValue() == Settings::NvdecEmulation::Gpu) { + m_hardware_context.emplace(); + m_hardware_context->InitializeForDecoder(*m_decoder_context, *m_decoder); + } + + // Open the decoder context. 
+ if (!m_decoder_context->OpenContext(*m_decoder)) { + this->Reset(); + return false; + } + + return true; +} + +bool DecodeApi::SendPacket(std::span<const u8> packet_data, size_t configuration_size) { + FFmpeg::Packet packet(packet_data); + return m_decoder_context->SendPacket(packet); +} + +void DecodeApi::ReceiveFrames(std::queue<std::unique_ptr<Frame>>& frame_queue) { + // Receive raw frame from decoder. + bool is_interlaced; + auto frame = m_decoder_context->ReceiveFrame(&is_interlaced); + if (!frame) { + return; + } + + if (!is_interlaced) { + // If the frame is not interlaced, we can pend it now. + frame_queue.push(std::move(frame)); + } else { + // Create the deinterlacer if needed. + if (!m_deinterlace_filter) { + m_deinterlace_filter.emplace(*frame); + } + + // Add the frame we just received. + if (!m_deinterlace_filter->AddSourceFrame(*frame)) { + return; + } + + // Pend output fields. + while (true) { + auto filter_frame = m_deinterlace_filter->DrainSinkFrame(); + if (!filter_frame) { + break; + } + + frame_queue.push(std::move(filter_frame)); + } + } +} + +} // namespace FFmpeg diff --git a/src/video_core/host1x/ffmpeg/ffmpeg.h b/src/video_core/host1x/ffmpeg/ffmpeg.h new file mode 100644 index 000000000..1de0bbd83 --- /dev/null +++ b/src/video_core/host1x/ffmpeg/ffmpeg.h @@ -0,0 +1,213 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <memory> +#include <optional> +#include <span> +#include <vector> +#include <queue> + +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "video_core/host1x/nvdec_common.h" + +extern "C" { +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + +#include <libavcodec/avcodec.h> +#include <libavfilter/avfilter.h> +#include <libavfilter/buffersink.h> +#include <libavfilter/buffersrc.h> +#include <libavutil/avutil.h> +#include <libavutil/opt.h> + +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic pop +#endif +} + +namespace FFmpeg { + +class Packet; +class Frame; +class Decoder; +class HardwareContext; +class DecoderContext; +class DeinterlaceFilter; + +// Wraps an AVPacket, a container for compressed bitstream data. +class Packet { +public: + YUZU_NON_COPYABLE(Packet); + YUZU_NON_MOVEABLE(Packet); + + explicit Packet(std::span<const u8> data); + ~Packet(); + + AVPacket* GetPacket() const { + return m_packet; + } + +private: + AVPacket* m_packet{}; +}; + +// Wraps an AVFrame, a container for audio and video stream data. +class Frame { +public: + YUZU_NON_COPYABLE(Frame); + YUZU_NON_MOVEABLE(Frame); + + explicit Frame(); + ~Frame(); + + int GetWidth() const { + return m_frame->width; + } + + int GetHeight() const { + return m_frame->height; + } + + AVPixelFormat GetPixelFormat() const { + return static_cast<AVPixelFormat>(m_frame->format); + } + + int GetStride(int plane) const { + return m_frame->linesize[plane]; + } + + int* GetStrides() const { + return m_frame->linesize; + } + + u8* GetData(int plane) const { + return m_frame->data[plane]; + } + + u8** GetPlanes() const { + return m_frame->data; + } + + void SetFormat(int format) { + m_frame->format = format; + } + + AVFrame* GetFrame() const { + return m_frame; + } + +private: + AVFrame* m_frame{}; +}; + +// Wraps an AVCodec, a type containing information about a codec. 
+class Decoder { +public: + YUZU_NON_COPYABLE(Decoder); + YUZU_NON_MOVEABLE(Decoder); + + explicit Decoder(Tegra::Host1x::NvdecCommon::VideoCodec codec); + ~Decoder() = default; + + bool SupportsDecodingOnDevice(AVPixelFormat* out_pix_fmt, AVHWDeviceType type) const; + + const AVCodec* GetCodec() const { + return m_codec; + } + +private: + const AVCodec* m_codec{}; +}; + +// Wraps AVBufferRef for an accelerated decoder. +class HardwareContext { +public: + YUZU_NON_COPYABLE(HardwareContext); + YUZU_NON_MOVEABLE(HardwareContext); + + static std::vector<AVHWDeviceType> GetSupportedDeviceTypes(); + + explicit HardwareContext() = default; + ~HardwareContext(); + + bool InitializeForDecoder(DecoderContext& decoder_context, const Decoder& decoder); + + AVBufferRef* GetBufferRef() const { + return m_gpu_decoder; + } + +private: + bool InitializeWithType(AVHWDeviceType type); + + AVBufferRef* m_gpu_decoder{}; +}; + +// Wraps an AVCodecContext. +class DecoderContext { +public: + YUZU_NON_COPYABLE(DecoderContext); + YUZU_NON_MOVEABLE(DecoderContext); + + explicit DecoderContext(const Decoder& decoder); + ~DecoderContext(); + + void InitializeHardwareDecoder(const HardwareContext& context, AVPixelFormat hw_pix_fmt); + bool OpenContext(const Decoder& decoder); + bool SendPacket(const Packet& packet); + std::unique_ptr<Frame> ReceiveFrame(bool* out_is_interlaced); + + AVCodecContext* GetCodecContext() const { + return m_codec_context; + } + +private: + AVCodecContext* m_codec_context{}; +}; + +// Wraps an AVFilterGraph. +class DeinterlaceFilter { +public: + YUZU_NON_COPYABLE(DeinterlaceFilter); + YUZU_NON_MOVEABLE(DeinterlaceFilter); + + explicit DeinterlaceFilter(const Frame& frame); + ~DeinterlaceFilter(); + + bool AddSourceFrame(const Frame& frame); + std::unique_ptr<Frame> DrainSinkFrame(); + +private: + AVFilterGraph* m_filter_graph{}; + AVFilterContext* m_source_context{}; + AVFilterContext* m_sink_context{}; + bool m_initialized{}; +}; + +class DecodeApi { +public: + YUZU_NON_COPYABLE(DecodeApi); + YUZU_NON_MOVEABLE(DecodeApi); + + DecodeApi() = default; + ~DecodeApi() = default; + + bool Initialize(Tegra::Host1x::NvdecCommon::VideoCodec codec); + void Reset(); + + bool SendPacket(std::span<const u8> packet_data, size_t configuration_size); + void ReceiveFrames(std::queue<std::unique_ptr<Frame>>& frame_queue); + +private: + std::optional<FFmpeg::Decoder> m_decoder; + std::optional<FFmpeg::DecoderContext> m_decoder_context; + std::optional<FFmpeg::HardwareContext> m_hardware_context; + std::optional<FFmpeg::DeinterlaceFilter> m_deinterlace_filter; +}; + +} // namespace FFmpeg diff --git a/src/video_core/host1x/nvdec.cpp b/src/video_core/host1x/nvdec.cpp index a4bd5b79f..b8f5866d3 100644 --- a/src/video_core/host1x/nvdec.cpp +++ b/src/video_core/host1x/nvdec.cpp @@ -28,7 +28,7 @@ void Nvdec::ProcessMethod(u32 method, u32 argument) { } } -AVFramePtr Nvdec::GetFrame() { +std::unique_ptr<FFmpeg::Frame> Nvdec::GetFrame() { return codec->GetCurrentFrame(); } diff --git a/src/video_core/host1x/nvdec.h b/src/video_core/host1x/nvdec.h index 3949d5181..ddddb8d28 100644 --- a/src/video_core/host1x/nvdec.h +++ b/src/video_core/host1x/nvdec.h @@ -23,7 +23,7 @@ public: void ProcessMethod(u32 method, u32 argument); /// Return most recently decoded frame - [[nodiscard]] AVFramePtr GetFrame(); + [[nodiscard]] std::unique_ptr<FFmpeg::Frame> GetFrame(); private: /// Invoke codec to decode a frame diff --git a/src/video_core/host1x/vic.cpp b/src/video_core/host1x/vic.cpp index 10d7ef884..2a5eba415 100644 --- 
a/src/video_core/host1x/vic.cpp +++ b/src/video_core/host1x/vic.cpp @@ -82,27 +82,26 @@ void Vic::Execute() { return; } const VicConfig config{host1x.MemoryManager().Read<u64>(config_struct_address + 0x20)}; - const AVFramePtr frame_ptr = nvdec_processor->GetFrame(); - const auto* frame = frame_ptr.get(); + auto frame = nvdec_processor->GetFrame(); if (!frame) { return; } const u64 surface_width = config.surface_width_minus1 + 1; const u64 surface_height = config.surface_height_minus1 + 1; - if (static_cast<u64>(frame->width) != surface_width || - static_cast<u64>(frame->height) != surface_height) { + if (static_cast<u64>(frame->GetWidth()) != surface_width || + static_cast<u64>(frame->GetHeight()) != surface_height) { // TODO: Properly support multiple video streams with differing frame dimensions LOG_WARNING(Service_NVDRV, "Frame dimensions {}x{} don't match surface dimensions {}x{}", - frame->width, frame->height, surface_width, surface_height); + frame->GetWidth(), frame->GetHeight(), surface_width, surface_height); } switch (config.pixel_format) { case VideoPixelFormat::RGBA8: case VideoPixelFormat::BGRA8: case VideoPixelFormat::RGBX8: - WriteRGBFrame(frame, config); + WriteRGBFrame(std::move(frame), config); break; case VideoPixelFormat::YUV420: - WriteYUVFrame(frame, config); + WriteYUVFrame(std::move(frame), config); break; default: UNIMPLEMENTED_MSG("Unknown video pixel format {:X}", config.pixel_format.Value()); @@ -110,10 +109,14 @@ void Vic::Execute() { } } -void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) { +void Vic::WriteRGBFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& config) { LOG_TRACE(Service_NVDRV, "Writing RGB Frame"); - if (!scaler_ctx || frame->width != scaler_width || frame->height != scaler_height) { + const auto frame_width = frame->GetWidth(); + const auto frame_height = frame->GetHeight(); + const auto frame_format = frame->GetPixelFormat(); + + if (!scaler_ctx || frame_width != scaler_width || frame_height != scaler_height) { const AVPixelFormat target_format = [pixel_format = config.pixel_format]() { switch (pixel_format) { case VideoPixelFormat::RGBA8: @@ -129,27 +132,26 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) { sws_freeContext(scaler_ctx); // Frames are decoded into either YUV420 or NV12 formats. 
Convert to desired RGB format - scaler_ctx = sws_getContext(frame->width, frame->height, - static_cast<AVPixelFormat>(frame->format), frame->width, - frame->height, target_format, 0, nullptr, nullptr, nullptr); - scaler_width = frame->width; - scaler_height = frame->height; + scaler_ctx = sws_getContext(frame_width, frame_height, frame_format, frame_width, + frame_height, target_format, 0, nullptr, nullptr, nullptr); + scaler_width = frame_width; + scaler_height = frame_height; converted_frame_buffer.reset(); } if (!converted_frame_buffer) { - const size_t frame_size = frame->width * frame->height * 4; + const size_t frame_size = frame_width * frame_height * 4; converted_frame_buffer = AVMallocPtr{static_cast<u8*>(av_malloc(frame_size)), av_free}; } - const std::array<int, 4> converted_stride{frame->width * 4, frame->height * 4, 0, 0}; + const std::array<int, 4> converted_stride{frame_width * 4, frame_height * 4, 0, 0}; u8* const converted_frame_buf_addr{converted_frame_buffer.get()}; - sws_scale(scaler_ctx, frame->data, frame->linesize, 0, frame->height, &converted_frame_buf_addr, - converted_stride.data()); + sws_scale(scaler_ctx, frame->GetPlanes(), frame->GetStrides(), 0, frame_height, + &converted_frame_buf_addr, converted_stride.data()); // Use the minimum of surface/frame dimensions to avoid buffer overflow. const u32 surface_width = static_cast<u32>(config.surface_width_minus1) + 1; const u32 surface_height = static_cast<u32>(config.surface_height_minus1) + 1; - const u32 width = std::min(surface_width, static_cast<u32>(frame->width)); - const u32 height = std::min(surface_height, static_cast<u32>(frame->height)); + const u32 width = std::min(surface_width, static_cast<u32>(frame_width)); + const u32 height = std::min(surface_height, static_cast<u32>(frame_height)); const u32 blk_kind = static_cast<u32>(config.block_linear_kind); if (blk_kind != 0) { // swizzle pitch linear to block linear @@ -169,23 +171,23 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) { } } -void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) { +void Vic::WriteYUVFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& config) { LOG_TRACE(Service_NVDRV, "Writing YUV420 Frame"); const std::size_t surface_width = config.surface_width_minus1 + 1; const std::size_t surface_height = config.surface_height_minus1 + 1; const std::size_t aligned_width = (surface_width + 0xff) & ~0xffUL; // Use the minimum of surface/frame dimensions to avoid buffer overflow. 
- const auto frame_width = std::min(surface_width, static_cast<size_t>(frame->width)); - const auto frame_height = std::min(surface_height, static_cast<size_t>(frame->height)); + const auto frame_width = std::min(surface_width, static_cast<size_t>(frame->GetWidth())); + const auto frame_height = std::min(surface_height, static_cast<size_t>(frame->GetHeight())); - const auto stride = static_cast<size_t>(frame->linesize[0]); + const auto stride = static_cast<size_t>(frame->GetStride(0)); luma_buffer.resize_destructive(aligned_width * surface_height); chroma_buffer.resize_destructive(aligned_width * surface_height / 2); // Populate luma buffer - const u8* luma_src = frame->data[0]; + const u8* luma_src = frame->GetData(0); for (std::size_t y = 0; y < frame_height; ++y) { const std::size_t src = y * stride; const std::size_t dst = y * aligned_width; @@ -196,16 +198,16 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) { // Chroma const std::size_t half_height = frame_height / 2; - const auto half_stride = static_cast<size_t>(frame->linesize[1]); + const auto half_stride = static_cast<size_t>(frame->GetStride(1)); - switch (frame->format) { + switch (frame->GetPixelFormat()) { case AV_PIX_FMT_YUV420P: { // Frame from FFmpeg software // Populate chroma buffer from both channels with interleaving. const std::size_t half_width = frame_width / 2; u8* chroma_buffer_data = chroma_buffer.data(); - const u8* chroma_b_src = frame->data[1]; - const u8* chroma_r_src = frame->data[2]; + const u8* chroma_b_src = frame->GetData(1); + const u8* chroma_r_src = frame->GetData(2); for (std::size_t y = 0; y < half_height; ++y) { const std::size_t src = y * half_stride; const std::size_t dst = y * aligned_width; @@ -219,7 +221,7 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) { case AV_PIX_FMT_NV12: { // Frame from VA-API hardware // This is already interleaved so just copy - const u8* chroma_src = frame->data[1]; + const u8* chroma_src = frame->GetData(1); for (std::size_t y = 0; y < half_height; ++y) { const std::size_t src = y * stride; const std::size_t dst = y * aligned_width; diff --git a/src/video_core/host1x/vic.h b/src/video_core/host1x/vic.h index 3d9753047..6c868f062 100644 --- a/src/video_core/host1x/vic.h +++ b/src/video_core/host1x/vic.h @@ -39,9 +39,9 @@ public: private: void Execute(); - void WriteRGBFrame(const AVFrame* frame, const VicConfig& config); + void WriteRGBFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& config); - void WriteYUVFrame(const AVFrame* frame, const VicConfig& config); + void WriteYUVFrame(std::unique_ptr<FFmpeg::Frame> frame, const VicConfig& config); Host1x& host1x; std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor; diff --git a/src/yuzu/CMakeLists.txt b/src/yuzu/CMakeLists.txt index 33e1fb663..181b2817c 100644 --- a/src/yuzu/CMakeLists.txt +++ b/src/yuzu/CMakeLists.txt @@ -252,6 +252,7 @@ file(GLOB_RECURSE THEMES ${PROJECT_SOURCE_DIR}/dist/qt_themes/*) if (ENABLE_QT_TRANSLATION) set(YUZU_QT_LANGUAGES "${PROJECT_SOURCE_DIR}/dist/languages" CACHE PATH "Path to the translation bundle for the Qt frontend") option(GENERATE_QT_TRANSLATION "Generate en.ts as the translation source file" OFF) + option(WORKAROUND_BROKEN_LUPDATE "Run lupdate directly through CMake if Qt's convenience wrappers don't work" OFF) # Update source TS file if enabled if (GENERATE_QT_TRANSLATION) @@ -259,19 +260,51 @@ if (ENABLE_QT_TRANSLATION) # these calls to qt_create_translation also creates a rule to generate en.qm which conflicts with 
providing english plurals # so we have to set a OUTPUT_LOCATION so that we don't have multiple rules to generate en.qm set_source_files_properties(${YUZU_QT_LANGUAGES}/en.ts PROPERTIES OUTPUT_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/translations") - qt_create_translation(QM_FILES - ${SRCS} - ${UIS} - ${YUZU_QT_LANGUAGES}/en.ts - OPTIONS - -source-language en_US - -target-language en_US - ) + if (WORKAROUND_BROKEN_LUPDATE) + add_custom_command(OUTPUT ${YUZU_QT_LANGUAGES}/en.ts + COMMAND lupdate + -source-language en_US + -target-language en_US + ${SRCS} + ${UIS} + -ts ${YUZU_QT_LANGUAGES}/en.ts + DEPENDS + ${SRCS} + ${UIS} + WORKING_DIRECTORY + ${CMAKE_CURRENT_SOURCE_DIR} + ) + else() + qt_create_translation(QM_FILES + ${SRCS} + ${UIS} + ${YUZU_QT_LANGUAGES}/en.ts + OPTIONS + -source-language en_US + -target-language en_US + ) + endif() # Generate plurals into dist/english_plurals/generated_en.ts so it can be used to revise dist/english_plurals/en.ts set(GENERATED_PLURALS_FILE ${PROJECT_SOURCE_DIR}/dist/english_plurals/generated_en.ts) set_source_files_properties(${GENERATED_PLURALS_FILE} PROPERTIES OUTPUT_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/plurals") - qt_create_translation(QM_FILES ${SRCS} ${UIS} ${GENERATED_PLURALS_FILE} OPTIONS -pluralonly -source-language en_US -target-language en_US) + if (WORKAROUND_BROKEN_LUPDATE) + add_custom_command(OUTPUT ${GENERATED_PLURALS_FILE} + COMMAND lupdate + -source-language en_US + -target-language en_US + ${SRCS} + ${UIS} + -ts ${GENERATED_PLURALS_FILE} + DEPENDS + ${SRCS} + ${UIS} + WORKING_DIRECTORY + ${CMAKE_CURRENT_SOURCE_DIR} + ) + else() + qt_create_translation(QM_FILES ${SRCS} ${UIS} ${GENERATED_PLURALS_FILE} OPTIONS -pluralonly -source-language en_US -target-language en_US) + endif() add_custom_target(translation ALL DEPENDS ${YUZU_QT_LANGUAGES}/en.ts ${GENERATED_PLURALS_FILE}) endif() diff --git a/src/yuzu/configuration/shared_translation.cpp b/src/yuzu/configuration/shared_translation.cpp index 1434b1a56..a7b5def32 100644 --- a/src/yuzu/configuration/shared_translation.cpp +++ b/src/yuzu/configuration/shared_translation.cpp @@ -1,17 +1,18 @@ // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later -#include "common/time_zone.h" #include "yuzu/configuration/shared_translation.h" #include <map> #include <memory> #include <tuple> #include <utility> +#include <QCoreApplication> #include <QWidget> #include "common/settings.h" #include "common/settings_enums.h" #include "common/settings_setting.h" +#include "common/time_zone.h" #include "yuzu/uisettings.h" namespace ConfigurationShared { @@ -21,123 +22,135 @@ std::unique_ptr<TranslationMap> InitializeTranslations(QWidget* parent) { const auto& tr = [parent](const char* text) -> QString { return parent->tr(text); }; #define INSERT(SETTINGS, ID, NAME, TOOLTIP) \ - translations->insert(std::pair{SETTINGS::values.ID.Id(), std::pair{tr((NAME)), tr((TOOLTIP))}}) + translations->insert(std::pair{SETTINGS::values.ID.Id(), std::pair{(NAME), (TOOLTIP)}}) // A setting can be ignored by giving it a blank name // Audio - INSERT(Settings, sink_id, "Output Engine:", ""); - INSERT(Settings, audio_output_device_id, "Output Device:", ""); - INSERT(Settings, audio_input_device_id, "Input Device:", ""); - INSERT(Settings, audio_muted, "Mute audio", ""); - INSERT(Settings, volume, "Volume:", ""); - INSERT(Settings, dump_audio_commands, "", ""); - INSERT(UISettings, mute_when_in_background, "Mute audio when in background", ""); + INSERT(Settings, sink_id, tr("Output 
Engine:"), QStringLiteral()); + INSERT(Settings, audio_output_device_id, tr("Output Device:"), QStringLiteral()); + INSERT(Settings, audio_input_device_id, tr("Input Device:"), QStringLiteral()); + INSERT(Settings, audio_muted, tr("Mute audio"), QStringLiteral()); + INSERT(Settings, volume, tr("Volume:"), QStringLiteral()); + INSERT(Settings, dump_audio_commands, QStringLiteral(), QStringLiteral()); + INSERT(UISettings, mute_when_in_background, tr("Mute audio when in background"), + QStringLiteral()); // Core - INSERT(Settings, use_multi_core, "Multicore CPU Emulation", ""); - INSERT(Settings, memory_layout_mode, "Memory Layout", ""); - INSERT(Settings, use_speed_limit, "", ""); - INSERT(Settings, speed_limit, "Limit Speed Percent", ""); + INSERT(Settings, use_multi_core, tr("Multicore CPU Emulation"), QStringLiteral()); + INSERT(Settings, memory_layout_mode, tr("Memory Layout"), QStringLiteral()); + INSERT(Settings, use_speed_limit, QStringLiteral(), QStringLiteral()); + INSERT(Settings, speed_limit, tr("Limit Speed Percent"), QStringLiteral()); // Cpu - INSERT(Settings, cpu_accuracy, "Accuracy:", ""); + INSERT(Settings, cpu_accuracy, tr("Accuracy:"), QStringLiteral()); // Cpu Debug // Cpu Unsafe - INSERT(Settings, cpuopt_unsafe_unfuse_fma, - "Unfuse FMA (improve performance on CPUs without FMA)", - "This option improves speed by reducing accuracy of fused-multiply-add instructions on " - "CPUs without native FMA support."); - INSERT(Settings, cpuopt_unsafe_reduce_fp_error, "Faster FRSQRTE and FRECPE", - "This option improves the speed of some approximate floating-point functions by using " - "less accurate native approximations."); - INSERT(Settings, cpuopt_unsafe_ignore_standard_fpcr, "Faster ASIMD instructions (32 bits only)", - "This option improves the speed of 32 bits ASIMD floating-point functions by running " - "with incorrect rounding modes."); - INSERT(Settings, cpuopt_unsafe_inaccurate_nan, "Inaccurate NaN handling", - "This option improves speed by removing NaN checking. Please note this also reduces " - "accuracy of certain floating-point instructions."); INSERT( - Settings, cpuopt_unsafe_fastmem_check, "Disable address space checks", - "This option improves speed by eliminating a safety check before every memory read/write " - "in guest. Disabling it may allow a game to read/write the emulator's memory."); - INSERT(Settings, cpuopt_unsafe_ignore_global_monitor, "Ignore global monitor", - "This option improves speed by relying only on the semantics of cmpxchg to ensure " + Settings, cpuopt_unsafe_unfuse_fma, + tr("Unfuse FMA (improve performance on CPUs without FMA)"), + tr("This option improves speed by reducing accuracy of fused-multiply-add instructions on " + "CPUs without native FMA support.")); + INSERT( + Settings, cpuopt_unsafe_reduce_fp_error, tr("Faster FRSQRTE and FRECPE"), + tr("This option improves the speed of some approximate floating-point functions by using " + "less accurate native approximations.")); + INSERT(Settings, cpuopt_unsafe_ignore_standard_fpcr, + tr("Faster ASIMD instructions (32 bits only)"), + tr("This option improves the speed of 32 bits ASIMD floating-point functions by running " + "with incorrect rounding modes.")); + INSERT(Settings, cpuopt_unsafe_inaccurate_nan, tr("Inaccurate NaN handling"), + tr("This option improves speed by removing NaN checking. 
Please note this also reduces " + "accuracy of certain floating-point instructions.")); + INSERT(Settings, cpuopt_unsafe_fastmem_check, tr("Disable address space checks"), + tr("This option improves speed by eliminating a safety check before every memory " + "read/write " + "in guest. Disabling it may allow a game to read/write the emulator's memory.")); + INSERT( + Settings, cpuopt_unsafe_ignore_global_monitor, tr("Ignore global monitor"), + tr("This option improves speed by relying only on the semantics of cmpxchg to ensure " "safety of exclusive access instructions. Please note this may result in deadlocks and " - "other race conditions."); + "other race conditions.")); // Renderer - INSERT(Settings, renderer_backend, "API:", ""); - INSERT(Settings, vulkan_device, "Device:", ""); - INSERT(Settings, shader_backend, "Shader Backend:", ""); - INSERT(Settings, resolution_setup, "Resolution:", ""); - INSERT(Settings, scaling_filter, "Window Adapting Filter:", ""); - INSERT(Settings, fsr_sharpening_slider, "FSR Sharpness:", ""); - INSERT(Settings, anti_aliasing, "Anti-Aliasing Method:", ""); - INSERT(Settings, fullscreen_mode, "Fullscreen Mode:", ""); - INSERT(Settings, aspect_ratio, "Aspect Ratio:", ""); - INSERT(Settings, use_disk_shader_cache, "Use disk pipeline cache", ""); - INSERT(Settings, use_asynchronous_gpu_emulation, "Use asynchronous GPU emulation", ""); - INSERT(Settings, nvdec_emulation, "NVDEC emulation:", ""); - INSERT(Settings, accelerate_astc, "ASTC Decoding Method:", ""); - INSERT(Settings, astc_recompression, "ASTC Recompression Method:", ""); - INSERT(Settings, vsync_mode, "VSync Mode:", - "FIFO (VSync) does not drop frames or exhibit tearing but is limited by the screen " + INSERT(Settings, renderer_backend, tr("API:"), QStringLiteral()); + INSERT(Settings, vulkan_device, tr("Device:"), QStringLiteral()); + INSERT(Settings, shader_backend, tr("Shader Backend:"), QStringLiteral()); + INSERT(Settings, resolution_setup, tr("Resolution:"), QStringLiteral()); + INSERT(Settings, scaling_filter, tr("Window Adapting Filter:"), QStringLiteral()); + INSERT(Settings, fsr_sharpening_slider, tr("FSR Sharpness:"), QStringLiteral()); + INSERT(Settings, anti_aliasing, tr("Anti-Aliasing Method:"), QStringLiteral()); + INSERT(Settings, fullscreen_mode, tr("Fullscreen Mode:"), QStringLiteral()); + INSERT(Settings, aspect_ratio, tr("Aspect Ratio:"), QStringLiteral()); + INSERT(Settings, use_disk_shader_cache, tr("Use disk pipeline cache"), QStringLiteral()); + INSERT(Settings, use_asynchronous_gpu_emulation, tr("Use asynchronous GPU emulation"), + QStringLiteral()); + INSERT(Settings, nvdec_emulation, tr("NVDEC emulation:"), QStringLiteral()); + INSERT(Settings, accelerate_astc, tr("ASTC Decoding Method:"), QStringLiteral()); + INSERT(Settings, astc_recompression, tr("ASTC Recompression Method:"), QStringLiteral()); + INSERT( + Settings, vsync_mode, tr("VSync Mode:"), + tr("FIFO (VSync) does not drop frames or exhibit tearing but is limited by the screen " "refresh rate.\nFIFO Relaxed is similar to FIFO but allows tearing as it recovers from " "a slow down.\nMailbox can have lower latency than FIFO and does not tear but may drop " "frames.\nImmediate (no synchronization) just presents whatever is available and can " - "exhibit tearing."); - INSERT(Settings, bg_red, "", ""); - INSERT(Settings, bg_green, "", ""); - INSERT(Settings, bg_blue, "", ""); + "exhibit tearing.")); + INSERT(Settings, bg_red, QStringLiteral(), QStringLiteral()); + INSERT(Settings, bg_green, QStringLiteral(), 
QStringLiteral()); + INSERT(Settings, bg_blue, QStringLiteral(), QStringLiteral()); // Renderer (Advanced Graphics) - INSERT(Settings, async_presentation, "Enable asynchronous presentation (Vulkan only)", ""); - INSERT(Settings, renderer_force_max_clock, "Force maximum clocks (Vulkan only)", - "Runs work in the background while waiting for graphics commands to keep the GPU from " - "lowering its clock speed."); - INSERT(Settings, max_anisotropy, "Anisotropic Filtering:", ""); - INSERT(Settings, gpu_accuracy, "Accuracy Level:", ""); - INSERT(Settings, use_asynchronous_shaders, "Use asynchronous shader building (Hack)", - "Enables asynchronous shader compilation, which may reduce shader stutter. This feature " - "is experimental."); - INSERT(Settings, use_fast_gpu_time, "Use Fast GPU Time (Hack)", - "Enables Fast GPU Time. This option will force most games to run at their highest " - "native resolution."); - INSERT(Settings, use_vulkan_driver_pipeline_cache, "Use Vulkan pipeline cache", - "Enables GPU vendor-specific pipeline cache. This option can improve shader loading " - "time significantly in cases where the Vulkan driver does not store pipeline cache " - "files internally."); - INSERT(Settings, enable_compute_pipelines, "Enable Compute Pipelines (Intel Vulkan Only)", - "Enable compute pipelines, required by some games.\nThis setting only exists for Intel " + INSERT(Settings, async_presentation, tr("Enable asynchronous presentation (Vulkan only)"), + QStringLiteral()); + INSERT( + Settings, renderer_force_max_clock, tr("Force maximum clocks (Vulkan only)"), + tr("Runs work in the background while waiting for graphics commands to keep the GPU from " + "lowering its clock speed.")); + INSERT(Settings, max_anisotropy, tr("Anisotropic Filtering:"), QStringLiteral()); + INSERT(Settings, gpu_accuracy, tr("Accuracy Level:"), QStringLiteral()); + INSERT( + Settings, use_asynchronous_shaders, tr("Use asynchronous shader building (Hack)"), + tr("Enables asynchronous shader compilation, which may reduce shader stutter. This feature " + "is experimental.")); + INSERT(Settings, use_fast_gpu_time, tr("Use Fast GPU Time (Hack)"), + tr("Enables Fast GPU Time. This option will force most games to run at their highest " + "native resolution.")); + INSERT(Settings, use_vulkan_driver_pipeline_cache, tr("Use Vulkan pipeline cache"), + tr("Enables GPU vendor-specific pipeline cache. 
This option can improve shader loading " + "time significantly in cases where the Vulkan driver does not store pipeline cache " + "files internally.")); + INSERT( + Settings, enable_compute_pipelines, tr("Enable Compute Pipelines (Intel Vulkan Only)"), + tr("Enable compute pipelines, required by some games.\nThis setting only exists for Intel " "proprietary drivers, and may crash if enabled.\nCompute pipelines are always enabled " - "on all other drivers."); - INSERT(Settings, use_reactive_flushing, "Enable Reactive Flushing", - "Uses reactive flushing instead of predictive flushing, allowing more accurate memory " - "syncing."); - INSERT(Settings, use_video_framerate, "Sync to framerate of video playback", - "Run the game at normal speed during video playback, even when the framerate is " - "unlocked."); - INSERT(Settings, barrier_feedback_loops, "Barrier feedback loops", - "Improves rendering of transparency effects in specific games."); + "on all other drivers.")); + INSERT( + Settings, use_reactive_flushing, tr("Enable Reactive Flushing"), + tr("Uses reactive flushing instead of predictive flushing, allowing more accurate memory " + "syncing.")); + INSERT(Settings, use_video_framerate, tr("Sync to framerate of video playback"), + tr("Run the game at normal speed during video playback, even when the framerate is " + "unlocked.")); + INSERT(Settings, barrier_feedback_loops, tr("Barrier feedback loops"), + tr("Improves rendering of transparency effects in specific games.")); // Renderer (Debug) // System - INSERT(Settings, rng_seed, "RNG Seed", ""); - INSERT(Settings, rng_seed_enabled, "", ""); - INSERT(Settings, device_name, "Device Name", ""); - INSERT(Settings, custom_rtc, "Custom RTC", ""); - INSERT(Settings, custom_rtc_enabled, "", ""); - INSERT(Settings, language_index, - "Language:", "Note: this can be overridden when region setting is auto-select"); - INSERT(Settings, region_index, "Region:", ""); - INSERT(Settings, time_zone_index, "Time Zone:", ""); - INSERT(Settings, sound_index, "Sound Output Mode:", ""); - INSERT(Settings, use_docked_mode, "Console Mode:", ""); - INSERT(Settings, current_user, "", ""); + INSERT(Settings, rng_seed, tr("RNG Seed"), QStringLiteral()); + INSERT(Settings, rng_seed_enabled, QStringLiteral(), QStringLiteral()); + INSERT(Settings, device_name, tr("Device Name"), QStringLiteral()); + INSERT(Settings, custom_rtc, tr("Custom RTC"), QStringLiteral()); + INSERT(Settings, custom_rtc_enabled, QStringLiteral(), QStringLiteral()); + INSERT(Settings, language_index, tr("Language:"), + tr("Note: this can be overridden when region setting is auto-select")); + INSERT(Settings, region_index, tr("Region:"), QStringLiteral()); + INSERT(Settings, time_zone_index, tr("Time Zone:"), QStringLiteral()); + INSERT(Settings, sound_index, tr("Sound Output Mode:"), QStringLiteral()); + INSERT(Settings, use_docked_mode, tr("Console Mode:"), QStringLiteral()); + INSERT(Settings, current_user, QStringLiteral(), QStringLiteral()); // Controls @@ -154,11 +167,14 @@ std::unique_ptr<TranslationMap> InitializeTranslations(QWidget* parent) { // Ui // Ui General - INSERT(UISettings, select_user_on_boot, "Prompt for user on game boot", ""); - INSERT(UISettings, pause_when_in_background, "Pause emulation when in background", ""); - INSERT(UISettings, confirm_before_stopping, "Confirm before stopping emulation", ""); - INSERT(UISettings, hide_mouse, "Hide mouse on inactivity", ""); - INSERT(UISettings, controller_applet_disabled, "Disable controller applet", ""); + INSERT(UISettings, 
select_user_on_boot, tr("Prompt for user on game boot"), QStringLiteral()); + INSERT(UISettings, pause_when_in_background, tr("Pause emulation when in background"), + QStringLiteral()); + INSERT(UISettings, confirm_before_stopping, tr("Confirm before stopping emulation"), + QStringLiteral()); + INSERT(UISettings, hide_mouse, tr("Hide mouse on inactivity"), QStringLiteral()); + INSERT(UISettings, controller_applet_disabled, tr("Disable controller applet"), + QStringLiteral()); // Ui Debugging @@ -178,140 +194,141 @@ std::unique_ptr<ComboboxTranslationMap> ComboboxEnumeration(QWidget* parent) { return parent->tr(text, context); }; -#define PAIR(ENUM, VALUE, TRANSLATION) \ - { static_cast<u32>(Settings::ENUM::VALUE), tr(TRANSLATION) } -#define CTX_PAIR(ENUM, VALUE, TRANSLATION, CONTEXT) \ - { static_cast<u32>(Settings::ENUM::VALUE), tr(TRANSLATION, CONTEXT) } +#define PAIR(ENUM, VALUE, TRANSLATION) {static_cast<u32>(Settings::ENUM::VALUE), (TRANSLATION)} // Intentionally skipping VSyncMode to let the UI fill that one out translations->insert({Settings::EnumMetadata<Settings::AstcDecodeMode>::Index(), { - PAIR(AstcDecodeMode, Cpu, "CPU"), - PAIR(AstcDecodeMode, Gpu, "GPU"), - PAIR(AstcDecodeMode, CpuAsynchronous, "CPU Asynchronous"), - }}); - translations->insert({Settings::EnumMetadata<Settings::AstcRecompression>::Index(), - { - PAIR(AstcRecompression, Uncompressed, "Uncompressed (Best quality)"), - PAIR(AstcRecompression, Bc1, "BC1 (Low quality)"), - PAIR(AstcRecompression, Bc3, "BC3 (Medium quality)"), + PAIR(AstcDecodeMode, Cpu, tr("CPU")), + PAIR(AstcDecodeMode, Gpu, tr("GPU")), + PAIR(AstcDecodeMode, CpuAsynchronous, tr("CPU Asynchronous")), }}); + translations->insert( + {Settings::EnumMetadata<Settings::AstcRecompression>::Index(), + { + PAIR(AstcRecompression, Uncompressed, tr("Uncompressed (Best quality)")), + PAIR(AstcRecompression, Bc1, tr("BC1 (Low quality)")), + PAIR(AstcRecompression, Bc3, tr("BC3 (Medium quality)")), + }}); translations->insert({Settings::EnumMetadata<Settings::RendererBackend>::Index(), { #ifdef HAS_OPENGL - PAIR(RendererBackend, OpenGL, "OpenGL"), + PAIR(RendererBackend, OpenGL, tr("OpenGL")), #endif - PAIR(RendererBackend, Vulkan, "Vulkan"), - PAIR(RendererBackend, Null, "Null"), - }}); - translations->insert({Settings::EnumMetadata<Settings::ShaderBackend>::Index(), - { - PAIR(ShaderBackend, Glsl, "GLSL"), - PAIR(ShaderBackend, Glasm, "GLASM (Assembly Shaders, NVIDIA Only)"), - PAIR(ShaderBackend, SpirV, "SPIR-V (Experimental, Mesa Only)"), + PAIR(RendererBackend, Vulkan, tr("Vulkan")), + PAIR(RendererBackend, Null, tr("Null")), }}); + translations->insert( + {Settings::EnumMetadata<Settings::ShaderBackend>::Index(), + { + PAIR(ShaderBackend, Glsl, tr("GLSL")), + PAIR(ShaderBackend, Glasm, tr("GLASM (Assembly Shaders, NVIDIA Only)")), + PAIR(ShaderBackend, SpirV, tr("SPIR-V (Experimental, Mesa Only)")), + }}); translations->insert({Settings::EnumMetadata<Settings::GpuAccuracy>::Index(), { - PAIR(GpuAccuracy, Normal, "Normal"), - PAIR(GpuAccuracy, High, "High"), - PAIR(GpuAccuracy, Extreme, "Extreme"), - }}); - translations->insert({Settings::EnumMetadata<Settings::CpuAccuracy>::Index(), - { - PAIR(CpuAccuracy, Auto, "Auto"), - PAIR(CpuAccuracy, Accurate, "Accurate"), - PAIR(CpuAccuracy, Unsafe, "Unsafe"), - PAIR(CpuAccuracy, Paranoid, "Paranoid (disables most optimizations)"), + PAIR(GpuAccuracy, Normal, tr("Normal")), + PAIR(GpuAccuracy, High, tr("High")), + PAIR(GpuAccuracy, Extreme, tr("Extreme")), }}); + translations->insert( + 
{Settings::EnumMetadata<Settings::CpuAccuracy>::Index(), + { + PAIR(CpuAccuracy, Auto, tr("Auto")), + PAIR(CpuAccuracy, Accurate, tr("Accurate")), + PAIR(CpuAccuracy, Unsafe, tr("Unsafe")), + PAIR(CpuAccuracy, Paranoid, tr("Paranoid (disables most optimizations)")), + }}); translations->insert({Settings::EnumMetadata<Settings::FullscreenMode>::Index(), { - PAIR(FullscreenMode, Borderless, "Borderless Windowed"), - PAIR(FullscreenMode, Exclusive, "Exclusive Fullscreen"), + PAIR(FullscreenMode, Borderless, tr("Borderless Windowed")), + PAIR(FullscreenMode, Exclusive, tr("Exclusive Fullscreen")), }}); translations->insert({Settings::EnumMetadata<Settings::NvdecEmulation>::Index(), { - PAIR(NvdecEmulation, Off, "No Video Output"), - PAIR(NvdecEmulation, Cpu, "CPU Video Decoding"), - PAIR(NvdecEmulation, Gpu, "GPU Video Decoding (Default)"), - }}); - translations->insert({Settings::EnumMetadata<Settings::ResolutionSetup>::Index(), - { - PAIR(ResolutionSetup, Res1_2X, "0.5X (360p/540p) [EXPERIMENTAL]"), - PAIR(ResolutionSetup, Res3_4X, "0.75X (540p/810p) [EXPERIMENTAL]"), - PAIR(ResolutionSetup, Res1X, "1X (720p/1080p)"), - PAIR(ResolutionSetup, Res3_2X, "1.5X (1080p/1620p) [EXPERIMENTAL]"), - PAIR(ResolutionSetup, Res2X, "2X (1440p/2160p)"), - PAIR(ResolutionSetup, Res3X, "3X (2160p/3240p)"), - PAIR(ResolutionSetup, Res4X, "4X (2880p/4320p)"), - PAIR(ResolutionSetup, Res5X, "5X (3600p/5400p)"), - PAIR(ResolutionSetup, Res6X, "6X (4320p/6480p)"), - PAIR(ResolutionSetup, Res7X, "7X (5040p/7560p)"), - PAIR(ResolutionSetup, Res8X, "8X (5760p/8640p)"), + PAIR(NvdecEmulation, Off, tr("No Video Output")), + PAIR(NvdecEmulation, Cpu, tr("CPU Video Decoding")), + PAIR(NvdecEmulation, Gpu, tr("GPU Video Decoding (Default)")), }}); + translations->insert( + {Settings::EnumMetadata<Settings::ResolutionSetup>::Index(), + { + PAIR(ResolutionSetup, Res1_2X, tr("0.5X (360p/540p) [EXPERIMENTAL]")), + PAIR(ResolutionSetup, Res3_4X, tr("0.75X (540p/810p) [EXPERIMENTAL]")), + PAIR(ResolutionSetup, Res1X, tr("1X (720p/1080p)")), + PAIR(ResolutionSetup, Res3_2X, tr("1.5X (1080p/1620p) [EXPERIMENTAL]")), + PAIR(ResolutionSetup, Res2X, tr("2X (1440p/2160p)")), + PAIR(ResolutionSetup, Res3X, tr("3X (2160p/3240p)")), + PAIR(ResolutionSetup, Res4X, tr("4X (2880p/4320p)")), + PAIR(ResolutionSetup, Res5X, tr("5X (3600p/5400p)")), + PAIR(ResolutionSetup, Res6X, tr("6X (4320p/6480p)")), + PAIR(ResolutionSetup, Res7X, tr("7X (5040p/7560p)")), + PAIR(ResolutionSetup, Res8X, tr("8X (5760p/8640p)")), + }}); translations->insert({Settings::EnumMetadata<Settings::ScalingFilter>::Index(), { - PAIR(ScalingFilter, NearestNeighbor, "Nearest Neighbor"), - PAIR(ScalingFilter, Bilinear, "Bilinear"), - PAIR(ScalingFilter, Bicubic, "Bicubic"), - PAIR(ScalingFilter, Gaussian, "Gaussian"), - PAIR(ScalingFilter, ScaleForce, "ScaleForce"), - PAIR(ScalingFilter, Fsr, "AMD FidelityFX™️ Super Resolution"), + PAIR(ScalingFilter, NearestNeighbor, tr("Nearest Neighbor")), + PAIR(ScalingFilter, Bilinear, tr("Bilinear")), + PAIR(ScalingFilter, Bicubic, tr("Bicubic")), + PAIR(ScalingFilter, Gaussian, tr("Gaussian")), + PAIR(ScalingFilter, ScaleForce, tr("ScaleForce")), + PAIR(ScalingFilter, Fsr, tr("AMD FidelityFX™️ Super Resolution")), }}); translations->insert({Settings::EnumMetadata<Settings::AntiAliasing>::Index(), { - PAIR(AntiAliasing, None, "None"), - PAIR(AntiAliasing, Fxaa, "FXAA"), - PAIR(AntiAliasing, Smaa, "SMAA"), + PAIR(AntiAliasing, None, tr("None")), + PAIR(AntiAliasing, Fxaa, tr("FXAA")), + PAIR(AntiAliasing, Smaa, tr("SMAA")), }}); 
translations->insert({Settings::EnumMetadata<Settings::AspectRatio>::Index(), { - PAIR(AspectRatio, R16_9, "Default (16:9)"), - PAIR(AspectRatio, R4_3, "Force 4:3"), - PAIR(AspectRatio, R21_9, "Force 21:9"), - PAIR(AspectRatio, R16_10, "Force 16:10"), - PAIR(AspectRatio, Stretch, "Stretch to Window"), + PAIR(AspectRatio, R16_9, tr("Default (16:9)")), + PAIR(AspectRatio, R4_3, tr("Force 4:3")), + PAIR(AspectRatio, R21_9, tr("Force 21:9")), + PAIR(AspectRatio, R16_10, tr("Force 16:10")), + PAIR(AspectRatio, Stretch, tr("Stretch to Window")), }}); translations->insert({Settings::EnumMetadata<Settings::AnisotropyMode>::Index(), { - PAIR(AnisotropyMode, Automatic, "Automatic"), - PAIR(AnisotropyMode, Default, "Default"), - PAIR(AnisotropyMode, X2, "2x"), - PAIR(AnisotropyMode, X4, "4x"), - PAIR(AnisotropyMode, X8, "8x"), - PAIR(AnisotropyMode, X16, "16x"), + PAIR(AnisotropyMode, Automatic, tr("Automatic")), + PAIR(AnisotropyMode, Default, tr("Default")), + PAIR(AnisotropyMode, X2, tr("2x")), + PAIR(AnisotropyMode, X4, tr("4x")), + PAIR(AnisotropyMode, X8, tr("8x")), + PAIR(AnisotropyMode, X16, tr("16x")), }}); translations->insert( {Settings::EnumMetadata<Settings::Language>::Index(), { - PAIR(Language, Japanese, "Japanese (日本語)"), - PAIR(Language, EnglishAmerican, "American English"), - PAIR(Language, French, "French (français)"), - PAIR(Language, German, "German (Deutsch)"), - PAIR(Language, Italian, "Italian (italiano)"), - PAIR(Language, Spanish, "Spanish (español)"), - PAIR(Language, Chinese, "Chinese"), - PAIR(Language, Korean, "Korean (한국어)"), - PAIR(Language, Dutch, "Dutch (Nederlands)"), - PAIR(Language, Portuguese, "Portuguese (português)"), - PAIR(Language, Russian, "Russian (Русский)"), - PAIR(Language, Taiwanese, "Taiwanese"), - PAIR(Language, EnglishBritish, "British English"), - PAIR(Language, FrenchCanadian, "Canadian French"), - PAIR(Language, SpanishLatin, "Latin American Spanish"), - PAIR(Language, ChineseSimplified, "Simplified Chinese"), - PAIR(Language, ChineseTraditional, "Traditional Chinese (正體中文)"), - PAIR(Language, PortugueseBrazilian, "Brazilian Portuguese (português do Brasil)"), + PAIR(Language, Japanese, tr("Japanese (日本語)")), + PAIR(Language, EnglishAmerican, tr("American English")), + PAIR(Language, French, tr("French (français)")), + PAIR(Language, German, tr("German (Deutsch)")), + PAIR(Language, Italian, tr("Italian (italiano)")), + PAIR(Language, Spanish, tr("Spanish (español)")), + PAIR(Language, Chinese, tr("Chinese")), + PAIR(Language, Korean, tr("Korean (한국어)")), + PAIR(Language, Dutch, tr("Dutch (Nederlands)")), + PAIR(Language, Portuguese, tr("Portuguese (português)")), + PAIR(Language, Russian, tr("Russian (Русский)")), + PAIR(Language, Taiwanese, tr("Taiwanese")), + PAIR(Language, EnglishBritish, tr("British English")), + PAIR(Language, FrenchCanadian, tr("Canadian French")), + PAIR(Language, SpanishLatin, tr("Latin American Spanish")), + PAIR(Language, ChineseSimplified, tr("Simplified Chinese")), + PAIR(Language, ChineseTraditional, tr("Traditional Chinese (正體中文)")), + PAIR(Language, PortugueseBrazilian, tr("Brazilian Portuguese (português do Brasil)")), }}); translations->insert({Settings::EnumMetadata<Settings::Region>::Index(), { - PAIR(Region, Japan, "Japan"), - PAIR(Region, Usa, "USA"), - PAIR(Region, Europe, "Europe"), - PAIR(Region, Australia, "Australia"), - PAIR(Region, China, "China"), - PAIR(Region, Korea, "Korea"), - PAIR(Region, Taiwan, "Taiwan"), + PAIR(Region, Japan, tr("Japan")), + PAIR(Region, Usa, tr("USA")), + PAIR(Region, 
Europe, tr("Europe")), + PAIR(Region, Australia, tr("Australia")), + PAIR(Region, China, tr("China")), + PAIR(Region, Korea, tr("Korea")), + PAIR(Region, Taiwan, tr("Taiwan")), }}); translations->insert( {Settings::EnumMetadata<Settings::TimeZone>::Index(), @@ -323,72 +340,74 @@ std::unique_ptr<ComboboxTranslationMap> ComboboxEnumeration(QWidget* parent) { {static_cast<u32>(Settings::TimeZone::Default), tr("Default (%1)", "Default time zone") .arg(QString::fromStdString(Common::TimeZone::GetDefaultTimeZone()))}, - PAIR(TimeZone, Cet, "CET"), - PAIR(TimeZone, Cst6Cdt, "CST6CDT"), - PAIR(TimeZone, Cuba, "Cuba"), - PAIR(TimeZone, Eet, "EET"), - PAIR(TimeZone, Egypt, "Egypt"), - PAIR(TimeZone, Eire, "Eire"), - PAIR(TimeZone, Est, "EST"), - PAIR(TimeZone, Est5Edt, "EST5EDT"), - PAIR(TimeZone, Gb, "GB"), - PAIR(TimeZone, GbEire, "GB-Eire"), - PAIR(TimeZone, Gmt, "GMT"), - PAIR(TimeZone, GmtPlusZero, "GMT+0"), - PAIR(TimeZone, GmtMinusZero, "GMT-0"), - PAIR(TimeZone, GmtZero, "GMT0"), - PAIR(TimeZone, Greenwich, "Greenwich"), - PAIR(TimeZone, Hongkong, "Hongkong"), - PAIR(TimeZone, Hst, "HST"), - PAIR(TimeZone, Iceland, "Iceland"), - PAIR(TimeZone, Iran, "Iran"), - PAIR(TimeZone, Israel, "Israel"), - PAIR(TimeZone, Jamaica, "Jamaica"), - PAIR(TimeZone, Japan, "Japan"), - PAIR(TimeZone, Kwajalein, "Kwajalein"), - PAIR(TimeZone, Libya, "Libya"), - PAIR(TimeZone, Met, "MET"), - PAIR(TimeZone, Mst, "MST"), - PAIR(TimeZone, Mst7Mdt, "MST7MDT"), - PAIR(TimeZone, Navajo, "Navajo"), - PAIR(TimeZone, Nz, "NZ"), - PAIR(TimeZone, NzChat, "NZ-CHAT"), - PAIR(TimeZone, Poland, "Poland"), - PAIR(TimeZone, Portugal, "Portugal"), - PAIR(TimeZone, Prc, "PRC"), - PAIR(TimeZone, Pst8Pdt, "PST8PDT"), - PAIR(TimeZone, Roc, "ROC"), - PAIR(TimeZone, Rok, "ROK"), - PAIR(TimeZone, Singapore, "Singapore"), - PAIR(TimeZone, Turkey, "Turkey"), - PAIR(TimeZone, Uct, "UCT"), - PAIR(TimeZone, Universal, "Universal"), - PAIR(TimeZone, Utc, "UTC"), - PAIR(TimeZone, WSu, "W-SU"), - PAIR(TimeZone, Wet, "WET"), - PAIR(TimeZone, Zulu, "Zulu"), + PAIR(TimeZone, Cet, tr("CET")), + PAIR(TimeZone, Cst6Cdt, tr("CST6CDT")), + PAIR(TimeZone, Cuba, tr("Cuba")), + PAIR(TimeZone, Eet, tr("EET")), + PAIR(TimeZone, Egypt, tr("Egypt")), + PAIR(TimeZone, Eire, tr("Eire")), + PAIR(TimeZone, Est, tr("EST")), + PAIR(TimeZone, Est5Edt, tr("EST5EDT")), + PAIR(TimeZone, Gb, tr("GB")), + PAIR(TimeZone, GbEire, tr("GB-Eire")), + PAIR(TimeZone, Gmt, tr("GMT")), + PAIR(TimeZone, GmtPlusZero, tr("GMT+0")), + PAIR(TimeZone, GmtMinusZero, tr("GMT-0")), + PAIR(TimeZone, GmtZero, tr("GMT0")), + PAIR(TimeZone, Greenwich, tr("Greenwich")), + PAIR(TimeZone, Hongkong, tr("Hongkong")), + PAIR(TimeZone, Hst, tr("HST")), + PAIR(TimeZone, Iceland, tr("Iceland")), + PAIR(TimeZone, Iran, tr("Iran")), + PAIR(TimeZone, Israel, tr("Israel")), + PAIR(TimeZone, Jamaica, tr("Jamaica")), + PAIR(TimeZone, Japan, tr("Japan")), + PAIR(TimeZone, Kwajalein, tr("Kwajalein")), + PAIR(TimeZone, Libya, tr("Libya")), + PAIR(TimeZone, Met, tr("MET")), + PAIR(TimeZone, Mst, tr("MST")), + PAIR(TimeZone, Mst7Mdt, tr("MST7MDT")), + PAIR(TimeZone, Navajo, tr("Navajo")), + PAIR(TimeZone, Nz, tr("NZ")), + PAIR(TimeZone, NzChat, tr("NZ-CHAT")), + PAIR(TimeZone, Poland, tr("Poland")), + PAIR(TimeZone, Portugal, tr("Portugal")), + PAIR(TimeZone, Prc, tr("PRC")), + PAIR(TimeZone, Pst8Pdt, tr("PST8PDT")), + PAIR(TimeZone, Roc, tr("ROC")), + PAIR(TimeZone, Rok, tr("ROK")), + PAIR(TimeZone, Singapore, tr("Singapore")), + PAIR(TimeZone, Turkey, tr("Turkey")), + PAIR(TimeZone, Uct, tr("UCT")), + 
PAIR(TimeZone, Universal, tr("Universal")), + PAIR(TimeZone, Utc, tr("UTC")), + PAIR(TimeZone, WSu, tr("W-SU")), + PAIR(TimeZone, Wet, tr("WET")), + PAIR(TimeZone, Zulu, tr("Zulu")), }}); translations->insert({Settings::EnumMetadata<Settings::AudioMode>::Index(), { - PAIR(AudioMode, Mono, "Mono"), - PAIR(AudioMode, Stereo, "Stereo"), - PAIR(AudioMode, Surround, "Surround"), + PAIR(AudioMode, Mono, tr("Mono")), + PAIR(AudioMode, Stereo, tr("Stereo")), + PAIR(AudioMode, Surround, tr("Surround")), }}); translations->insert({Settings::EnumMetadata<Settings::MemoryLayout>::Index(), { - PAIR(MemoryLayout, Memory_4Gb, "4GB DRAM (Default)"), - PAIR(MemoryLayout, Memory_6Gb, "6GB DRAM (Unsafe)"), - PAIR(MemoryLayout, Memory_8Gb, "8GB DRAM (Unsafe)"), + PAIR(MemoryLayout, Memory_4Gb, tr("4GB DRAM (Default)")), + PAIR(MemoryLayout, Memory_6Gb, tr("6GB DRAM (Unsafe)")), + PAIR(MemoryLayout, Memory_8Gb, tr("8GB DRAM (Unsafe)")), + }}); + translations->insert({Settings::EnumMetadata<Settings::ConsoleMode>::Index(), + { + PAIR(ConsoleMode, Docked, tr("Docked")), + PAIR(ConsoleMode, Handheld, tr("Handheld")), }}); - translations->insert( - {Settings::EnumMetadata<Settings::ConsoleMode>::Index(), - {PAIR(ConsoleMode, Docked, "Docked"), PAIR(ConsoleMode, Handheld, "Handheld")}}); translations->insert( {Settings::EnumMetadata<Settings::ConfirmStop>::Index(), { - PAIR(ConfirmStop, Ask_Always, "Always ask (Default)"), - PAIR(ConfirmStop, Ask_Based_On_Game, "Only if game specifies not to stop"), - PAIR(ConfirmStop, Ask_Never, "Never ask"), + PAIR(ConfirmStop, Ask_Always, tr("Always ask (Default)")), + PAIR(ConfirmStop, Ask_Based_On_Game, tr("Only if game specifies not to stop")), + PAIR(ConfirmStop, Ask_Never, tr("Never ask")), }}); #undef PAIR diff --git a/src/yuzu/configuration/shared_widget.cpp b/src/yuzu/configuration/shared_widget.cpp index ea8d7add4..941683a43 100644 --- a/src/yuzu/configuration/shared_widget.cpp +++ b/src/yuzu/configuration/shared_widget.cpp @@ -194,7 +194,7 @@ QWidget* Widget::CreateRadioGroup(std::function<std::string()>& serializer, return group; } - const auto get_selected = [=]() -> int { + const auto get_selected = [this]() -> int { for (const auto& [id, button] : radio_buttons) { if (button->isChecked()) { return id; @@ -203,7 +203,7 @@ QWidget* Widget::CreateRadioGroup(std::function<std::string()>& serializer, return -1; }; - const auto set_index = [=](u32 value) { + const auto set_index = [this](u32 value) { for (const auto& [id, button] : radio_buttons) { button->setChecked(id == value); } diff --git a/src/yuzu/game_list_worker.cpp b/src/yuzu/game_list_worker.cpp index 69be21027..307eac02d 100644 --- a/src/yuzu/game_list_worker.cpp +++ b/src/yuzu/game_list_worker.cpp @@ -479,6 +479,6 @@ void GameListWorker::run() { } } - RecordEvent([=](GameList* game_list) { game_list->DonePopulating(watch_list); }); + RecordEvent([this](GameList* game_list) { game_list->DonePopulating(watch_list); }); processing_completed.Set(); } |