path: root/src
Diffstat (limited to 'src')
-rw-r--r--  src/audio_core/renderer/adsp/audio_renderer.cpp | 5
-rw-r--r--  src/audio_core/renderer/system_manager.cpp | 9
-rw-r--r--  src/audio_core/renderer/system_manager.h | 10
-rw-r--r--  src/audio_core/sink/sink_stream.cpp | 7
-rw-r--r--  src/common/settings.cpp | 3
-rw-r--r--  src/common/settings.h | 10
-rw-r--r--  src/core/core.cpp | 3
-rw-r--r--  src/core/file_sys/romfs.cpp | 3
-rw-r--r--  src/core/file_sys/vfs_concat.cpp | 161
-rw-r--r--  src/core/file_sys/vfs_concat.h | 28
-rw-r--r--  src/core/hid/emulated_controller.cpp | 9
-rw-r--r--  src/core/hle/kernel/k_memory_block_manager.h | 8
-rw-r--r--  src/core/hle/service/hid/controllers/npad.cpp | 4
-rw-r--r--  src/core/hle/service/hid/controllers/npad.h | 4
-rw-r--r--  src/core/hle/service/hid/hid.cpp | 20
-rw-r--r--  src/core/hle/service/nfc/common/amiibo_crypto.cpp | 3
-rw-r--r--  src/core/hle/service/nfc/common/device.cpp | 75
-rw-r--r--  src/core/hle/service/nfc/common/device.h | 3
-rw-r--r--  src/input_common/drivers/joycon.cpp | 17
-rw-r--r--  src/input_common/drivers/joycon.h | 3
-rw-r--r--  src/input_common/helpers/joycon_driver.cpp | 20
-rw-r--r--  src/input_common/helpers/joycon_driver.h | 1
-rw-r--r--  src/input_common/helpers/joycon_protocol/common_protocol.cpp | 2
-rw-r--r--  src/input_common/helpers/joycon_protocol/joycon_types.h | 51
-rw-r--r--  src/input_common/helpers/joycon_protocol/nfc.cpp | 403
-rw-r--r--  src/input_common/helpers/joycon_protocol/nfc.h | 35
-rw-r--r--  src/input_common/input_engine.cpp | 11
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp | 8
-rw-r--r--  src/shader_recompiler/frontend/maxwell/translate/impl/integer_funnel_shift.cpp | 2
-rw-r--r--  src/shader_recompiler/frontend/maxwell/translate/impl/texture_mipmap_level.cpp | 7
-rw-r--r--  src/video_core/CMakeLists.txt | 6
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.cpp | 4
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h | 326
-rw-r--r--  src/video_core/buffer_cache/buffer_cache_base.h | 144
-rw-r--r--  src/video_core/host1x/codecs/codec.cpp | 93
-rw-r--r--  src/video_core/host1x/codecs/codec.h | 8
-rw-r--r--  src/video_core/host1x/codecs/h264.cpp | 4
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.cpp | 2
-rw-r--r--  src/video_core/renderer_opengl/gl_texture_cache.cpp | 31
-rw-r--r--  src/video_core/renderer_opengl/gl_texture_cache.h | 4
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.cpp | 24
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_master_semaphore.cpp | 46
-rw-r--r--  src/video_core/renderer_vulkan/vk_master_semaphore.h | 15
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp | 25
-rw-r--r--  src/video_core/renderer_vulkan/vk_swapchain.cpp | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp | 12
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h | 2
-rw-r--r--  src/video_core/texture_cache/image_base.cpp | 7
-rw-r--r--  src/video_core/texture_cache/image_base.h | 2
-rw-r--r--  src/video_core/texture_cache/texture_cache.h | 194
-rw-r--r--  src/video_core/texture_cache/texture_cache_base.h | 20
-rw-r--r--  src/video_core/texture_cache/util.cpp | 92
-rw-r--r--  src/video_core/textures/astc.cpp | 5
-rw-r--r--  src/video_core/textures/bcn.cpp | 87
-rw-r--r--  src/video_core/textures/bcn.h | 17
-rw-r--r--  src/video_core/textures/workers.cpp | 15
-rw-r--r--  src/video_core/textures/workers.h | 12
-rw-r--r--  src/video_core/vulkan_common/vulkan_device.cpp | 30
-rw-r--r--  src/video_core/vulkan_common/vulkan_memory_allocator.cpp | 2
-rw-r--r--  src/yuzu/configuration/config.cpp | 7
-rw-r--r--  src/yuzu/configuration/config.h | 1
-rw-r--r--  src/yuzu/configuration/configure_dialog.cpp | 3
-rw-r--r--  src/yuzu/configuration/configure_dialog.h | 2
-rw-r--r--  src/yuzu/configuration/configure_graphics.cpp | 23
-rw-r--r--  src/yuzu/configuration/configure_graphics.h | 6
-rw-r--r--  src/yuzu/configuration/configure_graphics_advanced.cpp | 31
-rw-r--r--  src/yuzu/configuration/configure_graphics_advanced.h | 3
-rw-r--r--  src/yuzu/configuration/configure_graphics_advanced.ui | 55
-rw-r--r--  src/yuzu/configuration/configure_hotkeys.cpp | 65
-rw-r--r--  src/yuzu/configuration/configure_hotkeys.h | 2
-rw-r--r--  src/yuzu/configuration/configure_per_game.cpp | 3
-rw-r--r--  src/yuzu/configuration/configure_per_game.h | 2
-rw-r--r--  src/yuzu/game_list.cpp | 4
-rw-r--r--  src/yuzu/game_list.h | 1
-rw-r--r--  src/yuzu/main.cpp | 38
-rw-r--r--  src/yuzu/main.h | 4
-rw-r--r--  src/yuzu_cmd/config.cpp | 1
-rw-r--r--  src/yuzu_cmd/default_ini.h | 4
-rw-r--r--  src/yuzu_cmd/yuzu.cpp | 4
81 files changed, 1737 insertions, 686 deletions
diff --git a/src/audio_core/renderer/adsp/audio_renderer.cpp b/src/audio_core/renderer/adsp/audio_renderer.cpp
index 503f40349..1cbeed302 100644
--- a/src/audio_core/renderer/adsp/audio_renderer.cpp
+++ b/src/audio_core/renderer/adsp/audio_renderer.cpp
@@ -154,6 +154,11 @@ void AudioRenderer::ThreadFunc() {
return;
case RenderMessage::AudioRenderer_Render: {
+ if (system.IsShuttingDown()) [[unlikely]] {
+ std::this_thread::sleep_for(std::chrono::milliseconds(5));
+ mailbox->ADSPSendMessage(RenderMessage::AudioRenderer_RenderResponse);
+ continue;
+ }
std::array<bool, MaxRendererSessions> buffers_reset{};
std::array<u64, MaxRendererSessions> render_times_taken{};
const auto start_time{system.CoreTiming().GetClockTicks()};
diff --git a/src/audio_core/renderer/system_manager.cpp b/src/audio_core/renderer/system_manager.cpp
index 07d8ed093..300ecdbf1 100644
--- a/src/audio_core/renderer/system_manager.cpp
+++ b/src/audio_core/renderer/system_manager.cpp
@@ -27,7 +27,7 @@ bool SystemManager::InitializeUnsafe() {
if (!active) {
if (adsp.Start()) {
active = true;
- thread = std::jthread([this](std::stop_token stop_token) { ThreadFunc(); });
+ thread = std::jthread([this](std::stop_token stop_token) { ThreadFunc(stop_token); });
}
}
@@ -39,8 +39,7 @@ void SystemManager::Stop() {
return;
}
active = false;
- update.store(true);
- update.notify_all();
+ thread.request_stop();
thread.join();
adsp.Stop();
}
@@ -85,12 +84,12 @@ bool SystemManager::Remove(System& system_) {
return true;
}
-void SystemManager::ThreadFunc() {
+void SystemManager::ThreadFunc(std::stop_token stop_token) {
static constexpr char name[]{"AudioRenderSystemManager"};
MicroProfileOnThreadCreate(name);
Common::SetCurrentThreadName(name);
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
- while (active) {
+ while (active && !stop_token.stop_requested()) {
{
std::scoped_lock l{mutex1};
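The system_manager change above replaces a hand-rolled atomic wake flag with std::jthread's built-in cooperative cancellation. A minimal, standalone sketch of that standard-library pattern (not yuzu code):

```cpp
#include <chrono>
#include <thread>

int main() {
    // std::jthread hands the worker a stop_token; its destructor calls
    // request_stop() and join() automatically, so no extra flag is needed.
    std::jthread worker([](std::stop_token stop_token) {
        while (!stop_token.stop_requested()) {
            // ... generate audio render commands ...
            std::this_thread::sleep_for(std::chrono::milliseconds(5));
        }
    });
}   // worker is stopped and joined here
```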
diff --git a/src/audio_core/renderer/system_manager.h b/src/audio_core/renderer/system_manager.h
index 1f0bbd8b4..9681fd121 100644
--- a/src/audio_core/renderer/system_manager.h
+++ b/src/audio_core/renderer/system_manager.h
@@ -66,13 +66,7 @@ private:
/**
* Main thread responsible for command generation.
*/
- void ThreadFunc();
-
- enum class StreamState {
- Filling,
- Steady,
- Draining,
- };
+ void ThreadFunc(std::stop_token stop_token);
/// Core system
Core::System& core;
@@ -90,8 +84,6 @@ private:
ADSP::ADSP& adsp;
/// AudioRenderer mailbox for communication
ADSP::AudioRenderer_Mailbox* mailbox{};
- /// Atomic for main thread to wait on
- std::atomic<bool> update{};
};
} // namespace AudioCore::AudioRenderer
diff --git a/src/audio_core/sink/sink_stream.cpp b/src/audio_core/sink/sink_stream.cpp
index 13ba26e74..2331aaff9 100644
--- a/src/audio_core/sink/sink_stream.cpp
+++ b/src/audio_core/sink/sink_stream.cpp
@@ -271,8 +271,11 @@ u64 SinkStream::GetExpectedPlayedSampleCount() {
void SinkStream::WaitFreeSpace() {
std::unique_lock lk{release_mutex};
- release_cv.wait(
- lk, [this]() { return queued_buffers < max_queue_size || system.IsShuttingDown(); });
+ release_cv.wait_for(lk, std::chrono::milliseconds(5),
+ [this]() { return queued_buffers < max_queue_size; });
+ if (queued_buffers > max_queue_size + 3) {
+ release_cv.wait(lk, [this]() { return queued_buffers < max_queue_size; });
+ }
}
} // namespace AudioCore::Sink
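WaitFreeSpace() now uses a short wait_for so a stalled audio queue can no longer block indefinitely, and only falls back to an unbounded wait once the queue is well past its limit. A hedged sketch of that pattern with standard primitives (the free-function signature is illustrative, not the member function above):

```cpp
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <mutex>

// Illustrative bounded wait: poll briefly for free space, and only block
// without a timeout once the producer has run far ahead of the consumer.
void WaitFreeSpace(std::mutex& mutex, std::condition_variable& cv,
                   const std::size_t& queued_buffers, std::size_t max_queue_size) {
    std::unique_lock lk{mutex};
    cv.wait_for(lk, std::chrono::milliseconds(5),
                [&] { return queued_buffers < max_queue_size; });
    if (queued_buffers > max_queue_size + 3) {
        cv.wait(lk, [&] { return queued_buffers < max_queue_size; });
    }
}
```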
diff --git a/src/common/settings.cpp b/src/common/settings.cpp
index db1774c71..ff53e80bb 100644
--- a/src/common/settings.cpp
+++ b/src/common/settings.cpp
@@ -61,6 +61,7 @@ void LogSettings() {
log_setting("Renderer_NvdecEmulation", values.nvdec_emulation.GetValue());
log_setting("Renderer_AccelerateASTC", values.accelerate_astc.GetValue());
log_setting("Renderer_AsyncASTC", values.async_astc.GetValue());
+ log_setting("Renderer_AstcRecompression", values.astc_recompression.GetValue());
log_setting("Renderer_UseVsync", values.vsync_mode.GetValue());
log_setting("Renderer_UseReactiveFlushing", values.use_reactive_flushing.GetValue());
log_setting("Renderer_ShaderBackend", values.shader_backend.GetValue());
@@ -224,6 +225,7 @@ void RestoreGlobalState(bool is_powered_on) {
values.nvdec_emulation.SetGlobal(true);
values.accelerate_astc.SetGlobal(true);
values.async_astc.SetGlobal(true);
+ values.astc_recompression.SetGlobal(true);
values.use_reactive_flushing.SetGlobal(true);
values.shader_backend.SetGlobal(true);
values.use_asynchronous_shaders.SetGlobal(true);
@@ -232,6 +234,7 @@ void RestoreGlobalState(bool is_powered_on) {
values.bg_red.SetGlobal(true);
values.bg_green.SetGlobal(true);
values.bg_blue.SetGlobal(true);
+ values.enable_compute_pipelines.SetGlobal(true);
// System
values.language_index.SetGlobal(true);
diff --git a/src/common/settings.h b/src/common/settings.h
index 5f4caaab9..7f865b2a7 100644
--- a/src/common/settings.h
+++ b/src/common/settings.h
@@ -90,6 +90,12 @@ enum class AntiAliasing : u32 {
LastAA = Smaa,
};
+enum class AstcRecompression : u32 {
+ Uncompressed = 0,
+ Bc1 = 1,
+ Bc3 = 2,
+};
+
struct ResolutionScalingInfo {
u32 up_scale{1};
u32 down_shift{0};
@@ -472,6 +478,10 @@ struct Values {
SwitchableSetting<bool> use_fast_gpu_time{true, "use_fast_gpu_time"};
SwitchableSetting<bool> use_vulkan_driver_pipeline_cache{true,
"use_vulkan_driver_pipeline_cache"};
+ SwitchableSetting<bool> enable_compute_pipelines{false, "enable_compute_pipelines"};
+ SwitchableSetting<AstcRecompression, true> astc_recompression{
+ AstcRecompression::Uncompressed, AstcRecompression::Uncompressed, AstcRecompression::Bc3,
+ "astc_recompression"};
SwitchableSetting<u8> bg_red{0, "bg_red"};
SwitchableSetting<u8> bg_green{0, "bg_green"};
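The new astc_recompression setting selects how decoded ASTC textures are stored on the GPU. A hypothetical sketch of mapping the enum to a storage format (GpuFormat and PickAstcTarget are illustrative names, not yuzu's API):

```cpp
enum class AstcRecompression : unsigned int { Uncompressed = 0, Bc1 = 1, Bc3 = 2 };
enum class GpuFormat { RGBA8, BC1, BC3 };

GpuFormat PickAstcTarget(AstcRecompression mode) {
    switch (mode) {
    case AstcRecompression::Bc1:
        return GpuFormat::BC1;  // ~8:1 vs RGBA8, lowest quality, 1-bit alpha
    case AstcRecompression::Bc3:
        return GpuFormat::BC3;  // ~4:1 vs RGBA8, keeps a full alpha channel
    case AstcRecompression::Uncompressed:
    default:
        return GpuFormat::RGBA8;  // full quality, highest VRAM use
    }
}
```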
diff --git a/src/core/core.cpp b/src/core/core.cpp
index b5f62690e..4406ae30e 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -117,8 +117,7 @@ FileSys::VirtualFile GetGameFileFromPath(const FileSys::VirtualFilesystem& vfs,
return nullptr;
}
- return FileSys::ConcatenatedVfsFile::MakeConcatenatedFile(std::move(concat),
- dir->GetName());
+ return FileSys::ConcatenatedVfsFile::MakeConcatenatedFile(concat, dir->GetName());
}
if (Common::FS::IsDir(path)) {
diff --git a/src/core/file_sys/romfs.cpp b/src/core/file_sys/romfs.cpp
index ddcfe5980..fb5683a6b 100644
--- a/src/core/file_sys/romfs.cpp
+++ b/src/core/file_sys/romfs.cpp
@@ -140,7 +140,8 @@ VirtualFile CreateRomFS(VirtualDir dir, VirtualDir ext) {
return nullptr;
RomFSBuildContext ctx{dir, ext};
- return ConcatenatedVfsFile::MakeConcatenatedFile(0, ctx.Build(), dir->GetName());
+ auto file_map = ctx.Build();
+ return ConcatenatedVfsFile::MakeConcatenatedFile(0, file_map, dir->GetName());
}
} // namespace FileSys
diff --git a/src/core/file_sys/vfs_concat.cpp b/src/core/file_sys/vfs_concat.cpp
index d23623aa0..853b893a1 100644
--- a/src/core/file_sys/vfs_concat.cpp
+++ b/src/core/file_sys/vfs_concat.cpp
@@ -10,84 +10,105 @@
namespace FileSys {
-static bool VerifyConcatenationMapContinuity(const std::multimap<u64, VirtualFile>& map) {
- const auto last_valid = --map.end();
- for (auto iter = map.begin(); iter != last_valid;) {
- const auto old = iter++;
- if (old->first + old->second->GetSize() != iter->first) {
+ConcatenatedVfsFile::ConcatenatedVfsFile(ConcatenationMap&& concatenation_map_, std::string&& name_)
+ : concatenation_map(std::move(concatenation_map_)), name(std::move(name_)) {
+ DEBUG_ASSERT(this->VerifyContinuity());
+}
+
+bool ConcatenatedVfsFile::VerifyContinuity() const {
+ u64 last_offset = 0;
+ for (auto& entry : concatenation_map) {
+ if (entry.offset != last_offset) {
return false;
}
- }
-
- return map.begin()->first == 0;
-}
-ConcatenatedVfsFile::ConcatenatedVfsFile(std::vector<VirtualFile> files_, std::string name_)
- : name(std::move(name_)) {
- std::size_t next_offset = 0;
- for (const auto& file : files_) {
- files.emplace(next_offset, file);
- next_offset += file->GetSize();
+ last_offset = entry.offset + entry.file->GetSize();
}
-}
-ConcatenatedVfsFile::ConcatenatedVfsFile(std::multimap<u64, VirtualFile> files_, std::string name_)
- : files(std::move(files_)), name(std::move(name_)) {
- ASSERT(VerifyConcatenationMapContinuity(files));
+ return true;
}
ConcatenatedVfsFile::~ConcatenatedVfsFile() = default;
-VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(std::vector<VirtualFile> files,
- std::string name) {
- if (files.empty())
+VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(const std::vector<VirtualFile>& files,
+ std::string&& name) {
+ // Fold trivial cases.
+ if (files.empty()) {
return nullptr;
- if (files.size() == 1)
- return files[0];
+ }
+ if (files.size() == 1) {
+ return files.front();
+ }
+
+ // Make the concatenation map from the input.
+ std::vector<ConcatenationEntry> concatenation_map;
+ concatenation_map.reserve(files.size());
+ u64 last_offset = 0;
+
+ for (auto& file : files) {
+ concatenation_map.emplace_back(ConcatenationEntry{
+ .offset = last_offset,
+ .file = file,
+ });
+
+ last_offset += file->GetSize();
+ }
- return VirtualFile(new ConcatenatedVfsFile(std::move(files), std::move(name)));
+ return VirtualFile(new ConcatenatedVfsFile(std::move(concatenation_map), std::move(name)));
}
VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte,
- std::multimap<u64, VirtualFile> files,
- std::string name) {
- if (files.empty())
+ const std::multimap<u64, VirtualFile>& files,
+ std::string&& name) {
+ // Fold trivial cases.
+ if (files.empty()) {
return nullptr;
- if (files.size() == 1)
+ }
+ if (files.size() == 1) {
return files.begin()->second;
+ }
- const auto last_valid = --files.end();
- for (auto iter = files.begin(); iter != last_valid;) {
- const auto old = iter++;
- if (old->first + old->second->GetSize() != iter->first) {
- files.emplace(old->first + old->second->GetSize(),
- std::make_shared<StaticVfsFile>(filler_byte, iter->first - old->first -
- old->second->GetSize()));
+ // Make the concatenation map from the input.
+ std::vector<ConcatenationEntry> concatenation_map;
+
+ concatenation_map.reserve(files.size());
+ u64 last_offset = 0;
+
+ // Iteration of a multimap is ordered, so offset will be strictly non-decreasing.
+ for (auto& [offset, file] : files) {
+ if (offset > last_offset) {
+ concatenation_map.emplace_back(ConcatenationEntry{
+ .offset = last_offset,
+ .file = std::make_shared<StaticVfsFile>(filler_byte, offset - last_offset),
+ });
}
- }
- // Ensure the map starts at offset 0 (start of file), otherwise pad to fill.
- if (files.begin()->first != 0)
- files.emplace(0, std::make_shared<StaticVfsFile>(filler_byte, files.begin()->first));
+ concatenation_map.emplace_back(ConcatenationEntry{
+ .offset = offset,
+ .file = file,
+ });
+
+ last_offset = offset + file->GetSize();
+ }
- return VirtualFile(new ConcatenatedVfsFile(std::move(files), std::move(name)));
+ return VirtualFile(new ConcatenatedVfsFile(std::move(concatenation_map), std::move(name)));
}
std::string ConcatenatedVfsFile::GetName() const {
- if (files.empty()) {
+ if (concatenation_map.empty()) {
return "";
}
if (!name.empty()) {
return name;
}
- return files.begin()->second->GetName();
+ return concatenation_map.front().file->GetName();
}
std::size_t ConcatenatedVfsFile::GetSize() const {
- if (files.empty()) {
+ if (concatenation_map.empty()) {
return 0;
}
- return files.rbegin()->first + files.rbegin()->second->GetSize();
+ return concatenation_map.back().offset + concatenation_map.back().file->GetSize();
}
bool ConcatenatedVfsFile::Resize(std::size_t new_size) {
@@ -95,10 +116,10 @@ bool ConcatenatedVfsFile::Resize(std::size_t new_size) {
}
VirtualDir ConcatenatedVfsFile::GetContainingDirectory() const {
- if (files.empty()) {
+ if (concatenation_map.empty()) {
return nullptr;
}
- return files.begin()->second->GetContainingDirectory();
+ return concatenation_map.front().file->GetContainingDirectory();
}
bool ConcatenatedVfsFile::IsWritable() const {
@@ -110,25 +131,45 @@ bool ConcatenatedVfsFile::IsReadable() const {
}
std::size_t ConcatenatedVfsFile::Read(u8* data, std::size_t length, std::size_t offset) const {
- auto entry = --files.end();
- for (auto iter = files.begin(); iter != files.end(); ++iter) {
- if (iter->first > offset) {
- entry = --iter;
+ const ConcatenationEntry key{
+ .offset = offset,
+ .file = nullptr,
+ };
+
+ // Read nothing if the map is empty.
+ if (concatenation_map.empty()) {
+ return 0;
+ }
+
+ // Binary search to find the iterator to the first position we can check.
+ // It must exist, since we are not empty and are comparing unsigned integers.
+ auto it = std::prev(std::upper_bound(concatenation_map.begin(), concatenation_map.end(), key));
+ u64 cur_length = length;
+ u64 cur_offset = offset;
+
+ while (cur_length > 0 && it != concatenation_map.end()) {
+ // Check if we can read the file at this position.
+ const auto& file = it->file;
+ const u64 file_offset = it->offset;
+ const u64 file_size = file->GetSize();
+
+ if (cur_offset >= file_offset + file_size) {
+ // Entirely out of bounds read.
break;
}
- }
- if (entry->first + entry->second->GetSize() <= offset)
- return 0;
+ // Read the file at this position.
+ const u64 intended_read_size = std::min<u64>(cur_length, file_size);
+ const u64 actual_read_size =
+ file->Read(data + (cur_offset - offset), intended_read_size, cur_offset - file_offset);
- const auto read_in =
- std::min<u64>(entry->first + entry->second->GetSize() - offset, entry->second->GetSize());
- if (length > read_in) {
- return entry->second->Read(data, read_in, offset - entry->first) +
- Read(data + read_in, length - read_in, offset + read_in);
+ // Update tracking.
+ cur_offset += actual_read_size;
+ cur_length -= actual_read_size;
+ it++;
}
- return entry->second->Read(data, std::min<u64>(read_in, length), offset - entry->first);
+ return cur_offset - offset;
}
std::size_t ConcatenatedVfsFile::Write(const u8* data, std::size_t length, std::size_t offset) {
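The rewritten Read() replaces recursion over a std::multimap with a binary search over an offset-sorted vector of entries. A self-contained sketch of the lookup step (Entry and FindEntry are illustrative stand-ins for ConcatenationEntry and the std::upper_bound call above):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Entry {
    std::uint64_t offset;  // where this piece starts in the concatenated file
    std::uint64_t size;    // stand-in for file->GetSize()
    bool operator<(const Entry& other) const { return offset < other.offset; }
};

// Find the index of the entry that may contain 'offset'. Assumes the map is
// non-empty, sorted by offset, and starts at 0 (what VerifyContinuity checks).
std::size_t FindEntry(const std::vector<Entry>& map, std::uint64_t offset) {
    const Entry key{offset, 0};
    // upper_bound gives the first entry strictly past 'offset';
    // stepping back one lands on the entry that covers it.
    const auto it = std::upper_bound(map.begin(), map.end(), key);
    return static_cast<std::size_t>(std::prev(it) - map.begin());
}
```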
diff --git a/src/core/file_sys/vfs_concat.h b/src/core/file_sys/vfs_concat.h
index 9be0261b6..6b329d545 100644
--- a/src/core/file_sys/vfs_concat.h
+++ b/src/core/file_sys/vfs_concat.h
@@ -3,6 +3,7 @@
#pragma once
+#include <compare>
#include <map>
#include <memory>
#include "core/file_sys/vfs.h"
@@ -12,19 +13,33 @@ namespace FileSys {
// Class that wraps multiple vfs files and concatenates them, making reads seamless. Currently
// read-only.
class ConcatenatedVfsFile : public VfsFile {
- explicit ConcatenatedVfsFile(std::vector<VirtualFile> files, std::string name_);
- explicit ConcatenatedVfsFile(std::multimap<u64, VirtualFile> files, std::string name_);
+private:
+ struct ConcatenationEntry {
+ u64 offset;
+ VirtualFile file;
+
+ auto operator<=>(const ConcatenationEntry& other) const {
+ return this->offset <=> other.offset;
+ }
+ };
+ using ConcatenationMap = std::vector<ConcatenationEntry>;
+
+ explicit ConcatenatedVfsFile(std::vector<ConcatenationEntry>&& concatenation_map,
+ std::string&& name);
+ bool VerifyContinuity() const;
public:
~ConcatenatedVfsFile() override;
/// Wrapper function to allow for more efficient handling of files.size() == 0, 1 cases.
- static VirtualFile MakeConcatenatedFile(std::vector<VirtualFile> files, std::string name);
+ static VirtualFile MakeConcatenatedFile(const std::vector<VirtualFile>& files,
+ std::string&& name);
/// Convenience function that turns a map of offsets to files into a concatenated file, filling
/// gaps with a given filler byte.
- static VirtualFile MakeConcatenatedFile(u8 filler_byte, std::multimap<u64, VirtualFile> files,
- std::string name);
+ static VirtualFile MakeConcatenatedFile(u8 filler_byte,
+ const std::multimap<u64, VirtualFile>& files,
+ std::string&& name);
std::string GetName() const override;
std::size_t GetSize() const override;
@@ -37,8 +52,7 @@ public:
bool Rename(std::string_view new_name) override;
private:
- // Maps starting offset to file -- more efficient.
- std::multimap<u64, VirtualFile> files;
+ ConcatenationMap concatenation_map;
std::string name;
};
diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp
index 366880711..bbfea7117 100644
--- a/src/core/hid/emulated_controller.cpp
+++ b/src/core/hid/emulated_controller.cpp
@@ -1283,9 +1283,14 @@ bool EmulatedController::HasNfc() const {
}
bool EmulatedController::WriteNfc(const std::vector<u8>& data) {
- auto& nfc_output_device = output_devices[3];
+ auto& nfc_output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)];
+ auto& nfc_virtual_output_device = output_devices[3];
+
+ if (nfc_output_device->SupportsNfc() != Common::Input::NfcState::NotSupported) {
+ return nfc_output_device->WriteNfcData(data) == Common::Input::NfcState::Success;
+ }
- return nfc_output_device->WriteNfcData(data) == Common::Input::NfcState::Success;
+ return nfc_virtual_output_device->WriteNfcData(data) == Common::Input::NfcState::Success;
}
void EmulatedController::SetLedPattern() {
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index 7c0bd16f0..96496e990 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -144,14 +144,10 @@ private:
class KScopedMemoryBlockManagerAuditor {
public:
- explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {
- ASSERT(m_manager->CheckState());
- }
+ explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {}
explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m)
: KScopedMemoryBlockManagerAuditor(std::addressof(m)) {}
- ~KScopedMemoryBlockManagerAuditor() {
- ASSERT(m_manager->CheckState());
- }
+ ~KScopedMemoryBlockManagerAuditor() = default;
private:
KMemoryBlockManager* m_manager;
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index ef4aec4ea..28818c813 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -979,8 +979,8 @@ void Controller_NPad::VibrateController(
}
void Controller_NPad::VibrateControllers(
- const std::vector<Core::HID::VibrationDeviceHandle>& vibration_device_handles,
- const std::vector<Core::HID::VibrationValue>& vibration_values) {
+ std::span<const Core::HID::VibrationDeviceHandle> vibration_device_handles,
+ std::span<const Core::HID::VibrationValue> vibration_values) {
if (!Settings::values.vibration_enabled.GetValue() && !permit_vibration_session_enabled) {
return;
}
diff --git a/src/core/hle/service/hid/controllers/npad.h b/src/core/hle/service/hid/controllers/npad.h
index 9cfe298f1..776411261 100644
--- a/src/core/hle/service/hid/controllers/npad.h
+++ b/src/core/hle/service/hid/controllers/npad.h
@@ -112,8 +112,8 @@ public:
const Core::HID::VibrationValue& vibration_value);
void VibrateControllers(
- const std::vector<Core::HID::VibrationDeviceHandle>& vibration_device_handles,
- const std::vector<Core::HID::VibrationValue>& vibration_values);
+ std::span<const Core::HID::VibrationDeviceHandle> vibration_device_handles,
+ std::span<const Core::HID::VibrationValue> vibration_values);
Core::HID::VibrationValue GetLastVibration(
const Core::HID::VibrationDeviceHandle& vibration_device_handle) const;
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 87e7b864a..2bf1d8a27 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -1601,16 +1601,16 @@ void Hid::SendVibrationValues(HLERequestContext& ctx) {
IPC::RequestParser rp{ctx};
const auto applet_resource_user_id{rp.Pop<u64>()};
- const auto handles = ctx.ReadBuffer(0);
- const auto vibrations = ctx.ReadBuffer(1);
-
- std::vector<Core::HID::VibrationDeviceHandle> vibration_device_handles(
- handles.size() / sizeof(Core::HID::VibrationDeviceHandle));
- std::vector<Core::HID::VibrationValue> vibration_values(vibrations.size() /
- sizeof(Core::HID::VibrationValue));
-
- std::memcpy(vibration_device_handles.data(), handles.data(), handles.size());
- std::memcpy(vibration_values.data(), vibrations.data(), vibrations.size());
+ const auto handle_data = ctx.ReadBuffer(0);
+ const auto handle_count = ctx.GetReadBufferNumElements<Core::HID::VibrationDeviceHandle>(0);
+ const auto vibration_data = ctx.ReadBuffer(1);
+ const auto vibration_count = ctx.GetReadBufferNumElements<Core::HID::VibrationValue>(1);
+
+ auto vibration_device_handles =
+ std::span(reinterpret_cast<const Core::HID::VibrationDeviceHandle*>(handle_data.data()),
+ handle_count);
+ auto vibration_values = std::span(
+ reinterpret_cast<const Core::HID::VibrationValue*>(vibration_data.data()), vibration_count);
applet_resource->GetController<Controller_NPad>(HidController::NPad)
.VibrateControllers(vibration_device_handles, vibration_values);
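SendVibrationValues() now views the IPC read buffers in place as spans instead of memcpy-ing them into vectors. A hedged sketch of that reinterpretation pattern for trivially copyable element types (ViewAs is a hypothetical helper, not a yuzu function):

```cpp
#include <cstddef>
#include <span>
#include <type_traits>

// View a raw byte buffer as a span of T without copying. Only valid when T is
// trivially copyable and the buffer is correctly sized and aligned for T.
template <typename T>
std::span<const T> ViewAs(std::span<const std::byte> bytes) {
    static_assert(std::is_trivially_copyable_v<T>);
    return {reinterpret_cast<const T*>(bytes.data()), bytes.size() / sizeof(T)};
}
```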
diff --git a/src/core/hle/service/nfc/common/amiibo_crypto.cpp b/src/core/hle/service/nfc/common/amiibo_crypto.cpp
index f3901ee8d..b2bcb68c3 100644
--- a/src/core/hle/service/nfc/common/amiibo_crypto.cpp
+++ b/src/core/hle/service/nfc/common/amiibo_crypto.cpp
@@ -52,9 +52,6 @@ bool IsAmiiboValid(const EncryptedNTAG215File& ntag_file) {
if (ntag_file.compability_container != 0xEEFF10F1U) {
return false;
}
- if (amiibo_data.constant_value != 0xA5) {
- return false;
- }
if (amiibo_data.model_info.tag_type != NFC::PackedTagType::Type2) {
return false;
}
diff --git a/src/core/hle/service/nfc/common/device.cpp b/src/core/hle/service/nfc/common/device.cpp
index 322bde2ed..0bd7900e1 100644
--- a/src/core/hle/service/nfc/common/device.cpp
+++ b/src/core/hle/service/nfc/common/device.cpp
@@ -119,18 +119,31 @@ bool NfcDevice::LoadNfcTag(std::span<const u8> data) {
memcpy(&tag_data, data.data(), sizeof(NFP::EncryptedNTAG215File));
is_plain_amiibo = NFP::AmiiboCrypto::IsAmiiboValid(tag_data);
+ is_write_protected = false;
+ device_state = DeviceState::TagFound;
+ deactivate_event->GetReadableEvent().Clear();
+ activate_event->Signal();
+
+ // Fallback for plain amiibos
if (is_plain_amiibo) {
- encrypted_tag_data = NFP::AmiiboCrypto::EncodedDataToNfcData(tag_data);
LOG_INFO(Service_NFP, "Using plain amiibo");
- } else {
- tag_data = {};
+ encrypted_tag_data = NFP::AmiiboCrypto::EncodedDataToNfcData(tag_data);
+ return true;
+ }
+
+ // Fallback for encrypted amiibos without keys
+ if (!NFP::AmiiboCrypto::IsKeyAvailable()) {
+ LOG_INFO(Service_NFC, "Loading amiibo without keys");
memcpy(&encrypted_tag_data, data.data(), sizeof(NFP::EncryptedNTAG215File));
+ BuildAmiiboWithoutKeys();
+ is_plain_amiibo = true;
+ is_write_protected = true;
+ return true;
}
- device_state = DeviceState::TagFound;
- deactivate_event->GetReadableEvent().Clear();
- activate_event->Signal();
+ tag_data = {};
+ memcpy(&encrypted_tag_data, data.data(), sizeof(NFP::EncryptedNTAG215File));
return true;
}
@@ -346,23 +359,15 @@ Result NfcDevice::Mount(NFP::ModelType model_type, NFP::MountTarget mount_target
return ResultWrongDeviceState;
}
- // The loaded amiibo is not encrypted
- if (is_plain_amiibo) {
- device_state = DeviceState::TagMounted;
- mount_target = mount_target_;
- return ResultSuccess;
- }
-
if (!NFP::AmiiboCrypto::IsAmiiboValid(encrypted_tag_data)) {
LOG_ERROR(Service_NFP, "Not an amiibo");
return ResultNotAnAmiibo;
}
- // Mark amiibos as read only when keys are missing
- if (!NFP::AmiiboCrypto::IsKeyAvailable()) {
- LOG_ERROR(Service_NFP, "No keys detected");
+ // The loaded amiibo is not encrypted
+ if (is_plain_amiibo) {
device_state = DeviceState::TagMounted;
- mount_target = NFP::MountTarget::Rom;
+ mount_target = mount_target_;
return ResultSuccess;
}
@@ -421,11 +426,11 @@ Result NfcDevice::Flush() {
tag_data.write_counter++;
- FlushWithBreak(NFP::BreakType::Normal);
+ const auto result = FlushWithBreak(NFP::BreakType::Normal);
is_data_moddified = false;
- return ResultSuccess;
+ return result;
}
Result NfcDevice::FlushDebug() {
@@ -444,11 +449,11 @@ Result NfcDevice::FlushDebug() {
tag_data.write_counter++;
- FlushWithBreak(NFP::BreakType::Normal);
+ const auto result = FlushWithBreak(NFP::BreakType::Normal);
is_data_moddified = false;
- return ResultSuccess;
+ return result;
}
Result NfcDevice::FlushWithBreak(NFP::BreakType break_type) {
@@ -457,6 +462,11 @@ Result NfcDevice::FlushWithBreak(NFP::BreakType break_type) {
return ResultWrongDeviceState;
}
+ if (is_write_protected) {
+ LOG_ERROR(Service_NFP, "No keys available skipping write request");
+ return ResultSuccess;
+ }
+
std::vector<u8> data(sizeof(NFP::EncryptedNTAG215File));
if (is_plain_amiibo) {
memcpy(data.data(), &tag_data, sizeof(tag_data));
@@ -1033,7 +1043,6 @@ Result NfcDevice::GetAll(NFP::NfpData& data) const {
}
NFP::CommonInfo common_info{};
- Service::Mii::MiiManager manager;
const u64 application_id = tag_data.application_id;
GetCommonInfo(common_info);
@@ -1249,6 +1258,28 @@ void NfcDevice::UpdateRegisterInfoCrc() {
tag_data.register_info_crc = crc.checksum();
}
+void NfcDevice::BuildAmiiboWithoutKeys() {
+ Service::Mii::MiiManager manager;
+ auto& settings = tag_data.settings;
+
+ tag_data = NFP::AmiiboCrypto::NfcDataToEncodedData(encrypted_tag_data);
+
+ // Common info
+ tag_data.write_counter = 0;
+ tag_data.amiibo_version = 0;
+ settings.write_date = GetAmiiboDate(GetCurrentPosixTime());
+
+ // Register info
+ SetAmiiboName(settings, {'y', 'u', 'z', 'u', 'A', 'm', 'i', 'i', 'b', 'o'});
+ settings.settings.font_region.Assign(0);
+ settings.init_date = GetAmiiboDate(GetCurrentPosixTime());
+ tag_data.owner_mii = manager.BuildFromStoreData(manager.BuildDefault(0));
+
+ // Admin info
+ settings.settings.amiibo_initialized.Assign(1);
+ settings.settings.appdata_initialized.Assign(0);
+}
+
u64 NfcDevice::GetHandle() const {
// Generate a handle based of the npad id
return static_cast<u64>(npad_id);
diff --git a/src/core/hle/service/nfc/common/device.h b/src/core/hle/service/nfc/common/device.h
index 98e1945c1..6a37e8458 100644
--- a/src/core/hle/service/nfc/common/device.h
+++ b/src/core/hle/service/nfc/common/device.h
@@ -110,6 +110,8 @@ private:
void UpdateSettingsCrc();
void UpdateRegisterInfoCrc();
+ void BuildAmiiboWithoutKeys();
+
bool is_controller_set{};
int callback_key;
const Core::HID::NpadIdType npad_id;
@@ -128,6 +130,7 @@ private:
bool is_data_moddified{};
bool is_app_area_open{};
bool is_plain_amiibo{};
+ bool is_write_protected{};
NFP::MountTarget mount_target{NFP::MountTarget::None};
NFP::NTAG215File tag_data{};
diff --git a/src/input_common/drivers/joycon.cpp b/src/input_common/drivers/joycon.cpp
index 8b57ebe07..b2b5677c8 100644
--- a/src/input_common/drivers/joycon.cpp
+++ b/src/input_common/drivers/joycon.cpp
@@ -195,8 +195,8 @@ void Joycons::RegisterNewDevice(SDL_hid_device_info* device_info) {
OnMotionUpdate(port, type, id, value);
}},
.on_ring_data = {[this](f32 ring_data) { OnRingConUpdate(ring_data); }},
- .on_amiibo_data = {[this, port](const std::vector<u8>& amiibo_data) {
- OnAmiiboUpdate(port, amiibo_data);
+ .on_amiibo_data = {[this, port, type](const std::vector<u8>& amiibo_data) {
+ OnAmiiboUpdate(port, type, amiibo_data);
}},
.on_camera_data = {[this, port](const std::vector<u8>& camera_data,
Joycon::IrsResolution format) {
@@ -291,9 +291,13 @@ Common::Input::NfcState Joycons::SupportsNfc(const PadIdentifier& identifier_) c
return Common::Input::NfcState::Success;
};
-Common::Input::NfcState Joycons::WriteNfcData(const PadIdentifier& identifier_,
+Common::Input::NfcState Joycons::WriteNfcData(const PadIdentifier& identifier,
const std::vector<u8>& data) {
- return Common::Input::NfcState::NotSupported;
+ auto handle = GetHandle(identifier);
+ if (handle->WriteNfcData(data) != Joycon::DriverResult::Success) {
+ return Common::Input::NfcState::WriteFailed;
+ }
+ return Common::Input::NfcState::Success;
};
Common::Input::DriverResult Joycons::SetPollingMode(const PadIdentifier& identifier,
@@ -398,8 +402,9 @@ void Joycons::OnRingConUpdate(f32 ring_data) {
SetAxis(identifier, 100, ring_data);
}
-void Joycons::OnAmiiboUpdate(std::size_t port, const std::vector<u8>& amiibo_data) {
- const auto identifier = GetIdentifier(port, Joycon::ControllerType::Right);
+void Joycons::OnAmiiboUpdate(std::size_t port, Joycon::ControllerType type,
+ const std::vector<u8>& amiibo_data) {
+ const auto identifier = GetIdentifier(port, type);
const auto nfc_state = amiibo_data.empty() ? Common::Input::NfcState::AmiiboRemoved
: Common::Input::NfcState::NewAmiibo;
SetNfc(identifier, {nfc_state, amiibo_data});
diff --git a/src/input_common/drivers/joycon.h b/src/input_common/drivers/joycon.h
index 5b40817e2..e3f0ad78f 100644
--- a/src/input_common/drivers/joycon.h
+++ b/src/input_common/drivers/joycon.h
@@ -81,7 +81,8 @@ private:
void OnMotionUpdate(std::size_t port, Joycon::ControllerType type, int id,
const Joycon::MotionData& value);
void OnRingConUpdate(f32 ring_data);
- void OnAmiiboUpdate(std::size_t port, const std::vector<u8>& amiibo_data);
+ void OnAmiiboUpdate(std::size_t port, Joycon::ControllerType type,
+ const std::vector<u8>& amiibo_data);
void OnCameraUpdate(std::size_t port, const std::vector<u8>& camera_data,
Joycon::IrsResolution format);
diff --git a/src/input_common/helpers/joycon_driver.cpp b/src/input_common/helpers/joycon_driver.cpp
index 83429a336..95106f16d 100644
--- a/src/input_common/helpers/joycon_driver.cpp
+++ b/src/input_common/helpers/joycon_driver.cpp
@@ -492,6 +492,26 @@ DriverResult JoyconDriver::SetRingConMode() {
return result;
}
+DriverResult JoyconDriver::WriteNfcData(std::span<const u8> data) {
+ std::scoped_lock lock{mutex};
+ disable_input_thread = true;
+
+ if (!supported_features.nfc) {
+ return DriverResult::NotSupported;
+ }
+ if (!nfc_protocol->IsEnabled()) {
+ return DriverResult::Disabled;
+ }
+ if (!amiibo_detected) {
+ return DriverResult::ErrorWritingData;
+ }
+
+ const auto result = nfc_protocol->WriteAmiibo(data);
+
+ disable_input_thread = false;
+ return result;
+}
+
bool JoyconDriver::IsConnected() const {
std::scoped_lock lock{mutex};
return is_connected.load();
diff --git a/src/input_common/helpers/joycon_driver.h b/src/input_common/helpers/joycon_driver.h
index 72a9e71dc..e9b2fccbb 100644
--- a/src/input_common/helpers/joycon_driver.h
+++ b/src/input_common/helpers/joycon_driver.h
@@ -49,6 +49,7 @@ public:
DriverResult SetIrMode();
DriverResult SetNfcMode();
DriverResult SetRingConMode();
+ DriverResult WriteNfcData(std::span<const u8> data);
void SetCallbacks(const JoyconCallbacks& callbacks);
diff --git a/src/input_common/helpers/joycon_protocol/common_protocol.cpp b/src/input_common/helpers/joycon_protocol/common_protocol.cpp
index 077d72cd0..51669261a 100644
--- a/src/input_common/helpers/joycon_protocol/common_protocol.cpp
+++ b/src/input_common/helpers/joycon_protocol/common_protocol.cpp
@@ -265,7 +265,7 @@ DriverResult JoyconCommonProtocol::SendMCUData(ReportMode report_mode, MCUSubCom
DriverResult JoyconCommonProtocol::WaitSetMCUMode(ReportMode report_mode, MCUMode mode) {
MCUCommandResponse output{};
- constexpr std::size_t MaxTries{8};
+ constexpr std::size_t MaxTries{16};
std::size_t tries{};
do {
diff --git a/src/input_common/helpers/joycon_protocol/joycon_types.h b/src/input_common/helpers/joycon_protocol/joycon_types.h
index b03143e04..5007b0e18 100644
--- a/src/input_common/helpers/joycon_protocol/joycon_types.h
+++ b/src/input_common/helpers/joycon_protocol/joycon_types.h
@@ -23,6 +23,7 @@ constexpr std::array<u8, 8> DefaultVibrationBuffer{0x0, 0x1, 0x40, 0x40, 0x0, 0x
using MacAddress = std::array<u8, 6>;
using SerialNumber = std::array<u8, 15>;
+using TagUUID = std::array<u8, 7>;
enum class ControllerType : u8 {
None = 0x00,
@@ -276,12 +277,13 @@ enum class MCUPacketFlag : u8 {
LastCommandPacket = 0x08,
};
-enum class NFCReadCommand : u8 {
+enum class NFCCommand : u8 {
CancelAll = 0x00,
StartPolling = 0x01,
StopPolling = 0x02,
StartWaitingRecieve = 0x04,
- Ntag = 0x06,
+ ReadNtag = 0x06,
+ WriteNtag = 0x08,
Mifare = 0x0F,
};
@@ -292,14 +294,19 @@ enum class NFCTagType : u8 {
enum class NFCPages {
Block0 = 0,
+ Block3 = 3,
Block45 = 45,
Block135 = 135,
Block231 = 231,
};
enum class NFCStatus : u8 {
+ Ready = 0x00,
+ Polling = 0x01,
LastPackage = 0x04,
+ WriteDone = 0x05,
TagLost = 0x07,
+ WriteReady = 0x09,
};
enum class IrsMode : u8 {
@@ -394,6 +401,7 @@ enum class DriverResult {
InvalidHandle,
NotSupported,
Disabled,
+ Delayed,
Unknown,
};
@@ -558,13 +566,32 @@ static_assert(sizeof(NFCReadBlockCommand) == 0x9, "NFCReadBlockCommand is an inv
struct NFCReadCommandData {
u8 unknown;
u8 uuid_length;
- u8 unknown_2;
- std::array<u8, 6> uid;
+ TagUUID uid;
NFCTagType tag_type;
NFCReadBlockCommand read_block;
};
static_assert(sizeof(NFCReadCommandData) == 0x13, "NFCReadCommandData is an invalid size");
+#pragma pack(push, 1)
+struct NFCWriteCommandData {
+ u8 unknown;
+ u8 uuid_length;
+ TagUUID uid;
+ NFCTagType tag_type;
+ u8 unknown2;
+ u8 unknown3;
+ u8 unknown4;
+ u8 unknown5;
+ u8 unknown6;
+ u8 unknown7;
+ u8 unknown8;
+ u8 magic;
+ u16_be write_count;
+ u8 amiibo_version;
+};
+static_assert(sizeof(NFCWriteCommandData) == 0x15, "NFCWriteCommandData is an invalid size");
+#pragma pack(pop)
+
struct NFCPollingCommandData {
u8 enable_mifare;
u8 unknown_1;
@@ -575,9 +602,9 @@ struct NFCPollingCommandData {
static_assert(sizeof(NFCPollingCommandData) == 0x05, "NFCPollingCommandData is an invalid size");
struct NFCRequestState {
- NFCReadCommand command_argument;
+ NFCCommand command_argument;
+ u8 block_id;
u8 packet_id;
- INSERT_PADDING_BYTES(0x1);
MCUPacketFlag packet_flag;
u8 data_length;
union {
@@ -590,6 +617,18 @@ struct NFCRequestState {
};
static_assert(sizeof(NFCRequestState) == 0x26, "NFCRequestState is an invalid size");
+struct NFCDataChunk {
+ u8 nfc_page;
+ u8 data_size;
+ std::array<u8, 0xFF> data;
+};
+
+struct NFCWritePackage {
+ NFCWriteCommandData command_data;
+ u8 number_of_chunks;
+ std::array<NFCDataChunk, 4> data_chunks;
+};
+
struct IrsConfigure {
MCUCommand command;
MCUSubCommand sub_command;
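NFCWriteCommandData above is a wire-format struct, so it is declared under #pragma pack(1) and pinned with a static_assert like the other protocol types. A standalone sketch of that pack-and-assert pattern (ExampleWireHeader is illustrative, not a Joy-Con structure):

```cpp
#include <cstdint>

#pragma pack(push, 1)
struct ExampleWireHeader {
    std::uint8_t command;    // 1 byte
    std::uint16_t length;    // would be padded to offset 2 without pack(1)
    std::uint32_t checksum;  // 4 bytes
};
#pragma pack(pop)

// With padding disabled the struct matches the byte layout on the wire.
static_assert(sizeof(ExampleWireHeader) == 7, "ExampleWireHeader is an invalid size");
```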
diff --git a/src/input_common/helpers/joycon_protocol/nfc.cpp b/src/input_common/helpers/joycon_protocol/nfc.cpp
index 77ea6d5cf..3b7a628e5 100644
--- a/src/input_common/helpers/joycon_protocol/nfc.cpp
+++ b/src/input_common/helpers/joycon_protocol/nfc.cpp
@@ -34,6 +34,12 @@ DriverResult NfcProtocol::EnableNfc() {
result = ConfigureMCU(config);
}
+ if (result == DriverResult::Success) {
+ result = WaitSetMCUMode(ReportMode::NFC_IR_MODE_60HZ, MCUMode::NFC);
+ }
+ if (result == DriverResult::Success) {
+ result = WaitUntilNfcIs(NFCStatus::Ready);
+ }
return result;
}
@@ -56,13 +62,20 @@ DriverResult NfcProtocol::StartNFCPollingMode() {
LOG_DEBUG(Input, "Start NFC pooling Mode");
ScopedSetBlocking sb(this);
DriverResult result{DriverResult::Success};
- TagFoundData tag_data{};
if (result == DriverResult::Success) {
- result = WaitSetMCUMode(ReportMode::NFC_IR_MODE_60HZ, MCUMode::NFC);
+ MCUCommandResponse output{};
+ result = SendStopPollingRequest(output);
+ }
+ if (result == DriverResult::Success) {
+ result = WaitUntilNfcIs(NFCStatus::Ready);
+ }
+ if (result == DriverResult::Success) {
+ MCUCommandResponse output{};
+ result = SendStartPollingRequest(output);
}
if (result == DriverResult::Success) {
- result = WaitUntilNfcIsReady();
+ result = WaitUntilNfcIs(NFCStatus::Polling);
}
if (result == DriverResult::Success) {
is_enabled = true;
@@ -72,49 +85,99 @@ DriverResult NfcProtocol::StartNFCPollingMode() {
}
DriverResult NfcProtocol::ScanAmiibo(std::vector<u8>& data) {
- LOG_DEBUG(Input, "Start NFC pooling Mode");
+ if (update_counter++ < AMIIBO_UPDATE_DELAY) {
+ return DriverResult::Delayed;
+ }
+ update_counter = 0;
+
+ LOG_DEBUG(Input, "Scan for amiibos");
+ ScopedSetBlocking sb(this);
+ DriverResult result{DriverResult::Success};
+ TagFoundData tag_data{};
+
+ if (result == DriverResult::Success) {
+ result = IsTagInRange(tag_data);
+ }
+
+ if (result == DriverResult::Success) {
+ std::string uuid_string;
+ for (auto& content : tag_data.uuid) {
+ uuid_string += fmt::format(" {:02x}", content);
+ }
+ LOG_INFO(Input, "Tag detected, type={}, uuid={}", tag_data.type, uuid_string);
+ result = GetAmiiboData(data);
+ }
+
+ return result;
+}
+
+DriverResult NfcProtocol::WriteAmiibo(std::span<const u8> data) {
+ LOG_DEBUG(Input, "Write amiibo");
ScopedSetBlocking sb(this);
DriverResult result{DriverResult::Success};
+ TagUUID tag_uuid = GetTagUUID(data);
TagFoundData tag_data{};
if (result == DriverResult::Success) {
- result = StartPolling(tag_data);
+ result = IsTagInRange(tag_data, 7);
+ }
+ if (result == DriverResult::Success) {
+ if (tag_data.uuid != tag_uuid) {
+ result = DriverResult::InvalidParameters;
+ }
}
if (result == DriverResult::Success) {
- result = ReadTag(tag_data);
+ MCUCommandResponse output{};
+ result = SendStopPollingRequest(output);
}
if (result == DriverResult::Success) {
- result = WaitUntilNfcIsReady();
+ result = WaitUntilNfcIs(NFCStatus::Ready);
}
if (result == DriverResult::Success) {
- result = StartPolling(tag_data);
+ MCUCommandResponse output{};
+ result = SendStartPollingRequest(output, true);
}
if (result == DriverResult::Success) {
- result = GetAmiiboData(data);
+ result = WaitUntilNfcIs(NFCStatus::WriteReady);
+ }
+ if (result == DriverResult::Success) {
+ result = WriteAmiiboData(tag_uuid, data);
+ }
+ if (result == DriverResult::Success) {
+ result = WaitUntilNfcIs(NFCStatus::WriteDone);
+ }
+ if (result == DriverResult::Success) {
+ MCUCommandResponse output{};
+ result = SendStopPollingRequest(output);
}
return result;
}
bool NfcProtocol::HasAmiibo() {
+ if (update_counter++ < AMIIBO_UPDATE_DELAY) {
+ return true;
+ }
+ update_counter = 0;
+
ScopedSetBlocking sb(this);
DriverResult result{DriverResult::Success};
TagFoundData tag_data{};
if (result == DriverResult::Success) {
- result = StartPolling(tag_data);
+ result = IsTagInRange(tag_data, 7);
}
return result == DriverResult::Success;
}
-DriverResult NfcProtocol::WaitUntilNfcIsReady() {
+DriverResult NfcProtocol::WaitUntilNfcIs(NFCStatus status) {
constexpr std::size_t timeout_limit = 10;
MCUCommandResponse output{};
std::size_t tries = 0;
do {
- auto result = SendStartWaitingRecieveRequest(output);
+ auto result = SendNextPackageRequest(output, {});
if (result != DriverResult::Success) {
return result;
@@ -124,19 +187,17 @@ DriverResult NfcProtocol::WaitUntilNfcIsReady() {
}
} while (output.mcu_report != MCUReport::NFCState ||
(output.mcu_data[1] << 8) + output.mcu_data[0] != 0x0500 ||
- output.mcu_data[5] != 0x31 || output.mcu_data[6] != 0x00);
+ output.mcu_data[5] != 0x31 || output.mcu_data[6] != static_cast<u8>(status));
return DriverResult::Success;
}
-DriverResult NfcProtocol::StartPolling(TagFoundData& data) {
- LOG_DEBUG(Input, "Start Polling for tag");
- constexpr std::size_t timeout_limit = 7;
+DriverResult NfcProtocol::IsTagInRange(TagFoundData& data, std::size_t timeout_limit) {
MCUCommandResponse output{};
std::size_t tries = 0;
do {
- const auto result = SendStartPollingRequest(output);
+ const auto result = SendNextPackageRequest(output, {});
if (result != DriverResult::Success) {
return result;
}
@@ -145,32 +206,31 @@ DriverResult NfcProtocol::StartPolling(TagFoundData& data) {
}
} while (output.mcu_report != MCUReport::NFCState ||
(output.mcu_data[1] << 8) + output.mcu_data[0] != 0x0500 ||
- output.mcu_data[6] != 0x09);
+ (output.mcu_data[6] != 0x09 && output.mcu_data[6] != 0x04));
data.type = output.mcu_data[12];
- data.uuid.resize(output.mcu_data[14]);
+ data.uuid_size = std::min(output.mcu_data[14], static_cast<u8>(sizeof(TagUUID)));
memcpy(data.uuid.data(), output.mcu_data.data() + 15, data.uuid.size());
return DriverResult::Success;
}
-DriverResult NfcProtocol::ReadTag(const TagFoundData& data) {
- constexpr std::size_t timeout_limit = 10;
+DriverResult NfcProtocol::GetAmiiboData(std::vector<u8>& ntag_data) {
+ constexpr std::size_t timeout_limit = 60;
MCUCommandResponse output{};
std::size_t tries = 0;
- std::string uuid_string;
- for (auto& content : data.uuid) {
- uuid_string += fmt::format(" {:02x}", content);
- }
+ u8 package_index = 0;
+ std::size_t ntag_buffer_pos = 0;
+ auto result = SendReadAmiiboRequest(output, NFCPages::Block135);
- LOG_INFO(Input, "Tag detected, type={}, uuid={}", data.type, uuid_string);
+ if (result != DriverResult::Success) {
+ return result;
+ }
- tries = 0;
- NFCPages ntag_pages = NFCPages::Block0;
// Read Tag data
- while (true) {
- auto result = SendReadAmiiboRequest(output, ntag_pages);
+ while (tries++ < timeout_limit) {
+ result = SendNextPackageRequest(output, package_index);
const auto nfc_status = static_cast<NFCStatus>(output.mcu_data[6]);
if (result != DriverResult::Success) {
@@ -183,56 +243,51 @@ DriverResult NfcProtocol::ReadTag(const TagFoundData& data) {
return DriverResult::ErrorReadingData;
}
- if (output.mcu_report == MCUReport::NFCReadData && output.mcu_data[1] == 0x07 &&
- output.mcu_data[2] == 0x01) {
- if (data.type != 2) {
- continue;
- }
- switch (output.mcu_data[24]) {
- case 0:
- ntag_pages = NFCPages::Block135;
- break;
- case 3:
- ntag_pages = NFCPages::Block45;
- break;
- case 4:
- ntag_pages = NFCPages::Block231;
- break;
- default:
- return DriverResult::ErrorReadingData;
+ if (output.mcu_report == MCUReport::NFCReadData && output.mcu_data[1] == 0x07) {
+ std::size_t payload_size = (output.mcu_data[4] << 8 | output.mcu_data[5]) & 0x7FF;
+ if (output.mcu_data[2] == 0x01) {
+ memcpy(ntag_data.data() + ntag_buffer_pos, output.mcu_data.data() + 66,
+ payload_size - 60);
+ ntag_buffer_pos += payload_size - 60;
+ } else {
+ memcpy(ntag_data.data() + ntag_buffer_pos, output.mcu_data.data() + 6,
+ payload_size);
}
+ package_index++;
continue;
}
if (output.mcu_report == MCUReport::NFCState && nfc_status == NFCStatus::LastPackage) {
- // finished
- SendStopPollingRequest(output);
+ LOG_INFO(Input, "Finished reading amiibo");
return DriverResult::Success;
}
-
- // Ignore other state reports
- if (output.mcu_report == MCUReport::NFCState) {
- continue;
- }
-
- if (tries++ > timeout_limit) {
- return DriverResult::Timeout;
- }
}
- return DriverResult::Success;
+ return DriverResult::Timeout;
}
-DriverResult NfcProtocol::GetAmiiboData(std::vector<u8>& ntag_data) {
- constexpr std::size_t timeout_limit = 10;
+DriverResult NfcProtocol::WriteAmiiboData(const TagUUID& tag_uuid, std::span<const u8> data) {
+ constexpr std::size_t timeout_limit = 60;
+ const auto nfc_data = MakeAmiiboWritePackage(tag_uuid, data);
+ const std::vector<u8> nfc_buffer_data = SerializeWritePackage(nfc_data);
+ std::span<const u8> buffer(nfc_buffer_data);
MCUCommandResponse output{};
+ u8 block_id = 1;
+ u8 package_index = 0;
std::size_t tries = 0;
+ std::size_t current_position = 0;
- NFCPages ntag_pages = NFCPages::Block135;
- std::size_t ntag_buffer_pos = 0;
- // Read Tag data
- while (true) {
- auto result = SendReadAmiiboRequest(output, ntag_pages);
+ LOG_INFO(Input, "Writing amiibo data");
+
+ auto result = SendWriteAmiiboRequest(output, tag_uuid);
+
+ if (result != DriverResult::Success) {
+ return result;
+ }
+
+ // Read Tag data but ignore the actual sent data
+ while (tries++ < timeout_limit) {
+ result = SendNextPackageRequest(output, package_index);
const auto nfc_status = static_cast<NFCStatus>(output.mcu_data[6]);
if (result != DriverResult::Success) {
@@ -246,47 +301,59 @@ DriverResult NfcProtocol::GetAmiiboData(std::vector<u8>& ntag_data) {
}
if (output.mcu_report == MCUReport::NFCReadData && output.mcu_data[1] == 0x07) {
- std::size_t payload_size = (output.mcu_data[4] << 8 | output.mcu_data[5]) & 0x7FF;
- if (output.mcu_data[2] == 0x01) {
- memcpy(ntag_data.data() + ntag_buffer_pos, output.mcu_data.data() + 66,
- payload_size - 60);
- ntag_buffer_pos += payload_size - 60;
- } else {
- memcpy(ntag_data.data() + ntag_buffer_pos, output.mcu_data.data() + 6,
- payload_size);
- }
+ package_index++;
continue;
}
if (output.mcu_report == MCUReport::NFCState && nfc_status == NFCStatus::LastPackage) {
LOG_INFO(Input, "Finished reading amiibo");
- return DriverResult::Success;
+ break;
}
+ }
- // Ignore other state reports
- if (output.mcu_report == MCUReport::NFCState) {
- continue;
+ // Send Data. Nfc buffer size is 31, Send the data in smaller packages
+ while (current_position < buffer.size() && tries++ < timeout_limit) {
+ const std::size_t next_position =
+ std::min(current_position + sizeof(NFCRequestState::raw_data), buffer.size());
+ const std::size_t block_size = next_position - current_position;
+ const bool is_last_packet = block_size < sizeof(NFCRequestState::raw_data);
+
+ SendWriteDataAmiiboRequest(output, block_id, is_last_packet,
+ buffer.subspan(current_position, block_size));
+
+ const auto nfc_status = static_cast<NFCStatus>(output.mcu_data[6]);
+
+ if ((output.mcu_report == MCUReport::NFCReadData ||
+ output.mcu_report == MCUReport::NFCState) &&
+ nfc_status == NFCStatus::TagLost) {
+ return DriverResult::ErrorReadingData;
}
- if (tries++ > timeout_limit) {
- return DriverResult::Timeout;
+ // Increase position when data is confirmed by the joycon
+ if (output.mcu_report == MCUReport::NFCState &&
+ (output.mcu_data[1] << 8) + output.mcu_data[0] == 0x0500 &&
+ output.mcu_data[3] == block_id) {
+ block_id++;
+ current_position = next_position;
}
}
- return DriverResult::Success;
+ return result;
}
-DriverResult NfcProtocol::SendStartPollingRequest(MCUCommandResponse& output) {
+DriverResult NfcProtocol::SendStartPollingRequest(MCUCommandResponse& output,
+ bool is_second_attempt) {
NFCRequestState request{
- .command_argument = NFCReadCommand::StartPolling,
- .packet_id = 0x0,
+ .command_argument = NFCCommand::StartPolling,
+ .block_id = {},
+ .packet_id = {},
.packet_flag = MCUPacketFlag::LastCommandPacket,
.data_length = sizeof(NFCPollingCommandData),
.nfc_polling =
{
- .enable_mifare = 0x01,
- .unknown_1 = 0x00,
- .unknown_2 = 0x00,
+ .enable_mifare = 0x00,
+ .unknown_1 = static_cast<u8>(is_second_attempt ? 0xe8 : 0x00),
+ .unknown_2 = static_cast<u8>(is_second_attempt ? 0x03 : 0x00),
.unknown_3 = 0x2c,
.unknown_4 = 0x01,
},
@@ -302,10 +369,11 @@ DriverResult NfcProtocol::SendStartPollingRequest(MCUCommandResponse& output) {
DriverResult NfcProtocol::SendStopPollingRequest(MCUCommandResponse& output) {
NFCRequestState request{
- .command_argument = NFCReadCommand::StopPolling,
- .packet_id = 0x0,
+ .command_argument = NFCCommand::StopPolling,
+ .block_id = {},
+ .packet_id = {},
.packet_flag = MCUPacketFlag::LastCommandPacket,
- .data_length = 0,
+ .data_length = {},
.raw_data = {},
.crc = {},
};
@@ -317,12 +385,13 @@ DriverResult NfcProtocol::SendStopPollingRequest(MCUCommandResponse& output) {
output);
}
-DriverResult NfcProtocol::SendStartWaitingRecieveRequest(MCUCommandResponse& output) {
+DriverResult NfcProtocol::SendNextPackageRequest(MCUCommandResponse& output, u8 packet_id) {
NFCRequestState request{
- .command_argument = NFCReadCommand::StartWaitingRecieve,
- .packet_id = 0x0,
+ .command_argument = NFCCommand::StartWaitingRecieve,
+ .block_id = {},
+ .packet_id = packet_id,
.packet_flag = MCUPacketFlag::LastCommandPacket,
- .data_length = 0,
+ .data_length = {},
.raw_data = {},
.crc = {},
};
@@ -336,17 +405,17 @@ DriverResult NfcProtocol::SendStartWaitingRecieveRequest(MCUCommandResponse& out
DriverResult NfcProtocol::SendReadAmiiboRequest(MCUCommandResponse& output, NFCPages ntag_pages) {
NFCRequestState request{
- .command_argument = NFCReadCommand::Ntag,
- .packet_id = 0x0,
+ .command_argument = NFCCommand::ReadNtag,
+ .block_id = {},
+ .packet_id = {},
.packet_flag = MCUPacketFlag::LastCommandPacket,
.data_length = sizeof(NFCReadCommandData),
.nfc_read =
{
.unknown = 0xd0,
- .uuid_length = 0x07,
- .unknown_2 = 0x00,
+ .uuid_length = sizeof(NFCReadCommandData::uid),
.uid = {},
- .tag_type = NFCTagType::AllTags,
+ .tag_type = NFCTagType::Ntag215,
.read_block = GetReadBlockCommand(ntag_pages),
},
.crc = {},
@@ -359,12 +428,135 @@ DriverResult NfcProtocol::SendReadAmiiboRequest(MCUCommandResponse& output, NFCP
output);
}
+DriverResult NfcProtocol::SendWriteAmiiboRequest(MCUCommandResponse& output,
+ const TagUUID& tag_uuid) {
+ NFCRequestState request{
+ .command_argument = NFCCommand::ReadNtag,
+ .block_id = {},
+ .packet_id = {},
+ .packet_flag = MCUPacketFlag::LastCommandPacket,
+ .data_length = sizeof(NFCReadCommandData),
+ .nfc_read =
+ {
+ .unknown = 0xd0,
+ .uuid_length = sizeof(NFCReadCommandData::uid),
+ .uid = tag_uuid,
+ .tag_type = NFCTagType::Ntag215,
+ .read_block = GetReadBlockCommand(NFCPages::Block3),
+ },
+ .crc = {},
+ };
+
+ std::array<u8, sizeof(NFCRequestState)> request_data{};
+ memcpy(request_data.data(), &request, sizeof(NFCRequestState));
+ request_data[36] = CalculateMCU_CRC8(request_data.data(), 36);
+ return SendMCUData(ReportMode::NFC_IR_MODE_60HZ, MCUSubCommand::ReadDeviceMode, request_data,
+ output);
+}
+
+DriverResult NfcProtocol::SendWriteDataAmiiboRequest(MCUCommandResponse& output, u8 block_id,
+ bool is_last_packet,
+ std::span<const u8> data) {
+ const auto data_size = std::min(data.size(), sizeof(NFCRequestState::raw_data));
+ NFCRequestState request{
+ .command_argument = NFCCommand::WriteNtag,
+ .block_id = block_id,
+ .packet_id = {},
+ .packet_flag =
+ is_last_packet ? MCUPacketFlag::LastCommandPacket : MCUPacketFlag::MorePacketsRemaining,
+ .data_length = static_cast<u8>(data_size),
+ .raw_data = {},
+ .crc = {},
+ };
+ memcpy(request.raw_data.data(), data.data(), data_size);
+
+ std::array<u8, sizeof(NFCRequestState)> request_data{};
+ memcpy(request_data.data(), &request, sizeof(NFCRequestState));
+ request_data[36] = CalculateMCU_CRC8(request_data.data(), 36);
+ return SendMCUData(ReportMode::NFC_IR_MODE_60HZ, MCUSubCommand::ReadDeviceMode, request_data,
+ output);
+}
+
+std::vector<u8> NfcProtocol::SerializeWritePackage(const NFCWritePackage& package) const {
+ const std::size_t header_size =
+ sizeof(NFCWriteCommandData) + sizeof(NFCWritePackage::number_of_chunks);
+ std::vector<u8> serialized_data(header_size);
+ std::size_t start_index = 0;
+
+ memcpy(serialized_data.data(), &package, header_size);
+ start_index += header_size;
+
+ for (const auto& data_chunk : package.data_chunks) {
+ const std::size_t chunk_size =
+ sizeof(NFCDataChunk::nfc_page) + sizeof(NFCDataChunk::data_size) + data_chunk.data_size;
+
+ serialized_data.resize(start_index + chunk_size);
+ memcpy(serialized_data.data() + start_index, &data_chunk, chunk_size);
+ start_index += chunk_size;
+ }
+
+ return serialized_data;
+}
+
+NFCWritePackage NfcProtocol::MakeAmiiboWritePackage(const TagUUID& tag_uuid,
+ std::span<const u8> data) const {
+ return {
+ .command_data{
+ .unknown = 0xd0,
+ .uuid_length = sizeof(NFCReadCommandData::uid),
+ .uid = tag_uuid,
+ .tag_type = NFCTagType::Ntag215,
+ .unknown2 = 0x00,
+ .unknown3 = 0x01,
+ .unknown4 = 0x04,
+ .unknown5 = 0xff,
+ .unknown6 = 0xff,
+ .unknown7 = 0xff,
+ .unknown8 = 0xff,
+ .magic = data[16],
+ .write_count = static_cast<u16>((data[17] << 8) + data[18]),
+ .amiibo_version = data[19],
+ },
+ .number_of_chunks = 3,
+ .data_chunks =
+ {
+ MakeAmiiboChunk(0x05, 0x20, data),
+ MakeAmiiboChunk(0x20, 0xf0, data),
+ MakeAmiiboChunk(0x5c, 0x98, data),
+ },
+ };
+}
+
+NFCDataChunk NfcProtocol::MakeAmiiboChunk(u8 page, u8 size, std::span<const u8> data) const {
+ constexpr u8 PAGE_SIZE = 4;
+
+ if (static_cast<std::size_t>(page * PAGE_SIZE) + size >= data.size()) {
+ return {};
+ }
+
+ NFCDataChunk chunk{
+ .nfc_page = page,
+ .data_size = size,
+ .data = {},
+ };
+ std::memcpy(chunk.data.data(), data.data() + (page * PAGE_SIZE), size);
+ return chunk;
+}
+
NFCReadBlockCommand NfcProtocol::GetReadBlockCommand(NFCPages pages) const {
switch (pages) {
case NFCPages::Block0:
return {
.block_count = 1,
};
+ case NFCPages::Block3:
+ return {
+ .block_count = 1,
+ .blocks =
+ {
+ NFCReadBlock{0x03, 0x03},
+ },
+ };
case NFCPages::Block45:
return {
.block_count = 1,
@@ -399,6 +591,17 @@ NFCReadBlockCommand NfcProtocol::GetReadBlockCommand(NFCPages pages) const {
};
}
+TagUUID NfcProtocol::GetTagUUID(std::span<const u8> data) const {
+ if (data.size() < 10) {
+ return {};
+ }
+
+ // crc byte 3 is omitted in this operation
+ return {
+ data[0], data[1], data[2], data[4], data[5], data[6], data[7],
+ };
+}
+
bool NfcProtocol::IsEnabled() const {
return is_enabled;
}
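GetTagUUID() above skips byte 3 of the tag dump when assembling the 7-byte UID; on NTAG21x tags that byte is a check byte (BCC0) sitting between UID0–UID2 and UID3–UID6 rather than part of the UID itself. A small sketch of the same extraction (ExtractUuid is an illustrative name):

```cpp
#include <array>
#include <cstdint>
#include <span>

using TagUUID = std::array<std::uint8_t, 7>;

// NTAG21x stores the UID as UID0..UID2, a check byte (BCC0), then UID3..UID6,
// so byte 3 of the dump is skipped when building the 7-byte UID.
TagUUID ExtractUuid(std::span<const std::uint8_t> dump) {
    if (dump.size() < 8) {
        return {};
    }
    return {dump[0], dump[1], dump[2], dump[4], dump[5], dump[6], dump[7]};
}
```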
diff --git a/src/input_common/helpers/joycon_protocol/nfc.h b/src/input_common/helpers/joycon_protocol/nfc.h
index 11e263e07..eb58c427d 100644
--- a/src/input_common/helpers/joycon_protocol/nfc.h
+++ b/src/input_common/helpers/joycon_protocol/nfc.h
@@ -27,35 +27,56 @@ public:
DriverResult ScanAmiibo(std::vector<u8>& data);
+ DriverResult WriteAmiibo(std::span<const u8> data);
+
bool HasAmiibo();
bool IsEnabled() const;
private:
+    // Number of update polls to wait before the amiibo data is reported as valid
+ static constexpr std::size_t AMIIBO_UPDATE_DELAY = 15;
+
struct TagFoundData {
u8 type;
- std::vector<u8> uuid;
+ u8 uuid_size;
+ TagUUID uuid;
};
- DriverResult WaitUntilNfcIsReady();
-
- DriverResult StartPolling(TagFoundData& data);
+ DriverResult WaitUntilNfcIs(NFCStatus status);
- DriverResult ReadTag(const TagFoundData& data);
+ DriverResult IsTagInRange(TagFoundData& data, std::size_t timeout_limit = 1);
DriverResult GetAmiiboData(std::vector<u8>& data);
- DriverResult SendStartPollingRequest(MCUCommandResponse& output);
+ DriverResult WriteAmiiboData(const TagUUID& tag_uuid, std::span<const u8> data);
+
+ DriverResult SendStartPollingRequest(MCUCommandResponse& output,
+ bool is_second_attempt = false);
DriverResult SendStopPollingRequest(MCUCommandResponse& output);
- DriverResult SendStartWaitingRecieveRequest(MCUCommandResponse& output);
+ DriverResult SendNextPackageRequest(MCUCommandResponse& output, u8 packet_id);
DriverResult SendReadAmiiboRequest(MCUCommandResponse& output, NFCPages ntag_pages);
+ DriverResult SendWriteAmiiboRequest(MCUCommandResponse& output, const TagUUID& tag_uuid);
+
+ DriverResult SendWriteDataAmiiboRequest(MCUCommandResponse& output, u8 block_id,
+ bool is_last_packet, std::span<const u8> data);
+
+ std::vector<u8> SerializeWritePackage(const NFCWritePackage& package) const;
+
+ NFCWritePackage MakeAmiiboWritePackage(const TagUUID& tag_uuid, std::span<const u8> data) const;
+
+ NFCDataChunk MakeAmiiboChunk(u8 page, u8 size, std::span<const u8> data) const;
+
NFCReadBlockCommand GetReadBlockCommand(NFCPages pages) const;
+ TagUUID GetTagUUID(std::span<const u8> data) const;
+
bool is_enabled{};
+ std::size_t update_counter{};
};
} // namespace InputCommon::Joycon
diff --git a/src/input_common/input_engine.cpp b/src/input_common/input_engine.cpp
index 91aa96aa7..e4c5b5b3c 100644
--- a/src/input_common/input_engine.cpp
+++ b/src/input_common/input_engine.cpp
@@ -380,13 +380,16 @@ void InputEngine::TriggerOnMotionChange(const PadIdentifier& identifier, int mot
if (!configuring || !mapping_callback.on_data) {
return;
}
+ const auto old_value = GetMotion(identifier, motion);
bool is_active = false;
- if (std::abs(value.accel_x) > 1.5f || std::abs(value.accel_y) > 1.5f ||
- std::abs(value.accel_z) > 1.5f) {
+ if (std::abs(value.accel_x - old_value.accel_x) > 1.5f ||
+ std::abs(value.accel_y - old_value.accel_y) > 1.5f ||
+ std::abs(value.accel_z - old_value.accel_z) > 1.5f) {
is_active = true;
}
- if (std::abs(value.gyro_x) > 0.6f || std::abs(value.gyro_y) > 0.6f ||
- std::abs(value.gyro_z) > 0.6f) {
+ if (std::abs(value.gyro_x - old_value.gyro_x) > 0.6f ||
+ std::abs(value.gyro_y - old_value.gyro_y) > 0.6f ||
+ std::abs(value.gyro_z - old_value.gyro_z) > 0.6f) {
is_active = true;
}
if (!is_active) {
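The switch to deltas above keeps a motionless but tilted controller (where gravity biases one accelerometer axis) from registering as active during mapping. A standalone sketch of the same check, with the struct layout assumed for illustration and the 1.5 / 0.6 thresholds mirrored from the code above:

    #include <cmath>

    // Assumed, simplified motion sample layout for illustration only.
    struct MotionSample {
        float accel_x, accel_y, accel_z;
        float gyro_x, gyro_y, gyro_z;
    };

    // True when the new sample differs enough from the previous one to count as motion.
    bool HasMeaningfulMotion(const MotionSample& now, const MotionSample& before) {
        const bool accel_moved = std::abs(now.accel_x - before.accel_x) > 1.5f ||
                                 std::abs(now.accel_y - before.accel_y) > 1.5f ||
                                 std::abs(now.accel_z - before.accel_z) > 1.5f;
        const bool gyro_moved = std::abs(now.gyro_x - before.gyro_x) > 0.6f ||
                                std::abs(now.gyro_y - before.gyro_y) > 0.6f ||
                                std::abs(now.gyro_z - before.gyro_z) > 0.6f;
        return accel_moved || gyro_moved;
    }
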
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
index fee510f7b..07c2b7b8a 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
@@ -339,9 +339,7 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
if (ctx.profile.support_vertex_instance_id) {
return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.vertex_id));
} else {
- const Id index{ctx.OpLoad(ctx.U32[1], ctx.vertex_index)};
- const Id base{ctx.OpLoad(ctx.U32[1], ctx.base_vertex)};
- return ctx.OpBitcast(ctx.F32[1], ctx.OpISub(ctx.U32[1], index, base));
+ return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.vertex_index));
}
case IR::Attribute::BaseInstance:
return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.base_instance));
@@ -386,9 +384,7 @@ Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, Id) {
if (ctx.profile.support_vertex_instance_id) {
return ctx.OpLoad(ctx.U32[1], ctx.vertex_id);
} else {
- const Id index{ctx.OpLoad(ctx.U32[1], ctx.vertex_index)};
- const Id base{ctx.OpLoad(ctx.U32[1], ctx.base_vertex)};
- return ctx.OpISub(ctx.U32[1], index, base);
+ return ctx.OpLoad(ctx.U32[1], ctx.vertex_index);
}
case IR::Attribute::BaseInstance:
return ctx.OpLoad(ctx.U32[1], ctx.base_instance);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/integer_funnel_shift.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/integer_funnel_shift.cpp
index 442365a26..c2a0ee6f1 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/integer_funnel_shift.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/integer_funnel_shift.cpp
@@ -30,7 +30,7 @@ void SHF(TranslatorVisitor& v, u64 insn, const IR::U32& shift, const IR::U32& hi
union {
u64 insn;
BitField<0, 8, IR::Reg> dest_reg;
- BitField<0, 8, IR::Reg> lo_bits_reg;
+ BitField<8, 8, IR::Reg> lo_bits_reg;
BitField<37, 2, MaxShift> max_shift;
BitField<47, 1, u64> cc;
BitField<48, 2, u64> x_mode;
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_mipmap_level.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_mipmap_level.cpp
index 639da1e9c..eeb49444f 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/texture_mipmap_level.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/texture_mipmap_level.cpp
@@ -102,12 +102,7 @@ void Impl(TranslatorVisitor& v, u64 insn, bool is_bindless) {
}
IR::F32 value{v.ir.CompositeExtract(sample, element)};
if (element < 2) {
- IR::U32 casted_value;
- if (element == 0) {
- casted_value = v.ir.ConvertFToU(32, value);
- } else {
- casted_value = v.ir.ConvertFToS(16, value);
- }
+ IR::U32 casted_value = v.ir.ConvertFToU(32, value);
v.X(dest_reg, v.ir.ShiftLeftLogical(casted_value, v.ir.Imm32(8)));
} else {
v.F(dest_reg, value);
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index a0009a36f..308d013d6 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -246,10 +246,14 @@ add_library(video_core STATIC
texture_cache/util.h
textures/astc.h
textures/astc.cpp
+ textures/bcn.cpp
+ textures/bcn.h
textures/decoders.cpp
textures/decoders.h
textures/texture.cpp
textures/texture.h
+ textures/workers.cpp
+ textures/workers.h
transform_feedback.cpp
transform_feedback.h
video_core.cpp
@@ -275,7 +279,7 @@ add_library(video_core STATIC
create_target_directory_groups(video_core)
target_link_libraries(video_core PUBLIC common core)
-target_link_libraries(video_core PUBLIC glad shader_recompiler)
+target_link_libraries(video_core PUBLIC glad shader_recompiler stb)
if (YUZU_USE_BUNDLED_FFMPEG AND NOT WIN32)
add_dependencies(video_core ffmpeg-build)
diff --git a/src/video_core/buffer_cache/buffer_cache.cpp b/src/video_core/buffer_cache/buffer_cache.cpp
index 40db243d2..4b4f7061b 100644
--- a/src/video_core/buffer_cache/buffer_cache.cpp
+++ b/src/video_core/buffer_cache/buffer_cache.cpp
@@ -2,6 +2,8 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "common/microprofile.h"
+#include "video_core/buffer_cache/buffer_cache_base.h"
+#include "video_core/control/channel_state_cache.inc"
namespace VideoCommon {
@@ -9,4 +11,6 @@ MICROPROFILE_DEFINE(GPU_PrepareBuffers, "GPU", "Prepare buffers", MP_RGB(224, 12
MICROPROFILE_DEFINE(GPU_BindUploadBuffers, "GPU", "Bind and upload buffers", MP_RGB(224, 128, 128));
MICROPROFILE_DEFINE(GPU_DownloadMemory, "GPU", "Download buffers", MP_RGB(224, 128, 128));
+template class VideoCommon::ChannelSetupCaches<VideoCommon::BufferCacheChannelInfo>;
+
} // namespace VideoCommon
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index fff57ffa9..c336be707 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -30,8 +30,8 @@ BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
}
const s64 device_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
- const s64 min_spacing_expected = device_memory - 1_GiB - 512_MiB;
- const s64 min_spacing_critical = device_memory - 1_GiB;
+ const s64 min_spacing_expected = device_memory - 1_GiB;
+ const s64 min_spacing_critical = device_memory - 512_MiB;
const s64 mem_threshold = std::min(device_memory, TARGET_THRESHOLD);
const s64 min_vacancy_expected = (6 * mem_threshold) / 10;
const s64 min_vacancy_critical = (3 * mem_threshold) / 10;
@@ -64,17 +64,22 @@ void BufferCache<P>::RunGarbageCollector() {
template <class P>
void BufferCache<P>::TickFrame() {
// Calculate hits and shots and move hit bits to the right
- const u32 hits = std::reduce(uniform_cache_hits.begin(), uniform_cache_hits.end());
- const u32 shots = std::reduce(uniform_cache_shots.begin(), uniform_cache_shots.end());
- std::copy_n(uniform_cache_hits.begin(), uniform_cache_hits.size() - 1,
- uniform_cache_hits.begin() + 1);
- std::copy_n(uniform_cache_shots.begin(), uniform_cache_shots.size() - 1,
- uniform_cache_shots.begin() + 1);
- uniform_cache_hits[0] = 0;
- uniform_cache_shots[0] = 0;
+
+ const u32 hits = std::reduce(channel_state->uniform_cache_hits.begin(),
+ channel_state->uniform_cache_hits.end());
+ const u32 shots = std::reduce(channel_state->uniform_cache_shots.begin(),
+ channel_state->uniform_cache_shots.end());
+ std::copy_n(channel_state->uniform_cache_hits.begin(),
+ channel_state->uniform_cache_hits.size() - 1,
+ channel_state->uniform_cache_hits.begin() + 1);
+ std::copy_n(channel_state->uniform_cache_shots.begin(),
+ channel_state->uniform_cache_shots.size() - 1,
+ channel_state->uniform_cache_shots.begin() + 1);
+ channel_state->uniform_cache_hits[0] = 0;
+ channel_state->uniform_cache_shots[0] = 0;
const bool skip_preferred = hits * 256 < shots * 251;
- uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0;
+ channel_state->uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0;
// If we can obtain the memory info, use it instead of the estimate.
if (runtime.CanReportMemoryUsage()) {
@@ -131,33 +136,15 @@ std::optional<VideoCore::RasterizerDownloadArea> BufferCache<P>::GetFlushArea(VA
template <class P>
void BufferCache<P>::DownloadMemory(VAddr cpu_addr, u64 size) {
- WaitOnAsyncFlushes(cpu_addr, size);
ForEachBufferInRange(cpu_addr, size, [&](BufferId, Buffer& buffer) {
DownloadBufferMemory(buffer, cpu_addr, size);
});
}
template <class P>
-void BufferCache<P>::WaitOnAsyncFlushes(VAddr cpu_addr, u64 size) {
- bool must_wait = false;
- ForEachInOverlapCounter(async_downloads, cpu_addr, size,
- [&](VAddr, VAddr, int) { must_wait = true; });
- bool must_release = false;
- ForEachInRangeSet(pending_ranges, cpu_addr, size, [&](VAddr, VAddr) { must_release = true; });
- if (must_release) {
- std::function<void()> tmp([]() {});
- rasterizer.SignalFence(std::move(tmp));
- }
- if (must_wait || must_release) {
- rasterizer.ReleaseFences();
- }
-}
-
-template <class P>
void BufferCache<P>::ClearDownload(IntervalType subtract_interval) {
RemoveEachInOverlapCounter(async_downloads, subtract_interval, -1024);
uncommitted_ranges.subtract(subtract_interval);
- pending_ranges.subtract(subtract_interval);
for (auto& interval_set : committed_ranges) {
interval_set.subtract(subtract_interval);
}
@@ -177,16 +164,15 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
}
const IntervalType subtract_interval{*cpu_dest_address, *cpu_dest_address + amount};
- WaitOnAsyncFlushes(*cpu_src_address, static_cast<u32>(amount));
ClearDownload(subtract_interval);
BufferId buffer_a;
BufferId buffer_b;
do {
- has_deleted_buffers = false;
+ channel_state->has_deleted_buffers = false;
buffer_a = FindBuffer(*cpu_src_address, static_cast<u32>(amount));
buffer_b = FindBuffer(*cpu_dest_address, static_cast<u32>(amount));
- } while (has_deleted_buffers);
+ } while (channel_state->has_deleted_buffers);
auto& src_buffer = slot_buffers[buffer_a];
auto& dest_buffer = slot_buffers[buffer_b];
SynchronizeBuffer(src_buffer, *cpu_src_address, static_cast<u32>(amount));
@@ -205,7 +191,6 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
const IntervalType add_interval{new_base_address, new_base_address + size};
tmp_intervals.push_back(add_interval);
uncommitted_ranges.add(add_interval);
- pending_ranges.add(add_interval);
};
ForEachInRangeSet(common_ranges, *cpu_src_address, amount, mirror);
// This subtraction in this order is important for overlapping copies.
@@ -292,30 +277,30 @@ void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr
.size = size,
.buffer_id = BufferId{},
};
- uniform_buffers[stage][index] = binding;
+ channel_state->uniform_buffers[stage][index] = binding;
}
template <class P>
void BufferCache<P>::DisableGraphicsUniformBuffer(size_t stage, u32 index) {
- uniform_buffers[stage][index] = NULL_BINDING;
+ channel_state->uniform_buffers[stage][index] = NULL_BINDING;
}
template <class P>
void BufferCache<P>::UpdateGraphicsBuffers(bool is_indexed) {
MICROPROFILE_SCOPE(GPU_PrepareBuffers);
do {
- has_deleted_buffers = false;
+ channel_state->has_deleted_buffers = false;
DoUpdateGraphicsBuffers(is_indexed);
- } while (has_deleted_buffers);
+ } while (channel_state->has_deleted_buffers);
}
template <class P>
void BufferCache<P>::UpdateComputeBuffers() {
MICROPROFILE_SCOPE(GPU_PrepareBuffers);
do {
- has_deleted_buffers = false;
+ channel_state->has_deleted_buffers = false;
DoUpdateComputeBuffers();
- } while (has_deleted_buffers);
+ } while (channel_state->has_deleted_buffers);
}
template <class P>
@@ -358,98 +343,102 @@ template <class P>
void BufferCache<P>::SetUniformBuffersState(const std::array<u32, NUM_STAGES>& mask,
const UniformBufferSizes* sizes) {
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
- if (enabled_uniform_buffer_masks != mask) {
+ if (channel_state->enabled_uniform_buffer_masks != mask) {
if constexpr (IS_OPENGL) {
- fast_bound_uniform_buffers.fill(0);
+ channel_state->fast_bound_uniform_buffers.fill(0);
}
- dirty_uniform_buffers.fill(~u32{0});
- uniform_buffer_binding_sizes.fill({});
+ channel_state->dirty_uniform_buffers.fill(~u32{0});
+ channel_state->uniform_buffer_binding_sizes.fill({});
}
}
- enabled_uniform_buffer_masks = mask;
- uniform_buffer_sizes = sizes;
+ channel_state->enabled_uniform_buffer_masks = mask;
+ channel_state->uniform_buffer_sizes = sizes;
}
template <class P>
void BufferCache<P>::SetComputeUniformBufferState(u32 mask,
const ComputeUniformBufferSizes* sizes) {
- enabled_compute_uniform_buffer_mask = mask;
- compute_uniform_buffer_sizes = sizes;
+ channel_state->enabled_compute_uniform_buffer_mask = mask;
+ channel_state->compute_uniform_buffer_sizes = sizes;
}
template <class P>
void BufferCache<P>::UnbindGraphicsStorageBuffers(size_t stage) {
- enabled_storage_buffers[stage] = 0;
- written_storage_buffers[stage] = 0;
+ channel_state->enabled_storage_buffers[stage] = 0;
+ channel_state->written_storage_buffers[stage] = 0;
}
template <class P>
void BufferCache<P>::BindGraphicsStorageBuffer(size_t stage, size_t ssbo_index, u32 cbuf_index,
u32 cbuf_offset, bool is_written) {
- enabled_storage_buffers[stage] |= 1U << ssbo_index;
- written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index;
+ channel_state->enabled_storage_buffers[stage] |= 1U << ssbo_index;
+ channel_state->written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index;
const auto& cbufs = maxwell3d->state.shader_stages[stage];
const GPUVAddr ssbo_addr = cbufs.const_buffers[cbuf_index].address + cbuf_offset;
- storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr, cbuf_index, is_written);
+ channel_state->storage_buffers[stage][ssbo_index] =
+ StorageBufferBinding(ssbo_addr, cbuf_index, is_written);
}
template <class P>
void BufferCache<P>::UnbindGraphicsTextureBuffers(size_t stage) {
- enabled_texture_buffers[stage] = 0;
- written_texture_buffers[stage] = 0;
- image_texture_buffers[stage] = 0;
+ channel_state->enabled_texture_buffers[stage] = 0;
+ channel_state->written_texture_buffers[stage] = 0;
+ channel_state->image_texture_buffers[stage] = 0;
}
template <class P>
void BufferCache<P>::BindGraphicsTextureBuffer(size_t stage, size_t tbo_index, GPUVAddr gpu_addr,
u32 size, PixelFormat format, bool is_written,
bool is_image) {
- enabled_texture_buffers[stage] |= 1U << tbo_index;
- written_texture_buffers[stage] |= (is_written ? 1U : 0U) << tbo_index;
+ channel_state->enabled_texture_buffers[stage] |= 1U << tbo_index;
+ channel_state->written_texture_buffers[stage] |= (is_written ? 1U : 0U) << tbo_index;
if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) {
- image_texture_buffers[stage] |= (is_image ? 1U : 0U) << tbo_index;
+ channel_state->image_texture_buffers[stage] |= (is_image ? 1U : 0U) << tbo_index;
}
- texture_buffers[stage][tbo_index] = GetTextureBufferBinding(gpu_addr, size, format);
+ channel_state->texture_buffers[stage][tbo_index] =
+ GetTextureBufferBinding(gpu_addr, size, format);
}
template <class P>
void BufferCache<P>::UnbindComputeStorageBuffers() {
- enabled_compute_storage_buffers = 0;
- written_compute_storage_buffers = 0;
- image_compute_texture_buffers = 0;
+ channel_state->enabled_compute_storage_buffers = 0;
+ channel_state->written_compute_storage_buffers = 0;
+ channel_state->image_compute_texture_buffers = 0;
}
template <class P>
void BufferCache<P>::BindComputeStorageBuffer(size_t ssbo_index, u32 cbuf_index, u32 cbuf_offset,
bool is_written) {
- enabled_compute_storage_buffers |= 1U << ssbo_index;
- written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index;
+ channel_state->enabled_compute_storage_buffers |= 1U << ssbo_index;
+ channel_state->written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index;
const auto& launch_desc = kepler_compute->launch_description;
ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0);
const auto& cbufs = launch_desc.const_buffer_config;
const GPUVAddr ssbo_addr = cbufs[cbuf_index].Address() + cbuf_offset;
- compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr, cbuf_index, is_written);
+ channel_state->compute_storage_buffers[ssbo_index] =
+ StorageBufferBinding(ssbo_addr, cbuf_index, is_written);
}
template <class P>
void BufferCache<P>::UnbindComputeTextureBuffers() {
- enabled_compute_texture_buffers = 0;
- written_compute_texture_buffers = 0;
- image_compute_texture_buffers = 0;
+ channel_state->enabled_compute_texture_buffers = 0;
+ channel_state->written_compute_texture_buffers = 0;
+ channel_state->image_compute_texture_buffers = 0;
}
template <class P>
void BufferCache<P>::BindComputeTextureBuffer(size_t tbo_index, GPUVAddr gpu_addr, u32 size,
PixelFormat format, bool is_written, bool is_image) {
- enabled_compute_texture_buffers |= 1U << tbo_index;
- written_compute_texture_buffers |= (is_written ? 1U : 0U) << tbo_index;
+ channel_state->enabled_compute_texture_buffers |= 1U << tbo_index;
+ channel_state->written_compute_texture_buffers |= (is_written ? 1U : 0U) << tbo_index;
if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) {
- image_compute_texture_buffers |= (is_image ? 1U : 0U) << tbo_index;
+ channel_state->image_compute_texture_buffers |= (is_image ? 1U : 0U) << tbo_index;
}
- compute_texture_buffers[tbo_index] = GetTextureBufferBinding(gpu_addr, size, format);
+ channel_state->compute_texture_buffers[tbo_index] =
+ GetTextureBufferBinding(gpu_addr, size, format);
}
template <class P>
@@ -492,7 +481,6 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
}
MICROPROFILE_SCOPE(GPU_DownloadMemory);
- pending_ranges.clear();
auto it = committed_ranges.begin();
while (it != committed_ranges.end()) {
auto& current_intervals = *it;
@@ -693,10 +681,10 @@ bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
template <class P>
void BufferCache<P>::BindHostIndexBuffer() {
- Buffer& buffer = slot_buffers[index_buffer.buffer_id];
- TouchBuffer(buffer, index_buffer.buffer_id);
- const u32 offset = buffer.Offset(index_buffer.cpu_addr);
- const u32 size = index_buffer.size;
+ Buffer& buffer = slot_buffers[channel_state->index_buffer.buffer_id];
+ TouchBuffer(buffer, channel_state->index_buffer.buffer_id);
+ const u32 offset = buffer.Offset(channel_state->index_buffer.cpu_addr);
+ const u32 size = channel_state->index_buffer.size;
const auto& draw_state = maxwell3d->draw_manager->GetDrawState();
if (!draw_state.inline_index_draw_indexes.empty()) [[unlikely]] {
if constexpr (USE_MEMORY_MAPS) {
@@ -710,7 +698,7 @@ void BufferCache<P>::BindHostIndexBuffer() {
buffer.ImmediateUpload(0, draw_state.inline_index_draw_indexes);
}
} else {
- SynchronizeBuffer(buffer, index_buffer.cpu_addr, size);
+ SynchronizeBuffer(buffer, channel_state->index_buffer.cpu_addr, size);
}
if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
const u32 new_offset =
@@ -727,7 +715,7 @@ template <class P>
void BufferCache<P>::BindHostVertexBuffers() {
auto& flags = maxwell3d->dirty.flags;
for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
- const Binding& binding = vertex_buffers[index];
+ const Binding& binding = channel_state->vertex_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer, binding.buffer_id);
SynchronizeBuffer(buffer, binding.cpu_addr, binding.size);
@@ -750,19 +738,19 @@ void BufferCache<P>::BindHostDrawIndirectBuffers() {
SynchronizeBuffer(buffer, binding.cpu_addr, binding.size);
};
if (current_draw_indirect->include_count) {
- bind_buffer(count_buffer_binding);
+ bind_buffer(channel_state->count_buffer_binding);
}
- bind_buffer(indirect_buffer_binding);
+ bind_buffer(channel_state->indirect_buffer_binding);
}
template <class P>
void BufferCache<P>::BindHostGraphicsUniformBuffers(size_t stage) {
u32 dirty = ~0U;
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
- dirty = std::exchange(dirty_uniform_buffers[stage], 0);
+ dirty = std::exchange(channel_state->dirty_uniform_buffers[stage], 0);
}
u32 binding_index = 0;
- ForEachEnabledBit(enabled_uniform_buffer_masks[stage], [&](u32 index) {
+ ForEachEnabledBit(channel_state->enabled_uniform_buffer_masks[stage], [&](u32 index) {
const bool needs_bind = ((dirty >> index) & 1) != 0;
BindHostGraphicsUniformBuffer(stage, index, binding_index, needs_bind);
if constexpr (NEEDS_BIND_UNIFORM_INDEX) {
@@ -774,13 +762,13 @@ void BufferCache<P>::BindHostGraphicsUniformBuffers(size_t stage) {
template <class P>
void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32 binding_index,
bool needs_bind) {
- const Binding& binding = uniform_buffers[stage][index];
+ const Binding& binding = channel_state->uniform_buffers[stage][index];
const VAddr cpu_addr = binding.cpu_addr;
- const u32 size = std::min(binding.size, (*uniform_buffer_sizes)[stage][index]);
+ const u32 size = std::min(binding.size, (*channel_state->uniform_buffer_sizes)[stage][index]);
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer, binding.buffer_id);
const bool use_fast_buffer = binding.buffer_id != NULL_BUFFER_ID &&
- size <= uniform_buffer_skip_cache_size &&
+ size <= channel_state->uniform_buffer_skip_cache_size &&
!memory_tracker.IsRegionGpuModified(cpu_addr, size);
if (use_fast_buffer) {
if constexpr (IS_OPENGL) {
@@ -788,11 +776,11 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
// Fast path for Nvidia
const bool should_fast_bind =
!HasFastUniformBufferBound(stage, binding_index) ||
- uniform_buffer_binding_sizes[stage][binding_index] != size;
+ channel_state->uniform_buffer_binding_sizes[stage][binding_index] != size;
if (should_fast_bind) {
// We only have to bind when the currently bound buffer is not the fast version
- fast_bound_uniform_buffers[stage] |= 1U << binding_index;
- uniform_buffer_binding_sizes[stage][binding_index] = size;
+ channel_state->fast_bound_uniform_buffers[stage] |= 1U << binding_index;
+ channel_state->uniform_buffer_binding_sizes[stage][binding_index] = size;
runtime.BindFastUniformBuffer(stage, binding_index, size);
}
const auto span = ImmediateBufferWithData(cpu_addr, size);
@@ -801,8 +789,8 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
}
}
if constexpr (IS_OPENGL) {
- fast_bound_uniform_buffers[stage] |= 1U << binding_index;
- uniform_buffer_binding_sizes[stage][binding_index] = size;
+ channel_state->fast_bound_uniform_buffers[stage] |= 1U << binding_index;
+ channel_state->uniform_buffer_binding_sizes[stage][binding_index] = size;
}
// Stream buffer path to avoid stalling on non-Nvidia drivers or Vulkan
const std::span<u8> span = runtime.BindMappedUniformBuffer(stage, binding_index, size);
@@ -812,15 +800,15 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
// Classic cached path
const bool sync_cached = SynchronizeBuffer(buffer, cpu_addr, size);
if (sync_cached) {
- ++uniform_cache_hits[0];
+ ++channel_state->uniform_cache_hits[0];
}
- ++uniform_cache_shots[0];
+ ++channel_state->uniform_cache_shots[0];
// Skip binding if it's not needed and if the bound buffer is not the fast version
// This exists to avoid instances where the fast buffer is bound and a GPU write happens
needs_bind |= HasFastUniformBufferBound(stage, binding_index);
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
- needs_bind |= uniform_buffer_binding_sizes[stage][binding_index] != size;
+ needs_bind |= channel_state->uniform_buffer_binding_sizes[stage][binding_index] != size;
}
if (!needs_bind) {
return;
@@ -828,14 +816,14 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
const u32 offset = buffer.Offset(cpu_addr);
if constexpr (IS_OPENGL) {
// Fast buffer will be unbound
- fast_bound_uniform_buffers[stage] &= ~(1U << binding_index);
+ channel_state->fast_bound_uniform_buffers[stage] &= ~(1U << binding_index);
// Mark the index as dirty if offset doesn't match
const bool is_copy_bind = offset != 0 && !runtime.SupportsNonZeroUniformOffset();
- dirty_uniform_buffers[stage] |= (is_copy_bind ? 1U : 0U) << index;
+ channel_state->dirty_uniform_buffers[stage] |= (is_copy_bind ? 1U : 0U) << index;
}
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
- uniform_buffer_binding_sizes[stage][binding_index] = size;
+ channel_state->uniform_buffer_binding_sizes[stage][binding_index] = size;
}
if constexpr (NEEDS_BIND_UNIFORM_INDEX) {
runtime.BindUniformBuffer(stage, binding_index, buffer, offset, size);
@@ -847,15 +835,15 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
template <class P>
void BufferCache<P>::BindHostGraphicsStorageBuffers(size_t stage) {
u32 binding_index = 0;
- ForEachEnabledBit(enabled_storage_buffers[stage], [&](u32 index) {
- const Binding& binding = storage_buffers[stage][index];
+ ForEachEnabledBit(channel_state->enabled_storage_buffers[stage], [&](u32 index) {
+ const Binding& binding = channel_state->storage_buffers[stage][index];
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer, binding.buffer_id);
const u32 size = binding.size;
SynchronizeBuffer(buffer, binding.cpu_addr, size);
const u32 offset = buffer.Offset(binding.cpu_addr);
- const bool is_written = ((written_storage_buffers[stage] >> index) & 1) != 0;
+ const bool is_written = ((channel_state->written_storage_buffers[stage] >> index) & 1) != 0;
if constexpr (NEEDS_BIND_STORAGE_INDEX) {
runtime.BindStorageBuffer(stage, binding_index, buffer, offset, size, is_written);
++binding_index;
@@ -867,8 +855,8 @@ void BufferCache<P>::BindHostGraphicsStorageBuffers(size_t stage) {
template <class P>
void BufferCache<P>::BindHostGraphicsTextureBuffers(size_t stage) {
- ForEachEnabledBit(enabled_texture_buffers[stage], [&](u32 index) {
- const TextureBufferBinding& binding = texture_buffers[stage][index];
+ ForEachEnabledBit(channel_state->enabled_texture_buffers[stage], [&](u32 index) {
+ const TextureBufferBinding& binding = channel_state->texture_buffers[stage][index];
Buffer& buffer = slot_buffers[binding.buffer_id];
const u32 size = binding.size;
SynchronizeBuffer(buffer, binding.cpu_addr, size);
@@ -876,7 +864,7 @@ void BufferCache<P>::BindHostGraphicsTextureBuffers(size_t stage) {
const u32 offset = buffer.Offset(binding.cpu_addr);
const PixelFormat format = binding.format;
if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) {
- if (((image_texture_buffers[stage] >> index) & 1) != 0) {
+ if (((channel_state->image_texture_buffers[stage] >> index) & 1) != 0) {
runtime.BindImageBuffer(buffer, offset, size, format);
} else {
runtime.BindTextureBuffer(buffer, offset, size, format);
@@ -893,7 +881,7 @@ void BufferCache<P>::BindHostTransformFeedbackBuffers() {
return;
}
for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) {
- const Binding& binding = transform_feedback_buffers[index];
+ const Binding& binding = channel_state->transform_feedback_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer, binding.buffer_id);
const u32 size = binding.size;
@@ -908,15 +896,16 @@ template <class P>
void BufferCache<P>::BindHostComputeUniformBuffers() {
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
// Mark all uniform buffers as dirty
- dirty_uniform_buffers.fill(~u32{0});
- fast_bound_uniform_buffers.fill(0);
+ channel_state->dirty_uniform_buffers.fill(~u32{0});
+ channel_state->fast_bound_uniform_buffers.fill(0);
}
u32 binding_index = 0;
- ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) {
- const Binding& binding = compute_uniform_buffers[index];
+ ForEachEnabledBit(channel_state->enabled_compute_uniform_buffer_mask, [&](u32 index) {
+ const Binding& binding = channel_state->compute_uniform_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer, binding.buffer_id);
- const u32 size = std::min(binding.size, (*compute_uniform_buffer_sizes)[index]);
+ const u32 size =
+ std::min(binding.size, (*channel_state->compute_uniform_buffer_sizes)[index]);
SynchronizeBuffer(buffer, binding.cpu_addr, size);
const u32 offset = buffer.Offset(binding.cpu_addr);
@@ -932,15 +921,16 @@ void BufferCache<P>::BindHostComputeUniformBuffers() {
template <class P>
void BufferCache<P>::BindHostComputeStorageBuffers() {
u32 binding_index = 0;
- ForEachEnabledBit(enabled_compute_storage_buffers, [&](u32 index) {
- const Binding& binding = compute_storage_buffers[index];
+ ForEachEnabledBit(channel_state->enabled_compute_storage_buffers, [&](u32 index) {
+ const Binding& binding = channel_state->compute_storage_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
TouchBuffer(buffer, binding.buffer_id);
const u32 size = binding.size;
SynchronizeBuffer(buffer, binding.cpu_addr, size);
const u32 offset = buffer.Offset(binding.cpu_addr);
- const bool is_written = ((written_compute_storage_buffers >> index) & 1) != 0;
+ const bool is_written =
+ ((channel_state->written_compute_storage_buffers >> index) & 1) != 0;
if constexpr (NEEDS_BIND_STORAGE_INDEX) {
runtime.BindComputeStorageBuffer(binding_index, buffer, offset, size, is_written);
++binding_index;
@@ -952,8 +942,8 @@ void BufferCache<P>::BindHostComputeStorageBuffers() {
template <class P>
void BufferCache<P>::BindHostComputeTextureBuffers() {
- ForEachEnabledBit(enabled_compute_texture_buffers, [&](u32 index) {
- const TextureBufferBinding& binding = compute_texture_buffers[index];
+ ForEachEnabledBit(channel_state->enabled_compute_texture_buffers, [&](u32 index) {
+ const TextureBufferBinding& binding = channel_state->compute_texture_buffers[index];
Buffer& buffer = slot_buffers[binding.buffer_id];
const u32 size = binding.size;
SynchronizeBuffer(buffer, binding.cpu_addr, size);
@@ -961,7 +951,7 @@ void BufferCache<P>::BindHostComputeTextureBuffers() {
const u32 offset = buffer.Offset(binding.cpu_addr);
const PixelFormat format = binding.format;
if constexpr (SEPARATE_IMAGE_BUFFERS_BINDINGS) {
- if (((image_compute_texture_buffers >> index) & 1) != 0) {
+ if (((channel_state->image_compute_texture_buffers >> index) & 1) != 0) {
runtime.BindImageBuffer(buffer, offset, size, format);
} else {
runtime.BindTextureBuffer(buffer, offset, size, format);
@@ -975,7 +965,7 @@ void BufferCache<P>::BindHostComputeTextureBuffers() {
template <class P>
void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) {
do {
- has_deleted_buffers = false;
+ channel_state->has_deleted_buffers = false;
if (is_indexed) {
UpdateIndexBuffer();
}
@@ -989,7 +979,7 @@ void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) {
if (current_draw_indirect) {
UpdateDrawIndirect();
}
- } while (has_deleted_buffers);
+ } while (channel_state->has_deleted_buffers);
}
template <class P>
@@ -1020,7 +1010,7 @@ void BufferCache<P>::UpdateIndexBuffer() {
slot_buffers.erase(inline_buffer_id);
inline_buffer_id = CreateBuffer(0, buffer_size);
}
- index_buffer = Binding{
+ channel_state->index_buffer = Binding{
.cpu_addr = 0,
.size = inline_index_size,
.buffer_id = inline_buffer_id,
@@ -1036,10 +1026,10 @@ void BufferCache<P>::UpdateIndexBuffer() {
(index_buffer_ref.count + index_buffer_ref.first) * index_buffer_ref.FormatSizeInBytes();
const u32 size = std::min(address_size, draw_size);
if (size == 0 || !cpu_addr) {
- index_buffer = NULL_BINDING;
+ channel_state->index_buffer = NULL_BINDING;
return;
}
- index_buffer = Binding{
+ channel_state->index_buffer = Binding{
.cpu_addr = *cpu_addr,
.size = size,
.buffer_id = FindBuffer(*cpu_addr, size),
@@ -1072,13 +1062,13 @@ void BufferCache<P>::UpdateVertexBuffer(u32 index) {
const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
u32 size = address_size; // TODO: Analyze stride and number of vertices
if (array.enable == 0 || size == 0 || !cpu_addr) {
- vertex_buffers[index] = NULL_BINDING;
+ channel_state->vertex_buffers[index] = NULL_BINDING;
return;
}
if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
size = static_cast<u32>(gpu_memory->MaxContinuousRange(gpu_addr_begin, size));
}
- vertex_buffers[index] = Binding{
+ channel_state->vertex_buffers[index] = Binding{
.cpu_addr = *cpu_addr,
.size = size,
.buffer_id = FindBuffer(*cpu_addr, size),
@@ -1100,23 +1090,24 @@ void BufferCache<P>::UpdateDrawIndirect() {
};
};
if (current_draw_indirect->include_count) {
- update(current_draw_indirect->count_start_address, sizeof(u32), count_buffer_binding);
+ update(current_draw_indirect->count_start_address, sizeof(u32),
+ channel_state->count_buffer_binding);
}
update(current_draw_indirect->indirect_start_address, current_draw_indirect->buffer_size,
- indirect_buffer_binding);
+ channel_state->indirect_buffer_binding);
}
template <class P>
void BufferCache<P>::UpdateUniformBuffers(size_t stage) {
- ForEachEnabledBit(enabled_uniform_buffer_masks[stage], [&](u32 index) {
- Binding& binding = uniform_buffers[stage][index];
+ ForEachEnabledBit(channel_state->enabled_uniform_buffer_masks[stage], [&](u32 index) {
+ Binding& binding = channel_state->uniform_buffers[stage][index];
if (binding.buffer_id) {
// Already updated
return;
}
// Mark as dirty
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
- dirty_uniform_buffers[stage] |= 1U << index;
+ channel_state->dirty_uniform_buffers[stage] |= 1U << index;
}
// Resolve buffer
binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
@@ -1125,10 +1116,10 @@ void BufferCache<P>::UpdateUniformBuffers(size_t stage) {
template <class P>
void BufferCache<P>::UpdateStorageBuffers(size_t stage) {
- const u32 written_mask = written_storage_buffers[stage];
- ForEachEnabledBit(enabled_storage_buffers[stage], [&](u32 index) {
+ const u32 written_mask = channel_state->written_storage_buffers[stage];
+ ForEachEnabledBit(channel_state->enabled_storage_buffers[stage], [&](u32 index) {
// Resolve buffer
- Binding& binding = storage_buffers[stage][index];
+ Binding& binding = channel_state->storage_buffers[stage][index];
const BufferId buffer_id = FindBuffer(binding.cpu_addr, binding.size);
binding.buffer_id = buffer_id;
// Mark buffer as written if needed
@@ -1140,11 +1131,11 @@ void BufferCache<P>::UpdateStorageBuffers(size_t stage) {
template <class P>
void BufferCache<P>::UpdateTextureBuffers(size_t stage) {
- ForEachEnabledBit(enabled_texture_buffers[stage], [&](u32 index) {
- Binding& binding = texture_buffers[stage][index];
+ ForEachEnabledBit(channel_state->enabled_texture_buffers[stage], [&](u32 index) {
+ Binding& binding = channel_state->texture_buffers[stage][index];
binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
// Mark buffer as written if needed
- if (((written_texture_buffers[stage] >> index) & 1) != 0) {
+ if (((channel_state->written_texture_buffers[stage] >> index) & 1) != 0) {
MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, binding.size);
}
});
@@ -1167,11 +1158,11 @@ void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) {
const u32 size = binding.size;
const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
if (binding.enable == 0 || size == 0 || !cpu_addr) {
- transform_feedback_buffers[index] = NULL_BINDING;
+ channel_state->transform_feedback_buffers[index] = NULL_BINDING;
return;
}
const BufferId buffer_id = FindBuffer(*cpu_addr, size);
- transform_feedback_buffers[index] = Binding{
+ channel_state->transform_feedback_buffers[index] = Binding{
.cpu_addr = *cpu_addr,
.size = size,
.buffer_id = buffer_id,
@@ -1181,8 +1172,8 @@ void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) {
template <class P>
void BufferCache<P>::UpdateComputeUniformBuffers() {
- ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) {
- Binding& binding = compute_uniform_buffers[index];
+ ForEachEnabledBit(channel_state->enabled_compute_uniform_buffer_mask, [&](u32 index) {
+ Binding& binding = channel_state->compute_uniform_buffers[index];
binding = NULL_BINDING;
const auto& launch_desc = kepler_compute->launch_description;
if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) {
@@ -1199,12 +1190,12 @@ void BufferCache<P>::UpdateComputeUniformBuffers() {
template <class P>
void BufferCache<P>::UpdateComputeStorageBuffers() {
- ForEachEnabledBit(enabled_compute_storage_buffers, [&](u32 index) {
+ ForEachEnabledBit(channel_state->enabled_compute_storage_buffers, [&](u32 index) {
// Resolve buffer
- Binding& binding = compute_storage_buffers[index];
+ Binding& binding = channel_state->compute_storage_buffers[index];
binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
// Mark as written if needed
- if (((written_compute_storage_buffers >> index) & 1) != 0) {
+ if (((channel_state->written_compute_storage_buffers >> index) & 1) != 0) {
MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, binding.size);
}
});
@@ -1212,11 +1203,11 @@ void BufferCache<P>::UpdateComputeStorageBuffers() {
template <class P>
void BufferCache<P>::UpdateComputeTextureBuffers() {
- ForEachEnabledBit(enabled_compute_texture_buffers, [&](u32 index) {
- Binding& binding = compute_texture_buffers[index];
+ ForEachEnabledBit(channel_state->enabled_compute_texture_buffers, [&](u32 index) {
+ Binding& binding = channel_state->compute_texture_buffers[index];
binding.buffer_id = FindBuffer(binding.cpu_addr, binding.size);
// Mark as written if needed
- if (((written_compute_texture_buffers >> index) & 1) != 0) {
+ if (((channel_state->written_compute_texture_buffers >> index) & 1) != 0) {
MarkWrittenBuffer(binding.buffer_id, binding.cpu_addr, binding.size);
}
});
@@ -1232,7 +1223,6 @@ void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 s
const IntervalType base_interval{cpu_addr, cpu_addr + size};
common_ranges.add(base_interval);
uncommitted_ranges.add(base_interval);
- pending_ranges.add(base_interval);
}
template <class P>
@@ -1632,13 +1622,13 @@ void BufferCache<P>::DeleteBuffer(BufferId buffer_id, bool do_not_mark) {
const auto replace = [scalar_replace](std::span<Binding> bindings) {
std::ranges::for_each(bindings, scalar_replace);
};
- scalar_replace(index_buffer);
- replace(vertex_buffers);
- std::ranges::for_each(uniform_buffers, replace);
- std::ranges::for_each(storage_buffers, replace);
- replace(transform_feedback_buffers);
- replace(compute_uniform_buffers);
- replace(compute_storage_buffers);
+ scalar_replace(channel_state->index_buffer);
+ replace(channel_state->vertex_buffers);
+ std::ranges::for_each(channel_state->uniform_buffers, replace);
+ std::ranges::for_each(channel_state->storage_buffers, replace);
+ replace(channel_state->transform_feedback_buffers);
+ replace(channel_state->compute_uniform_buffers);
+ replace(channel_state->compute_storage_buffers);
// Mark the whole buffer as CPU written to stop tracking CPU writes
if (!do_not_mark) {
@@ -1656,8 +1646,8 @@ void BufferCache<P>::DeleteBuffer(BufferId buffer_id, bool do_not_mark) {
template <class P>
void BufferCache<P>::NotifyBufferDeletion() {
if constexpr (HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS) {
- dirty_uniform_buffers.fill(~u32{0});
- uniform_buffer_binding_sizes.fill({});
+ channel_state->dirty_uniform_buffers.fill(~u32{0});
+ channel_state->uniform_buffer_binding_sizes.fill({});
}
auto& flags = maxwell3d->dirty.flags;
flags[Dirty::IndexBuffer] = true;
@@ -1665,27 +1655,27 @@ void BufferCache<P>::NotifyBufferDeletion() {
for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
flags[Dirty::VertexBuffer0 + index] = true;
}
- has_deleted_buffers = true;
+ channel_state->has_deleted_buffers = true;
}
template <class P>
-typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr,
- u32 cbuf_index,
- bool is_written) const {
+Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr, u32 cbuf_index,
+ bool is_written) const {
const GPUVAddr gpu_addr = gpu_memory->Read<u64>(ssbo_addr);
const auto size = [&]() {
const bool is_nvn_cbuf = cbuf_index == 0;
// The NVN driver buffer (index 0) is known to pack the SSBO address followed by its size.
if (is_nvn_cbuf) {
- return gpu_memory->Read<u32>(ssbo_addr + 8);
+ const u32 ssbo_size = gpu_memory->Read<u32>(ssbo_addr + 8);
+ if (ssbo_size != 0) {
+ return ssbo_size;
+ }
}
// Other titles (notably Doom Eternal) may use STG/LDG on buffer addresses in custom defined
// cbufs, which do not store the sizes adjacent to the addresses, so use the fully
// mapped buffer size for now.
const u32 memory_layout_size = static_cast<u32>(gpu_memory->GetMemoryLayoutSize(gpu_addr));
- LOG_INFO(HW_GPU, "Binding storage buffer for cbuf index {}, MemoryLayoutSize 0x{:X}",
- cbuf_index, memory_layout_size);
- return memory_layout_size;
+ return std::min(memory_layout_size, static_cast<u32>(8_MiB));
}();
const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
if (!cpu_addr || size == 0) {
@@ -1702,8 +1692,8 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
}
template <class P>
-typename BufferCache<P>::TextureBufferBinding BufferCache<P>::GetTextureBufferBinding(
- GPUVAddr gpu_addr, u32 size, PixelFormat format) {
+TextureBufferBinding BufferCache<P>::GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size,
+ PixelFormat format) {
const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
TextureBufferBinding binding;
if (!cpu_addr || size == 0) {
@@ -1742,7 +1732,7 @@ std::span<u8> BufferCache<P>::ImmediateBuffer(size_t wanted_capacity) {
template <class P>
bool BufferCache<P>::HasFastUniformBufferBound(size_t stage, u32 binding_index) const noexcept {
if constexpr (IS_OPENGL) {
- return ((fast_bound_uniform_buffers[stage] >> binding_index) & 1) != 0;
+ return ((channel_state->fast_bound_uniform_buffers[stage] >> binding_index) & 1) != 0;
} else {
// Only OpenGL has fast uniform buffers
return false;
@@ -1751,14 +1741,14 @@ bool BufferCache<P>::HasFastUniformBufferBound(size_t stage, u32 binding_index)
template <class P>
std::pair<typename BufferCache<P>::Buffer*, u32> BufferCache<P>::GetDrawIndirectCount() {
- auto& buffer = slot_buffers[count_buffer_binding.buffer_id];
- return std::make_pair(&buffer, buffer.Offset(count_buffer_binding.cpu_addr));
+ auto& buffer = slot_buffers[channel_state->count_buffer_binding.buffer_id];
+ return std::make_pair(&buffer, buffer.Offset(channel_state->count_buffer_binding.cpu_addr));
}
template <class P>
std::pair<typename BufferCache<P>::Buffer*, u32> BufferCache<P>::GetDrawIndirectBuffer() {
- auto& buffer = slot_buffers[indirect_buffer_binding.buffer_id];
- return std::make_pair(&buffer, buffer.Offset(indirect_buffer_binding.cpu_addr));
+ auto& buffer = slot_buffers[channel_state->indirect_buffer_binding.buffer_id];
+ return std::make_pair(&buffer, buffer.Offset(channel_state->indirect_buffer_binding.cpu_addr));
}
} // namespace VideoCommon
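The changes in this file are mechanical: bindings, masks and counters that used to be members of BufferCache are now read and written through channel_state, so each GPU channel keeps its own copy. A generic sketch of that shape, with hypothetical names (not the emulator's API):

    #include <array>
    #include <memory>
    #include <unordered_map>

    // Hypothetical per-channel state owned by the cache; mirrors the idea of
    // BufferCacheChannelInfo without any of its real members.
    struct ChannelStateSketch {
        std::array<unsigned, 5> enabled_uniform_buffer_masks{};
        bool has_deleted_buffers = false;
    };

    class CacheSketch {
    public:
        // Select (and lazily create) the state for the active channel.
        void BindChannel(int channel_id) {
            auto& slot = channels[channel_id];
            if (!slot) {
                slot = std::make_unique<ChannelStateSketch>();
            }
            channel_state = slot.get();
        }

        // Every access that used to touch a plain member now goes through the pointer.
        void NotifyDeletion() {
            channel_state->has_deleted_buffers = true;
        }

    private:
        std::unordered_map<int, std::unique_ptr<ChannelStateSketch>> channels;
        ChannelStateSketch* channel_state = nullptr;
    };
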
diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h
index 0445ec47f..c689fe06b 100644
--- a/src/video_core/buffer_cache/buffer_cache_base.h
+++ b/src/video_core/buffer_cache/buffer_cache_base.h
@@ -86,8 +86,78 @@ enum class ObtainBufferOperation : u32 {
MarkQuery = 3,
};
-template <typename P>
-class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
+static constexpr BufferId NULL_BUFFER_ID{0};
+static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB);
+
+struct Binding {
+ VAddr cpu_addr{};
+ u32 size{};
+ BufferId buffer_id;
+};
+
+struct TextureBufferBinding : Binding {
+ PixelFormat format;
+};
+
+static constexpr Binding NULL_BINDING{
+ .cpu_addr = 0,
+ .size = 0,
+ .buffer_id = NULL_BUFFER_ID,
+};
+
+class BufferCacheChannelInfo : public ChannelInfo {
+public:
+ BufferCacheChannelInfo() = delete;
+ BufferCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept : ChannelInfo(state) {}
+ BufferCacheChannelInfo(const BufferCacheChannelInfo& state) = delete;
+ BufferCacheChannelInfo& operator=(const BufferCacheChannelInfo&) = delete;
+
+ Binding index_buffer;
+ std::array<Binding, NUM_VERTEX_BUFFERS> vertex_buffers;
+ std::array<std::array<Binding, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES> uniform_buffers;
+ std::array<std::array<Binding, NUM_STORAGE_BUFFERS>, NUM_STAGES> storage_buffers;
+ std::array<std::array<TextureBufferBinding, NUM_TEXTURE_BUFFERS>, NUM_STAGES> texture_buffers;
+ std::array<Binding, NUM_TRANSFORM_FEEDBACK_BUFFERS> transform_feedback_buffers;
+ Binding count_buffer_binding;
+ Binding indirect_buffer_binding;
+
+ std::array<Binding, NUM_COMPUTE_UNIFORM_BUFFERS> compute_uniform_buffers;
+ std::array<Binding, NUM_STORAGE_BUFFERS> compute_storage_buffers;
+ std::array<TextureBufferBinding, NUM_TEXTURE_BUFFERS> compute_texture_buffers;
+
+ std::array<u32, NUM_STAGES> enabled_uniform_buffer_masks{};
+ u32 enabled_compute_uniform_buffer_mask = 0;
+
+ const UniformBufferSizes* uniform_buffer_sizes{};
+ const ComputeUniformBufferSizes* compute_uniform_buffer_sizes{};
+
+ std::array<u32, NUM_STAGES> enabled_storage_buffers{};
+ std::array<u32, NUM_STAGES> written_storage_buffers{};
+ u32 enabled_compute_storage_buffers = 0;
+ u32 written_compute_storage_buffers = 0;
+
+ std::array<u32, NUM_STAGES> enabled_texture_buffers{};
+ std::array<u32, NUM_STAGES> written_texture_buffers{};
+ std::array<u32, NUM_STAGES> image_texture_buffers{};
+ u32 enabled_compute_texture_buffers = 0;
+ u32 written_compute_texture_buffers = 0;
+ u32 image_compute_texture_buffers = 0;
+
+ std::array<u32, 16> uniform_cache_hits{};
+ std::array<u32, 16> uniform_cache_shots{};
+
+ u32 uniform_buffer_skip_cache_size = DEFAULT_SKIP_CACHE_SIZE;
+
+ bool has_deleted_buffers = false;
+
+ std::array<u32, NUM_STAGES> dirty_uniform_buffers{};
+ std::array<u32, NUM_STAGES> fast_bound_uniform_buffers{};
+ std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>
+ uniform_buffer_binding_sizes{};
+};
+
+template <class P>
+class BufferCache : public VideoCommon::ChannelSetupCaches<BufferCacheChannelInfo> {
// Page size for caching purposes.
// This is unrelated to the CPU page size and it can be changed as it seems optimal.
static constexpr u32 CACHING_PAGEBITS = 16;
@@ -104,8 +174,6 @@ class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelI
static constexpr bool SEPARATE_IMAGE_BUFFERS_BINDINGS = P::SEPARATE_IMAGE_BUFFER_BINDINGS;
static constexpr bool IMPLEMENTS_ASYNC_DOWNLOADS = P::IMPLEMENTS_ASYNC_DOWNLOADS;
- static constexpr BufferId NULL_BUFFER_ID{0};
-
static constexpr s64 DEFAULT_EXPECTED_MEMORY = 512_MiB;
static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB;
static constexpr s64 TARGET_THRESHOLD = 4_GiB;
@@ -149,8 +217,6 @@ class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelI
using OverlapSection = boost::icl::inter_section<int>;
using OverlapCounter = boost::icl::split_interval_map<VAddr, int>;
- struct Empty {};
-
struct OverlapResult {
std::vector<BufferId> ids;
VAddr begin;
@@ -158,25 +224,7 @@ class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelI
bool has_stream_leap = false;
};
- struct Binding {
- VAddr cpu_addr{};
- u32 size{};
- BufferId buffer_id;
- };
-
- struct TextureBufferBinding : Binding {
- PixelFormat format;
- };
-
- static constexpr Binding NULL_BINDING{
- .cpu_addr = 0,
- .size = 0,
- .buffer_id = NULL_BUFFER_ID,
- };
-
public:
- static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB);
-
explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_,
Core::Memory::Memory& cpu_memory_, Runtime& runtime_);
@@ -381,8 +429,6 @@ private:
void RunGarbageCollector();
- void WaitOnAsyncFlushes(VAddr cpu_addr, u64 size);
-
void BindHostIndexBuffer();
void BindHostVertexBuffers();
@@ -498,56 +544,10 @@ private:
u32 last_index_count = 0;
- Binding index_buffer;
- std::array<Binding, NUM_VERTEX_BUFFERS> vertex_buffers;
- std::array<std::array<Binding, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES> uniform_buffers;
- std::array<std::array<Binding, NUM_STORAGE_BUFFERS>, NUM_STAGES> storage_buffers;
- std::array<std::array<TextureBufferBinding, NUM_TEXTURE_BUFFERS>, NUM_STAGES> texture_buffers;
- std::array<Binding, NUM_TRANSFORM_FEEDBACK_BUFFERS> transform_feedback_buffers;
- Binding count_buffer_binding;
- Binding indirect_buffer_binding;
-
- std::array<Binding, NUM_COMPUTE_UNIFORM_BUFFERS> compute_uniform_buffers;
- std::array<Binding, NUM_STORAGE_BUFFERS> compute_storage_buffers;
- std::array<TextureBufferBinding, NUM_TEXTURE_BUFFERS> compute_texture_buffers;
-
- std::array<u32, NUM_STAGES> enabled_uniform_buffer_masks{};
- u32 enabled_compute_uniform_buffer_mask = 0;
-
- const UniformBufferSizes* uniform_buffer_sizes{};
- const ComputeUniformBufferSizes* compute_uniform_buffer_sizes{};
-
- std::array<u32, NUM_STAGES> enabled_storage_buffers{};
- std::array<u32, NUM_STAGES> written_storage_buffers{};
- u32 enabled_compute_storage_buffers = 0;
- u32 written_compute_storage_buffers = 0;
-
- std::array<u32, NUM_STAGES> enabled_texture_buffers{};
- std::array<u32, NUM_STAGES> written_texture_buffers{};
- std::array<u32, NUM_STAGES> image_texture_buffers{};
- u32 enabled_compute_texture_buffers = 0;
- u32 written_compute_texture_buffers = 0;
- u32 image_compute_texture_buffers = 0;
-
- std::array<u32, 16> uniform_cache_hits{};
- std::array<u32, 16> uniform_cache_shots{};
-
- u32 uniform_buffer_skip_cache_size = DEFAULT_SKIP_CACHE_SIZE;
-
- bool has_deleted_buffers = false;
-
- std::conditional_t<HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS, std::array<u32, NUM_STAGES>, Empty>
- dirty_uniform_buffers{};
- std::conditional_t<IS_OPENGL, std::array<u32, NUM_STAGES>, Empty> fast_bound_uniform_buffers{};
- std::conditional_t<HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS,
- std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFERS>, NUM_STAGES>, Empty>
- uniform_buffer_binding_sizes{};
-
MemoryTracker memory_tracker;
IntervalSet uncommitted_ranges;
IntervalSet common_ranges;
IntervalSet cached_ranges;
- IntervalSet pending_ranges;
std::deque<IntervalSet> committed_ranges;
// Async Buffers
diff --git a/src/video_core/host1x/codecs/codec.cpp b/src/video_core/host1x/codecs/codec.cpp
index 3e9022dce..cd6a3a9b8 100644
--- a/src/video_core/host1x/codecs/codec.cpp
+++ b/src/video_core/host1x/codecs/codec.cpp
@@ -5,6 +5,7 @@
#include <fstream>
#include <vector>
#include "common/assert.h"
+#include "common/scope_exit.h"
#include "common/settings.h"
#include "video_core/host1x/codecs/codec.h"
#include "video_core/host1x/codecs/h264.h"
@@ -14,6 +15,8 @@
#include "video_core/memory_manager.h"
extern "C" {
+#include <libavfilter/buffersink.h>
+#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#ifdef LIBVA_FOUND
// for querying VAAPI driver information
@@ -85,6 +88,10 @@ Codec::~Codec() {
// Free libav memory
avcodec_free_context(&av_codec_ctx);
av_buffer_unref(&av_gpu_decoder);
+
+ if (filters_initialized) {
+ avfilter_graph_free(&av_filter_graph);
+ }
}
bool Codec::CreateGpuAvDevice() {
@@ -167,6 +174,62 @@ void Codec::InitializeGpuDecoder() {
av_codec_ctx->get_format = GetGpuFormat;
}
+void Codec::InitializeAvFilters(AVFrame* frame) {
+ const AVFilter* buffer_src = avfilter_get_by_name("buffer");
+ const AVFilter* buffer_sink = avfilter_get_by_name("buffersink");
+ AVFilterInOut* inputs = avfilter_inout_alloc();
+ AVFilterInOut* outputs = avfilter_inout_alloc();
+ SCOPE_EXIT({
+ avfilter_inout_free(&inputs);
+ avfilter_inout_free(&outputs);
+ });
+
+    // The exact time_base is not known here, but it does not matter for the yadif filter,
+    // so 1/1 is used to keep the buffer source filter happy
+ std::string args = fmt::format("video_size={}x{}:pix_fmt={}:time_base=1/1", frame->width,
+ frame->height, frame->format);
+
+ av_filter_graph = avfilter_graph_alloc();
+ int ret = avfilter_graph_create_filter(&av_filter_src_ctx, buffer_src, "in", args.c_str(),
+ nullptr, av_filter_graph);
+ if (ret < 0) {
+ LOG_ERROR(Service_NVDRV, "avfilter_graph_create_filter source error: {}", ret);
+ return;
+ }
+
+ ret = avfilter_graph_create_filter(&av_filter_sink_ctx, buffer_sink, "out", nullptr, nullptr,
+ av_filter_graph);
+ if (ret < 0) {
+ LOG_ERROR(Service_NVDRV, "avfilter_graph_create_filter sink error: {}", ret);
+ return;
+ }
+
+ inputs->name = av_strdup("out");
+ inputs->filter_ctx = av_filter_sink_ctx;
+ inputs->pad_idx = 0;
+ inputs->next = nullptr;
+
+ outputs->name = av_strdup("in");
+ outputs->filter_ctx = av_filter_src_ctx;
+ outputs->pad_idx = 0;
+ outputs->next = nullptr;
+
+ const char* description = "yadif=1:-1:0";
+ ret = avfilter_graph_parse_ptr(av_filter_graph, description, &inputs, &outputs, nullptr);
+ if (ret < 0) {
+ LOG_ERROR(Service_NVDRV, "avfilter_graph_parse_ptr error: {}", ret);
+ return;
+ }
+
+ ret = avfilter_graph_config(av_filter_graph, nullptr);
+ if (ret < 0) {
+ LOG_ERROR(Service_NVDRV, "avfilter_graph_config error: {}", ret);
+ return;
+ }
+
+ filters_initialized = true;
+}
+
void Codec::Initialize() {
const AVCodecID codec = [&] {
switch (current_codec) {
@@ -271,8 +334,34 @@ void Codec::Decode() {
UNIMPLEMENTED_MSG("Unexpected video format: {}", final_frame->format);
return;
}
- av_frames.push(std::move(final_frame));
- if (av_frames.size() > 10) {
+ if (!final_frame->interlaced_frame) {
+ av_frames.push(std::move(final_frame));
+ } else {
+ if (!filters_initialized) {
+ InitializeAvFilters(final_frame.get());
+ }
+ if (const int ret = av_buffersrc_add_frame_flags(av_filter_src_ctx, final_frame.get(),
+ AV_BUFFERSRC_FLAG_KEEP_REF);
+ ret) {
+ LOG_DEBUG(Service_NVDRV, "av_buffersrc_add_frame_flags error {}", ret);
+ return;
+ }
+ while (true) {
+ auto filter_frame = AVFramePtr{av_frame_alloc(), AVFrameDeleter};
+
+ int ret = av_buffersink_get_frame(av_filter_sink_ctx, filter_frame.get());
+
+            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ break;
+ if (ret < 0) {
+ LOG_DEBUG(Service_NVDRV, "av_buffersink_get_frame error {}", ret);
+ return;
+ }
+
+ av_frames.push(std::move(filter_frame));
+ }
+ }
+ while (av_frames.size() > 10) {
LOG_TRACE(Service_NVDRV, "av_frames.push overflow dropped frame");
av_frames.pop();
}
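For reference, the drain loop over a configured buffersink generally follows the pattern below (a generic sketch based on FFmpeg's filtering examples, not the emulator's code). EOF is compared against AVERROR_EOF directly because it is already a negative error code:

    extern "C" {
    #include <libavfilter/buffersink.h>
    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    }

    // Generic sketch: pull every frame the filter graph has ready from the sink.
    template <typename Consume>
    void DrainSink(AVFilterContext* sink, Consume&& consume) {
        while (true) {
            AVFrame* frame = av_frame_alloc();
            const int ret = av_buffersink_get_frame(sink, frame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                av_frame_free(&frame); // nothing more available right now
                break;
            }
            if (ret < 0) {
                av_frame_free(&frame); // real error: stop draining
                break;
            }
            consume(frame); // hand ownership of the filtered frame to the caller
        }
    }
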
diff --git a/src/video_core/host1x/codecs/codec.h b/src/video_core/host1x/codecs/codec.h
index 0d45fb7fe..06fe00a4b 100644
--- a/src/video_core/host1x/codecs/codec.h
+++ b/src/video_core/host1x/codecs/codec.h
@@ -15,6 +15,7 @@ extern "C" {
#pragma GCC diagnostic ignored "-Wconversion"
#endif
#include <libavcodec/avcodec.h>
+#include <libavfilter/avfilter.h>
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
@@ -61,17 +62,24 @@ public:
private:
void InitializeAvCodecContext();
+ void InitializeAvFilters(AVFrame* frame);
+
void InitializeGpuDecoder();
bool CreateGpuAvDevice();
bool initialized{};
+ bool filters_initialized{};
Host1x::NvdecCommon::VideoCodec current_codec{Host1x::NvdecCommon::VideoCodec::None};
const AVCodec* av_codec{nullptr};
AVCodecContext* av_codec_ctx{nullptr};
AVBufferRef* av_gpu_decoder{nullptr};
+ AVFilterContext* av_filter_src_ctx{nullptr};
+ AVFilterContext* av_filter_sink_ctx{nullptr};
+ AVFilterGraph* av_filter_graph{nullptr};
+
Host1x::Host1x& host1x;
const Host1x::NvdecCommon::NvdecRegisters& state;
std::unique_ptr<Decoder::H264> h264_decoder;
diff --git a/src/video_core/host1x/codecs/h264.cpp b/src/video_core/host1x/codecs/h264.cpp
index e87bd65fa..6ce179167 100644
--- a/src/video_core/host1x/codecs/h264.cpp
+++ b/src/video_core/host1x/codecs/h264.cpp
@@ -111,7 +111,7 @@ const std::vector<u8>& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegist
writer.WriteUe(0);
writer.WriteBit(context.h264_parameter_set.entropy_coding_mode_flag != 0);
- writer.WriteBit(false);
+ writer.WriteBit(context.h264_parameter_set.pic_order_present_flag != 0);
writer.WriteUe(0);
writer.WriteUe(context.h264_parameter_set.num_refidx_l0_default_active);
writer.WriteUe(context.h264_parameter_set.num_refidx_l1_default_active);
@@ -129,7 +129,7 @@ const std::vector<u8>& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegist
writer.WriteBit(context.h264_parameter_set.redundant_pic_cnt_present_flag != 0);
writer.WriteBit(context.h264_parameter_set.transform_8x8_mode_flag != 0);
- writer.WriteBit(true);
+ writer.WriteBit(true); // pic_scaling_matrix_present_flag
for (s32 index = 0; index < 6; index++) {
writer.WriteBit(true);
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index 6af4ae793..6d3bda192 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -117,7 +117,7 @@ BufferCacheRuntime::BufferCacheRuntime(const Device& device_)
for (auto& stage_uniforms : fast_uniforms) {
for (OGLBuffer& buffer : stage_uniforms) {
buffer.Create();
- glNamedBufferData(buffer.handle, BufferCache::DEFAULT_SKIP_CACHE_SIZE, nullptr,
+ glNamedBufferData(buffer.handle, VideoCommon::DEFAULT_SKIP_CACHE_SIZE, nullptr,
GL_STREAM_DRAW);
}
}
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index 31118886f..1e0823836 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -233,6 +233,8 @@ void ApplySwizzle(GLuint handle, PixelFormat format, std::array<SwizzleSource, 4
const VideoCommon::ImageInfo& info) {
if (IsPixelFormatASTC(info.format) && info.size.depth == 1 && !runtime.HasNativeASTC()) {
return Settings::values.accelerate_astc.GetValue() &&
+ Settings::values.astc_recompression.GetValue() ==
+ Settings::AstcRecompression::Uncompressed &&
!Settings::values.async_astc.GetValue();
}
// Disable other accelerated uploads for now as they don't implement swizzled uploads
@@ -437,6 +439,19 @@ OGLTexture MakeImage(const VideoCommon::ImageInfo& info, GLenum gl_internal_form
return GL_R32UI;
}
+[[nodiscard]] GLenum SelectAstcFormat(PixelFormat format, bool is_srgb) {
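+    // Map the ASTC recompression setting to a GL internal format: BC1 maps to DXT1, BC3 to DXT5,
+    // and anything else falls back to uncompressed RGBA8.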
+ switch (Settings::values.astc_recompression.GetValue()) {
+    case Settings::AstcRecompression::Bc1:
+        return is_srgb ? GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT : GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
+    case Settings::AstcRecompression::Bc3:
+        return is_srgb ? GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT : GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
+ default:
+ return is_srgb ? GL_SRGB8_ALPHA8 : GL_RGBA8;
+ }
+}
+
} // Anonymous namespace
ImageBufferMap::~ImageBufferMap() {
@@ -739,9 +754,16 @@ Image::Image(TextureCacheRuntime& runtime_, const VideoCommon::ImageInfo& info_,
if (IsConverted(runtime->device, info.format, info.type)) {
flags |= ImageFlagBits::Converted;
flags |= ImageFlagBits::CostlyLoad;
- gl_internal_format = IsPixelFormatSRGB(info.format) ? GL_SRGB8_ALPHA8 : GL_RGBA8;
+
+ const bool is_srgb = IsPixelFormatSRGB(info.format);
+ gl_internal_format = is_srgb ? GL_SRGB8_ALPHA8 : GL_RGBA8;
gl_format = GL_RGBA;
gl_type = GL_UNSIGNED_INT_8_8_8_8_REV;
+
+ if (IsPixelFormatASTC(info.format)) {
+ gl_internal_format = SelectAstcFormat(info.format, is_srgb);
+ gl_format = GL_NONE;
+ }
} else {
const auto& tuple = MaxwellToGL::GetFormatTuple(info.format);
gl_internal_format = tuple.internal_format;
@@ -1130,7 +1152,12 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
views{runtime.null_image_views} {
const Device& device = runtime.device;
if (True(image.flags & ImageFlagBits::Converted)) {
- internal_format = IsPixelFormatSRGB(info.format) ? GL_SRGB8_ALPHA8 : GL_RGBA8;
+ const bool is_srgb = IsPixelFormatSRGB(info.format);
+ internal_format = is_srgb ? GL_SRGB8_ALPHA8 : GL_RGBA8;
+
+ if (IsPixelFormatASTC(info.format)) {
+ internal_format = SelectAstcFormat(info.format, is_srgb);
+ }
} else {
internal_format = MaxwellToGL::GetFormatTuple(format).internal_format;
}
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h
index 1190999a8..3e9b3302b 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.h
+++ b/src/video_core/renderer_opengl/gl_texture_cache.h
@@ -144,6 +144,10 @@ public:
return state_tracker;
}
+ void BarrierFeedbackLoop() const noexcept {
+ // OpenGL does not require a barrier for attachment feedback loops.
+ }
+
private:
struct StagingBuffers {
explicit StagingBuffers(GLenum storage_flags_, GLenum map_flags_);
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 8853cf0f7..b75d7220d 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -6,6 +6,7 @@
#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
+#include "common/settings.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/surface.h"
@@ -237,14 +238,25 @@ FormatInfo SurfaceFormat(const Device& device, FormatType format_type, bool with
PixelFormat pixel_format) {
ASSERT(static_cast<size_t>(pixel_format) < std::size(tex_format_tuples));
FormatTuple tuple = tex_format_tuples[static_cast<size_t>(pixel_format)];
- // Use A8B8G8R8_UNORM on hardware that doesn't support ASTC natively
+ // Transcode on hardware that doesn't support ASTC natively
if (!device.IsOptimalAstcSupported() && VideoCore::Surface::IsPixelFormatASTC(pixel_format)) {
const bool is_srgb = with_srgb && VideoCore::Surface::IsPixelFormatSRGB(pixel_format);
- if (is_srgb) {
- tuple.format = VK_FORMAT_A8B8G8R8_SRGB_PACK32;
- } else {
- tuple.format = VK_FORMAT_A8B8G8R8_UNORM_PACK32;
- tuple.usage |= Storage;
+
+ switch (Settings::values.astc_recompression.GetValue()) {
+ case Settings::AstcRecompression::Uncompressed:
+ if (is_srgb) {
+ tuple.format = VK_FORMAT_A8B8G8R8_SRGB_PACK32;
+ } else {
+ tuple.format = VK_FORMAT_A8B8G8R8_UNORM_PACK32;
+ tuple.usage |= Storage;
+ }
+ break;
+ case Settings::AstcRecompression::Bc1:
+ tuple.format = is_srgb ? VK_FORMAT_BC1_RGBA_SRGB_BLOCK : VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
+ break;
+ case Settings::AstcRecompression::Bc3:
+ tuple.format = is_srgb ? VK_FORMAT_BC3_SRGB_BLOCK : VK_FORMAT_BC3_UNORM_BLOCK;
+ break;
}
}
const bool attachable = (tuple.usage & Attachable) != 0;
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index f1bcd5cd6..506b78f08 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -481,12 +481,13 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
if constexpr (Spec::enabled_stages[4]) {
prepare_stage(4);
}
+ texture_cache.UpdateRenderTargets(false);
+ texture_cache.CheckFeedbackLoop(views);
ConfigureDraw(rescaling, render_area);
}
void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling,
const RenderAreaPushConstant& render_area) {
- texture_cache.UpdateRenderTargets(false);
scheduler.RequestRenderpass(texture_cache.GetFramebuffer());
if (!is_built.load(std::memory_order::relaxed)) {
diff --git a/src/video_core/renderer_vulkan/vk_master_semaphore.cpp b/src/video_core/renderer_vulkan/vk_master_semaphore.cpp
index 47c74e4d8..8b65aeaeb 100644
--- a/src/video_core/renderer_vulkan/vk_master_semaphore.cpp
+++ b/src/video_core/renderer_vulkan/vk_master_semaphore.cpp
@@ -10,11 +10,16 @@
namespace Vulkan {
+constexpr u64 FENCE_RESERVE_SIZE = 8;
+
MasterSemaphore::MasterSemaphore(const Device& device_) : device(device_) {
if (!device.HasTimelineSemaphore()) {
static constexpr VkFenceCreateInfo fence_ci{
.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, .pNext = nullptr, .flags = 0};
- fence = device.GetLogical().CreateFence(fence_ci);
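+        // Without timeline semaphore support, keep a small pool of fences and a helper thread
+        // that waits on them.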
+ free_queue.resize(FENCE_RESERVE_SIZE);
+ std::ranges::generate(free_queue,
+ [&] { return device.GetLogical().CreateFence(fence_ci); });
+ wait_thread = std::jthread([this](std::stop_token token) { WaitThread(token); });
return;
}
@@ -167,16 +172,53 @@ VkResult MasterSemaphore::SubmitQueueFence(vk::CommandBuffer& cmdbuf, VkSemaphor
.pSignalSemaphores = &signal_semaphore,
};
+ auto fence = GetFreeFence();
auto result = device.GetGraphicsQueue().Submit(submit_info, *fence);
if (result == VK_SUCCESS) {
+ std::scoped_lock lock{wait_mutex};
+ wait_queue.emplace(host_tick, std::move(fence));
+ wait_cv.notify_one();
+ }
+
+ return result;
+}
+
+void MasterSemaphore::WaitThread(std::stop_token token) {
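+    // Wait on submitted fences in order, publish the completed GPU tick, then recycle the fence.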
+ while (!token.stop_requested()) {
+ u64 host_tick;
+ vk::Fence fence;
+ {
+ std::unique_lock lock{wait_mutex};
+ Common::CondvarWait(wait_cv, lock, token, [this] { return !wait_queue.empty(); });
+ if (token.stop_requested()) {
+ return;
+ }
+ std::tie(host_tick, fence) = std::move(wait_queue.front());
+ wait_queue.pop();
+ }
+
fence.Wait();
fence.Reset();
gpu_tick.store(host_tick);
gpu_tick.notify_all();
+
+ std::scoped_lock lock{free_mutex};
+ free_queue.push_front(std::move(fence));
}
+}
- return result;
+vk::Fence MasterSemaphore::GetFreeFence() {
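+    // Reuse a pooled fence when available; otherwise create a new one on demand.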
+ std::scoped_lock lock{free_mutex};
+ if (free_queue.empty()) {
+ static constexpr VkFenceCreateInfo fence_ci{
+ .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, .pNext = nullptr, .flags = 0};
+ return device.GetLogical().CreateFence(fence_ci);
+ }
+
+ auto fence = std::move(free_queue.back());
+ free_queue.pop_back();
+ return fence;
}
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_master_semaphore.h b/src/video_core/renderer_vulkan/vk_master_semaphore.h
index f2f61f781..1e7c90215 100644
--- a/src/video_core/renderer_vulkan/vk_master_semaphore.h
+++ b/src/video_core/renderer_vulkan/vk_master_semaphore.h
@@ -5,8 +5,10 @@
#include <atomic>
#include <condition_variable>
+#include <deque>
#include <mutex>
+#include <queue>
#include <thread>
#include "common/common_types.h"
#include "common/polyfill_thread.h"
@@ -17,6 +19,8 @@ namespace Vulkan {
class Device;
class MasterSemaphore {
+ using Waitable = std::pair<u64, vk::Fence>;
+
public:
explicit MasterSemaphore(const Device& device);
~MasterSemaphore();
@@ -57,13 +61,22 @@ private:
VkResult SubmitQueueFence(vk::CommandBuffer& cmdbuf, VkSemaphore signal_semaphore,
VkSemaphore wait_semaphore, u64 host_tick);
+ void WaitThread(std::stop_token token);
+
+ vk::Fence GetFreeFence();
+
private:
const Device& device; ///< Device.
- vk::Fence fence; ///< Fence.
vk::Semaphore semaphore; ///< Timeline semaphore.
std::atomic<u64> gpu_tick{0}; ///< Current known GPU tick.
std::atomic<u64> current_tick{1}; ///< Current logical tick.
+ std::mutex wait_mutex;
+ std::mutex free_mutex;
+ std::condition_variable_any wait_cv;
+ std::queue<Waitable> wait_queue; ///< Queue for the fences to be waited on by the wait thread.
+ std::deque<vk::Fence> free_queue; ///< Holds available fences for submission.
std::jthread debug_thread; ///< Debug thread to workaround validation layer bugs.
+ std::jthread wait_thread; ///< Helper thread that waits for submitted fences.
};
} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 596996bec..66dfe5733 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -698,7 +698,8 @@ std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline(
PipelineStatistics* statistics, bool build_in_parallel) try {
// TODO: Remove this when Intel fixes their shader compiler.
// https://github.com/IGCIT/Intel-GPU-Community-Issue-Tracker-IGCIT/issues/159
- if (device.GetDriverID() == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS) {
+ if (device.GetDriverID() == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS &&
+ !Settings::values.enable_compute_pipelines.GetValue()) {
LOG_ERROR(Render_Vulkan, "Skipping 0x{:016x}", key.Hash());
return nullptr;
}
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 64bd2f6a5..8d3a9736b 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -348,25 +348,12 @@ void RasterizerVulkan::Clear(u32 layer_count) {
const u32 color_attachment = regs.clear_surface.RT;
if (use_color && framebuffer->HasAspectColorBit(color_attachment)) {
- VkClearValue clear_value;
- bool is_integer = false;
- bool is_signed = false;
- size_t int_size = 8;
- for (std::size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets; ++i) {
- const auto& this_rt = regs.rt[i];
- if (this_rt.Address() == 0) {
- continue;
- }
- if (this_rt.format == Tegra::RenderTargetFormat::NONE) {
- continue;
- }
- const auto format =
- VideoCore::Surface::PixelFormatFromRenderTargetFormat(this_rt.format);
- is_integer = IsPixelFormatInteger(format);
- is_signed = IsPixelFormatSignedInteger(format);
- int_size = PixelComponentSizeBitsInteger(format);
- break;
- }
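+        // Use the format of the render target being cleared to select the clear value type.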
+ const auto format =
+ VideoCore::Surface::PixelFormatFromRenderTargetFormat(regs.rt[color_attachment].format);
+ bool is_integer = IsPixelFormatInteger(format);
+ bool is_signed = IsPixelFormatSignedInteger(format);
+ size_t int_size = PixelComponentSizeBitsInteger(format);
+ VkClearValue clear_value{};
if (!is_integer) {
std::memcpy(clear_value.color.float32, regs.clear_color.data(),
regs.clear_color.size() * sizeof(f32));
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp
index 1e80ce463..8c0dec590 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.cpp
+++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp
@@ -34,8 +34,8 @@ VkSurfaceFormatKHR ChooseSwapSurfaceFormat(vk::Span<VkSurfaceFormatKHR> formats)
return found != formats.end() ? *found : formats[0];
}
-static constexpr VkPresentModeKHR ChooseSwapPresentMode(bool has_imm, bool has_mailbox,
- bool has_fifo_relaxed) {
+static VkPresentModeKHR ChooseSwapPresentMode(bool has_imm, bool has_mailbox,
+ bool has_fifo_relaxed) {
// Mailbox doesn't lock the application like FIFO (vsync)
// FIFO present mode locks the framerate to the monitor's refresh rate
Settings::VSyncMode setting = [has_imm, has_mailbox]() {
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 4d0481f2a..8711e2a87 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -861,6 +861,10 @@ VkBuffer TextureCacheRuntime::GetTemporaryBuffer(size_t needed_size) {
return *buffers[level];
}
+void TextureCacheRuntime::BarrierFeedbackLoop() {
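+    // Sampling an image that is also bound as a render target requires ending the current
+    // render pass before the draw.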
+ scheduler.RequestOutsideRenderPassOperationContext();
+}
+
void TextureCacheRuntime::ReinterpretImage(Image& dst, Image& src,
std::span<const VideoCommon::ImageCopy> copies) {
std::vector<VkBufferImageCopy> vk_in_copies(copies.size());
@@ -1268,7 +1272,9 @@ Image::Image(TextureCacheRuntime& runtime_, const ImageInfo& info_, GPUVAddr gpu
if (IsPixelFormatASTC(info.format) && !runtime->device.IsOptimalAstcSupported()) {
if (Settings::values.async_astc.GetValue()) {
flags |= VideoCommon::ImageFlagBits::AsynchronousDecode;
- } else if (Settings::values.accelerate_astc.GetValue() && info.size.depth == 1) {
+ } else if (Settings::values.astc_recompression.GetValue() ==
+ Settings::AstcRecompression::Uncompressed &&
+ Settings::values.accelerate_astc.GetValue() && info.size.depth == 1) {
flags |= VideoCommon::ImageFlagBits::AcceleratedUpload;
}
flags |= VideoCommon::ImageFlagBits::Converted;
@@ -1283,7 +1289,9 @@ Image::Image(TextureCacheRuntime& runtime_, const ImageInfo& info_, GPUVAddr gpu
.usage = VK_IMAGE_USAGE_STORAGE_BIT,
};
current_image = *original_image;
- if (IsPixelFormatASTC(info.format) && !runtime->device.IsOptimalAstcSupported()) {
+ if (IsPixelFormatASTC(info.format) && !runtime->device.IsOptimalAstcSupported() &&
+ Settings::values.astc_recompression.GetValue() ==
+ Settings::AstcRecompression::Uncompressed) {
const auto& device = runtime->device.GetLogical();
storage_image_views.reserve(info.resources.levels);
for (s32 level = 0; level < info.resources.levels; ++level) {
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 4166b3d20..0f7a5ffd4 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -103,6 +103,8 @@ public:
[[nodiscard]] VkBuffer GetTemporaryBuffer(size_t needed_size);
+ void BarrierFeedbackLoop();
+
const Device& device;
Scheduler& scheduler;
MemoryAllocator& memory_allocator;
diff --git a/src/video_core/texture_cache/image_base.cpp b/src/video_core/texture_cache/image_base.cpp
index 91512022f..d79594ce5 100644
--- a/src/video_core/texture_cache/image_base.cpp
+++ b/src/video_core/texture_cache/image_base.cpp
@@ -155,7 +155,7 @@ void ImageBase::CheckAliasState() {
flags &= ~ImageFlagBits::Alias;
}
-void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_id) {
+bool AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_id) {
static constexpr auto OPTIONS = RelaxedOptions::Size | RelaxedOptions::Format;
ASSERT(lhs.info.type == rhs.info.type);
std::optional<SubresourceBase> base;
@@ -169,7 +169,7 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
}
if (!base) {
LOG_ERROR(HW_GPU, "Image alias should have been flipped");
- return;
+ return false;
}
const PixelFormat lhs_format = lhs.info.format;
const PixelFormat rhs_format = rhs.info.format;
@@ -248,12 +248,13 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
}
ASSERT(lhs_alias.copies.empty() == rhs_alias.copies.empty());
if (lhs_alias.copies.empty()) {
- return;
+ return false;
}
lhs.aliased_images.push_back(std::move(lhs_alias));
rhs.aliased_images.push_back(std::move(rhs_alias));
lhs.flags &= ~ImageFlagBits::IsRescalable;
rhs.flags &= ~ImageFlagBits::IsRescalable;
+ return true;
}
} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/image_base.h b/src/video_core/texture_cache/image_base.h
index 329396bb6..1b8a17ee8 100644
--- a/src/video_core/texture_cache/image_base.h
+++ b/src/video_core/texture_cache/image_base.h
@@ -142,6 +142,6 @@ struct ImageAllocBase {
std::vector<ImageId> images;
};
-void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_id);
+bool AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_id);
} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index b24086fce..2cf082c5d 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -49,8 +49,8 @@ TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface&
if constexpr (HAS_DEVICE_MEMORY_INFO) {
const s64 device_memory = static_cast<s64>(runtime.GetDeviceLocalMemory());
- const s64 min_spacing_expected = device_memory - 1_GiB - 512_MiB;
- const s64 min_spacing_critical = device_memory - 1_GiB;
+ const s64 min_spacing_expected = device_memory - 1_GiB;
+ const s64 min_spacing_critical = device_memory - 512_MiB;
const s64 mem_threshold = std::min(device_memory, TARGET_THRESHOLD);
const s64 min_vacancy_expected = (6 * mem_threshold) / 10;
const s64 min_vacancy_critical = (3 * mem_threshold) / 10;
@@ -86,10 +86,12 @@ void TextureCache<P>::RunGarbageCollector() {
// used by the async decoder thread.
return false;
}
+ if (!aggressive_mode && True(image.flags & ImageFlagBits::CostlyLoad)) {
+ return false;
+ }
const bool must_download =
image.IsSafeDownload() && False(image.flags & ImageFlagBits::BadOverlap);
- if (!high_priority_mode &&
- (must_download || True(image.flags & ImageFlagBits::CostlyLoad))) {
+ if (!high_priority_mode && must_download) {
return false;
}
if (must_download) {
@@ -137,7 +139,6 @@ void TextureCache<P>::TickFrame() {
TickAsyncDecode();
runtime.TickFrame();
- critical_gc = 0;
++frame_tick;
if constexpr (IMPLEMENTS_ASYNC_DOWNLOADS) {
@@ -184,6 +185,42 @@ void TextureCache<P>::FillComputeImageViews(std::span<ImageViewInOut> views) {
}
template <class P>
+void TextureCache<P>::CheckFeedbackLoop(std::span<const ImageViewInOut> views) {
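+    // A feedback loop occurs when one of the sampled image views aliases a bound render target.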
+ const bool requires_barrier = [&] {
+ for (const auto& view : views) {
+ if (!view.id) {
+ continue;
+ }
+ auto& image_view = slot_image_views[view.id];
+
+ // Check color targets
+ for (const auto& ct_view_id : render_targets.color_buffer_ids) {
+ if (ct_view_id) {
+ auto& ct_view = slot_image_views[ct_view_id];
+ if (image_view.image_id == ct_view.image_id) {
+ return true;
+ }
+ }
+ }
+
+ // Check zeta target
+ if (render_targets.depth_buffer_id) {
+ auto& zt_view = slot_image_views[render_targets.depth_buffer_id];
+ if (image_view.image_id == zt_view.image_id) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }();
+
+ if (requires_barrier) {
+ runtime.BarrierFeedbackLoop();
+ }
+}
+
+template <class P>
typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) {
if (index > channel_state->graphics_sampler_table.Limit()) {
LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index);
@@ -1274,17 +1311,18 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
const size_t size_bytes = CalculateGuestSizeInBytes(new_info);
const bool broken_views = runtime.HasBrokenTextureViewFormats();
const bool native_bgr = runtime.HasNativeBgr();
- boost::container::small_vector<ImageId, 4> overlap_ids;
- std::unordered_set<ImageId> overlaps_found;
- boost::container::small_vector<ImageId, 4> left_aliased_ids;
- boost::container::small_vector<ImageId, 4> right_aliased_ids;
- std::unordered_set<ImageId> ignore_textures;
- boost::container::small_vector<ImageId, 4> bad_overlap_ids;
- boost::container::small_vector<ImageId, 4> all_siblings;
+ join_overlap_ids.clear();
+ join_overlaps_found.clear();
+ join_left_aliased_ids.clear();
+ join_right_aliased_ids.clear();
+ join_ignore_textures.clear();
+ join_bad_overlap_ids.clear();
+ join_copies_to_do.clear();
+ join_alias_indices.clear();
const bool this_is_linear = info.type == ImageType::Linear;
const auto region_check = [&](ImageId overlap_id, ImageBase& overlap) {
if (True(overlap.flags & ImageFlagBits::Remapped)) {
- ignore_textures.insert(overlap_id);
+ join_ignore_textures.insert(overlap_id);
return;
}
const bool overlap_is_linear = overlap.info.type == ImageType::Linear;
@@ -1294,11 +1332,11 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
if (this_is_linear && overlap_is_linear) {
if (info.pitch == overlap.info.pitch && gpu_addr == overlap.gpu_addr) {
// Alias linear images with the same pitch
- left_aliased_ids.push_back(overlap_id);
+ join_left_aliased_ids.push_back(overlap_id);
}
return;
}
- overlaps_found.insert(overlap_id);
+ join_overlaps_found.insert(overlap_id);
static constexpr bool strict_size = true;
const std::optional<OverlapResult> solution = ResolveOverlap(
new_info, gpu_addr, cpu_addr, overlap, strict_size, broken_views, native_bgr);
@@ -1306,33 +1344,33 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
gpu_addr = solution->gpu_addr;
cpu_addr = solution->cpu_addr;
new_info.resources = solution->resources;
- overlap_ids.push_back(overlap_id);
- all_siblings.push_back(overlap_id);
+ join_overlap_ids.push_back(overlap_id);
+ join_copies_to_do.emplace_back(JoinCopy{false, overlap_id});
return;
}
static constexpr auto options = RelaxedOptions::Size | RelaxedOptions::Format;
const ImageBase new_image_base(new_info, gpu_addr, cpu_addr);
if (IsSubresource(new_info, overlap, gpu_addr, options, broken_views, native_bgr)) {
- left_aliased_ids.push_back(overlap_id);
+ join_left_aliased_ids.push_back(overlap_id);
overlap.flags |= ImageFlagBits::Alias;
- all_siblings.push_back(overlap_id);
+ join_copies_to_do.emplace_back(JoinCopy{true, overlap_id});
} else if (IsSubresource(overlap.info, new_image_base, overlap.gpu_addr, options,
broken_views, native_bgr)) {
- right_aliased_ids.push_back(overlap_id);
+ join_right_aliased_ids.push_back(overlap_id);
overlap.flags |= ImageFlagBits::Alias;
- all_siblings.push_back(overlap_id);
+ join_copies_to_do.emplace_back(JoinCopy{true, overlap_id});
} else {
- bad_overlap_ids.push_back(overlap_id);
+ join_bad_overlap_ids.push_back(overlap_id);
}
};
ForEachImageInRegion(cpu_addr, size_bytes, region_check);
const auto region_check_gpu = [&](ImageId overlap_id, ImageBase& overlap) {
- if (!overlaps_found.contains(overlap_id)) {
+ if (!join_overlaps_found.contains(overlap_id)) {
if (True(overlap.flags & ImageFlagBits::Remapped)) {
- ignore_textures.insert(overlap_id);
+ join_ignore_textures.insert(overlap_id);
}
if (overlap.gpu_addr == gpu_addr && overlap.guest_size_bytes == size_bytes) {
- ignore_textures.insert(overlap_id);
+ join_ignore_textures.insert(overlap_id);
}
}
};
@@ -1340,11 +1378,11 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
bool can_rescale = info.rescaleable;
bool any_rescaled = false;
- for (const ImageId sibling_id : all_siblings) {
+ for (const auto& copy : join_copies_to_do) {
if (!can_rescale) {
break;
}
- Image& sibling = slot_images[sibling_id];
+ Image& sibling = slot_images[copy.id];
can_rescale &= ImageCanRescale(sibling);
any_rescaled |= True(sibling.flags & ImageFlagBits::Rescaled);
}
@@ -1352,13 +1390,13 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
can_rescale &= any_rescaled;
if (can_rescale) {
- for (const ImageId sibling_id : all_siblings) {
- Image& sibling = slot_images[sibling_id];
+ for (const auto& copy : join_copies_to_do) {
+ Image& sibling = slot_images[copy.id];
ScaleUp(sibling);
}
} else {
- for (const ImageId sibling_id : all_siblings) {
- Image& sibling = slot_images[sibling_id];
+ for (const auto& copy : join_copies_to_do) {
+ Image& sibling = slot_images[copy.id];
ScaleDown(sibling);
}
}
@@ -1370,7 +1408,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
new_image.flags |= ImageFlagBits::Sparse;
}
- for (const ImageId overlap_id : ignore_textures) {
+ for (const ImageId overlap_id : join_ignore_textures) {
Image& overlap = slot_images[overlap_id];
if (True(overlap.flags & ImageFlagBits::GpuModified)) {
UNIMPLEMENTED();
@@ -1391,14 +1429,60 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
ScaleDown(new_image);
}
- std::ranges::sort(overlap_ids, [this](const ImageId lhs, const ImageId rhs) {
- const ImageBase& lhs_image = slot_images[lhs];
- const ImageBase& rhs_image = slot_images[rhs];
+ std::ranges::sort(join_copies_to_do, [this](const JoinCopy& lhs, const JoinCopy& rhs) {
+ const ImageBase& lhs_image = slot_images[lhs.id];
+ const ImageBase& rhs_image = slot_images[rhs.id];
return lhs_image.modification_tick < rhs_image.modification_tick;
});
- for (const ImageId overlap_id : overlap_ids) {
- Image& overlap = slot_images[overlap_id];
+ ImageBase& new_image_base = new_image;
+ for (const ImageId aliased_id : join_right_aliased_ids) {
+ ImageBase& aliased = slot_images[aliased_id];
+ size_t alias_index = new_image_base.aliased_images.size();
+ if (!AddImageAlias(new_image_base, aliased, new_image_id, aliased_id)) {
+ continue;
+ }
+ join_alias_indices.emplace(aliased_id, alias_index);
+ new_image.flags |= ImageFlagBits::Alias;
+ }
+ for (const ImageId aliased_id : join_left_aliased_ids) {
+ ImageBase& aliased = slot_images[aliased_id];
+ size_t alias_index = new_image_base.aliased_images.size();
+ if (!AddImageAlias(aliased, new_image_base, aliased_id, new_image_id)) {
+ continue;
+ }
+ join_alias_indices.emplace(aliased_id, alias_index);
+ new_image.flags |= ImageFlagBits::Alias;
+ }
+ for (const ImageId aliased_id : join_bad_overlap_ids) {
+ ImageBase& aliased = slot_images[aliased_id];
+ aliased.overlapping_images.push_back(new_image_id);
+ new_image.overlapping_images.push_back(aliased_id);
+ if (aliased.info.resources.levels == 1 && aliased.info.block.depth == 0 &&
+ aliased.overlapping_images.size() > 1) {
+ aliased.flags |= ImageFlagBits::BadOverlap;
+ }
+ if (new_image.info.resources.levels == 1 && new_image.info.block.depth == 0 &&
+ new_image.overlapping_images.size() > 1) {
+ new_image.flags |= ImageFlagBits::BadOverlap;
+ }
+ }
+
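+    // Copies run in modification-tick order so that newer data overwrites older data.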
+ for (const auto& copy_object : join_copies_to_do) {
+ Image& overlap = slot_images[copy_object.id];
+ if (copy_object.is_alias) {
+ if (!overlap.IsSafeDownload()) {
+ continue;
+ }
+ const auto alias_pointer = join_alias_indices.find(copy_object.id);
+ if (alias_pointer == join_alias_indices.end()) {
+ continue;
+ }
+ const AliasedImage& aliased = new_image.aliased_images[alias_pointer->second];
+ CopyImage(new_image_id, aliased.id, aliased.copies);
+ new_image.modification_tick = overlap.modification_tick;
+ continue;
+ }
if (True(overlap.flags & ImageFlagBits::GpuModified)) {
new_image.flags |= ImageFlagBits::GpuModified;
const auto& resolution = Settings::values.resolution_info;
@@ -1411,35 +1495,15 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
} else {
runtime.CopyImage(new_image, overlap, std::move(copies));
}
+ new_image.modification_tick = overlap.modification_tick;
}
if (True(overlap.flags & ImageFlagBits::Tracked)) {
- UntrackImage(overlap, overlap_id);
- }
- UnregisterImage(overlap_id);
- DeleteImage(overlap_id);
- }
- ImageBase& new_image_base = new_image;
- for (const ImageId aliased_id : right_aliased_ids) {
- ImageBase& aliased = slot_images[aliased_id];
- AddImageAlias(new_image_base, aliased, new_image_id, aliased_id);
- new_image.flags |= ImageFlagBits::Alias;
- }
- for (const ImageId aliased_id : left_aliased_ids) {
- ImageBase& aliased = slot_images[aliased_id];
- AddImageAlias(aliased, new_image_base, aliased_id, new_image_id);
- new_image.flags |= ImageFlagBits::Alias;
- }
- for (const ImageId aliased_id : bad_overlap_ids) {
- ImageBase& aliased = slot_images[aliased_id];
- aliased.overlapping_images.push_back(new_image_id);
- new_image.overlapping_images.push_back(aliased_id);
- if (aliased.info.resources.levels == 1 && aliased.overlapping_images.size() > 1) {
- aliased.flags |= ImageFlagBits::BadOverlap;
- }
- if (new_image.info.resources.levels == 1 && new_image.overlapping_images.size() > 1) {
- new_image.flags |= ImageFlagBits::BadOverlap;
+ UntrackImage(overlap, copy_object.id);
}
+ UnregisterImage(copy_object.id);
+ DeleteImage(copy_object.id);
}
+
RegisterImage(new_image_id);
return new_image_id;
}
@@ -1469,7 +1533,7 @@ std::optional<typename TextureCache<P>::BlitImages> TextureCache<P>::GetBlitImag
if (!copy.must_accelerate) {
do {
if (!src_id && !dst_id) {
- break;
+ return std::nullopt;
}
if (src_id && True(slot_images[src_id].flags & ImageFlagBits::GpuModified)) {
break;
@@ -1847,10 +1911,6 @@ void TextureCache<P>::RegisterImage(ImageId image_id) {
tentative_size = EstimatedDecompressedSize(tentative_size, image.info.format);
}
total_used_memory += Common::AlignUp(tentative_size, 1024);
- if (total_used_memory > critical_memory && critical_gc < GC_EMERGENCY_COUNTS) {
- RunGarbageCollector();
- critical_gc++;
- }
image.lru_index = lru_cache.Insert(image_id, frame_tick);
ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, image_id](u64 page) {
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 0720494e5..3bfa92154 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -10,7 +10,9 @@
#include <span>
#include <type_traits>
#include <unordered_map>
+#include <unordered_set>
#include <vector>
+#include <boost/container/small_vector.hpp>
#include <queue>
#include "common/common_types.h"
@@ -148,6 +150,9 @@ public:
/// Fill image_view_ids with the compute images in indices
void FillComputeImageViews(std::span<ImageViewInOut> views);
+ /// Handle feedback loops during draws.
+ void CheckFeedbackLoop(std::span<const ImageViewInOut> views);
+
/// Get the sampler from the graphics descriptor table in the specified index
Sampler* GetGraphicsSampler(u32 index);
@@ -424,7 +429,6 @@ private:
u64 minimum_memory;
u64 expected_memory;
u64 critical_memory;
- size_t critical_gc;
struct BufferDownload {
GPUVAddr address;
@@ -474,6 +478,20 @@ private:
Common::ThreadWorker texture_decode_worker{1, "TextureDecoder"};
std::vector<std::unique_ptr<AsyncDecodeContext>> async_decodes;
+
+ // Join caching
+ boost::container::small_vector<ImageId, 4> join_overlap_ids;
+ std::unordered_set<ImageId> join_overlaps_found;
+ boost::container::small_vector<ImageId, 4> join_left_aliased_ids;
+ boost::container::small_vector<ImageId, 4> join_right_aliased_ids;
+ std::unordered_set<ImageId> join_ignore_textures;
+ boost::container::small_vector<ImageId, 4> join_bad_overlap_ids;
+ struct JoinCopy {
+ bool is_alias;
+ ImageId id;
+ };
+ boost::container::small_vector<JoinCopy, 4> join_copies_to_do;
+ std::unordered_map<ImageId, size_t> join_alias_indices;
};
} // namespace VideoCommon
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index f1071aa23..95a5b47d8 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -18,6 +18,8 @@
#include "common/bit_util.h"
#include "common/common_types.h"
#include "common/div_ceil.h"
+#include "common/scratch_buffer.h"
+#include "common/settings.h"
#include "video_core/compatible_formats.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/memory_manager.h"
@@ -28,6 +30,7 @@
#include "video_core/texture_cache/samples_helper.h"
#include "video_core/texture_cache/util.h"
#include "video_core/textures/astc.h"
+#include "video_core/textures/bcn.h"
#include "video_core/textures/decoders.h"
namespace VideoCommon {
@@ -120,7 +123,9 @@ template <u32 GOB_EXTENT>
return {
.width = AdjustMipBlockSize<GOB_SIZE_X>(num_tiles.width, block_size.width, level),
.height = AdjustMipBlockSize<GOB_SIZE_Y>(num_tiles.height, block_size.height, level),
- .depth = AdjustMipBlockSize<GOB_SIZE_Z>(num_tiles.depth, block_size.depth, level),
+ .depth = level == 0
+ ? block_size.depth
+ : AdjustMipBlockSize<GOB_SIZE_Z>(num_tiles.depth, block_size.depth, level),
};
}
@@ -162,6 +167,13 @@ template <u32 GOB_EXTENT>
}
[[nodiscard]] constexpr Extent3D TileShift(const LevelInfo& info, u32 level) {
+ if (level == 0) {
+ return Extent3D{
+ .width = info.block.width,
+ .height = info.block.height,
+ .depth = info.block.depth,
+ };
+ }
const Extent3D blocks = NumLevelBlocks(info, level);
return Extent3D{
.width = AdjustTileSize(info.block.width, GOB_SIZE_X, blocks.width),
@@ -585,6 +597,21 @@ u32 CalculateConvertedSizeBytes(const ImageInfo& info) noexcept {
return info.size.width * BytesPerBlock(info.format);
}
static constexpr Extent2D TILE_SIZE{1, 1};
+ if (IsPixelFormatASTC(info.format) && Settings::values.astc_recompression.GetValue() !=
+ Settings::AstcRecompression::Uncompressed) {
+ const u32 bpp_div =
+ Settings::values.astc_recompression.GetValue() == Settings::AstcRecompression::Bc1 ? 2
+ : 1;
+ // NumBlocksPerLayer doesn't account for this correctly, so we have to do it manually.
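+        // After recompression, BC1 stores 0.5 bytes per texel and BC3 stores 1 byte per texel.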
+ u32 output_size = 0;
+ for (s32 i = 0; i < info.resources.levels; i++) {
+ const auto mip_size = AdjustMipSize(info.size, i);
+ const u32 plane_dim =
+ Common::AlignUp(mip_size.width, 4U) * Common::AlignUp(mip_size.height, 4U);
+ output_size += (plane_dim * info.size.depth * info.resources.layers) / bpp_div;
+ }
+ return output_size;
+ }
return NumBlocksPerLayer(info, TILE_SIZE) * info.resources.layers * CONVERTED_BYTES_PER_BLOCK;
}
@@ -885,6 +912,7 @@ BufferCopy UploadBufferCopy(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr,
void ConvertImage(std::span<const u8> input, const ImageInfo& info, std::span<u8> output,
std::span<BufferImageCopy> copies) {
u32 output_offset = 0;
+ Common::ScratchBuffer<u8> decode_scratch;
const Extent2D tile_size = DefaultBlockSize(info.format);
for (BufferImageCopy& copy : copies) {
@@ -895,22 +923,58 @@ void ConvertImage(std::span<const u8> input, const ImageInfo& info, std::span<u8
ASSERT(copy.image_extent == mip_size);
ASSERT(copy.buffer_row_length == Common::AlignUp(mip_size.width, tile_size.width));
ASSERT(copy.buffer_image_height == Common::AlignUp(mip_size.height, tile_size.height));
- if (IsPixelFormatASTC(info.format)) {
+
+ const auto input_offset = input.subspan(copy.buffer_offset);
+ copy.buffer_offset = output_offset;
+ copy.buffer_row_length = mip_size.width;
+ copy.buffer_image_height = mip_size.height;
+
+ const auto recompression_setting = Settings::values.astc_recompression.GetValue();
+ const bool astc = IsPixelFormatASTC(info.format);
+
+ if (astc && recompression_setting == Settings::AstcRecompression::Uncompressed) {
Tegra::Texture::ASTC::Decompress(
- input.subspan(copy.buffer_offset), copy.image_extent.width,
- copy.image_extent.height,
+ input_offset, copy.image_extent.width, copy.image_extent.height,
copy.image_subresource.num_layers * copy.image_extent.depth, tile_size.width,
tile_size.height, output.subspan(output_offset));
+
+ output_offset += copy.image_extent.width * copy.image_extent.height *
+ copy.image_subresource.num_layers * CONVERTED_BYTES_PER_BLOCK;
+ } else if (astc) {
+ // BC1 uses 0.5 bytes per texel
+ // BC3 uses 1 byte per texel
+ const auto compress = recompression_setting == Settings::AstcRecompression::Bc1
+ ? Tegra::Texture::BCN::CompressBC1
+ : Tegra::Texture::BCN::CompressBC3;
+ const auto bpp_div = recompression_setting == Settings::AstcRecompression::Bc1 ? 2 : 1;
+
+ const u32 plane_dim = copy.image_extent.width * copy.image_extent.height;
+ const u32 level_size = plane_dim * copy.image_extent.depth *
+ copy.image_subresource.num_layers * CONVERTED_BYTES_PER_BLOCK;
+ decode_scratch.resize_destructive(level_size);
+
+ Tegra::Texture::ASTC::Decompress(
+ input_offset, copy.image_extent.width, copy.image_extent.height,
+ copy.image_subresource.num_layers * copy.image_extent.depth, tile_size.width,
+ tile_size.height, decode_scratch);
+
+ compress(decode_scratch, copy.image_extent.width, copy.image_extent.height,
+ copy.image_subresource.num_layers * copy.image_extent.depth,
+ output.subspan(output_offset));
+
+ const u32 aligned_plane_dim = Common::AlignUp(copy.image_extent.width, 4) *
+ Common::AlignUp(copy.image_extent.height, 4);
+
+ copy.buffer_size =
+ (aligned_plane_dim * copy.image_extent.depth * copy.image_subresource.num_layers) /
+ bpp_div;
+ output_offset += static_cast<u32>(copy.buffer_size);
} else {
- DecompressBC4(input.subspan(copy.buffer_offset), copy.image_extent,
- output.subspan(output_offset));
- }
- copy.buffer_offset = output_offset;
- copy.buffer_row_length = mip_size.width;
- copy.buffer_image_height = mip_size.height;
+ DecompressBC4(input_offset, copy.image_extent, output.subspan(output_offset));
- output_offset += copy.image_extent.width * copy.image_extent.height *
- copy.image_subresource.num_layers * CONVERTED_BYTES_PER_BLOCK;
+ output_offset += copy.image_extent.width * copy.image_extent.height *
+ copy.image_subresource.num_layers * CONVERTED_BYTES_PER_BLOCK;
+ }
}
}
@@ -1233,7 +1297,9 @@ u32 MapSizeBytes(const ImageBase& image) {
static_assert(CalculateLevelSize(LevelInfo{{1920, 1080, 1}, {0, 2, 0}, {1, 1}, 2, 0}, 0) ==
0x7f8000);
-static_assert(CalculateLevelSize(LevelInfo{{32, 32, 1}, {0, 0, 4}, {1, 1}, 4, 0}, 0) == 0x4000);
+static_assert(CalculateLevelSize(LevelInfo{{32, 32, 1}, {0, 0, 4}, {1, 1}, 4, 0}, 0) == 0x40000);
+
+static_assert(CalculateLevelSize(LevelInfo{{128, 8, 1}, {0, 4, 0}, {1, 1}, 4, 0}, 0) == 0x40000);
static_assert(CalculateLevelOffset(PixelFormat::R8_SINT, {1920, 1080, 1}, {0, 2, 0}, 0, 7) ==
0x2afc00);
diff --git a/src/video_core/textures/astc.cpp b/src/video_core/textures/astc.cpp
index a68bc0d77..fef0be31d 100644
--- a/src/video_core/textures/astc.cpp
+++ b/src/video_core/textures/astc.cpp
@@ -16,8 +16,8 @@
#include "common/alignment.h"
#include "common/common_types.h"
#include "common/polyfill_ranges.h"
-#include "common/thread_worker.h"
#include "video_core/textures/astc.h"
+#include "video_core/textures/workers.h"
class InputBitStream {
public:
@@ -1656,8 +1656,7 @@ void Decompress(std::span<const uint8_t> data, uint32_t width, uint32_t height,
const u32 rows = Common::DivideUp(height, block_height);
const u32 cols = Common::DivideUp(width, block_width);
- static Common::ThreadWorker workers{std::max(std::thread::hardware_concurrency(), 2U) / 2,
- "ASTCDecompress"};
+ Common::ThreadWorker& workers{GetThreadWorkers()};
for (u32 z = 0; z < depth; ++z) {
const u32 depth_offset = z * height * width * 4;
diff --git a/src/video_core/textures/bcn.cpp b/src/video_core/textures/bcn.cpp
new file mode 100644
index 000000000..671212a49
--- /dev/null
+++ b/src/video_core/textures/bcn.cpp
@@ -0,0 +1,87 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <stb_dxt.h>
+#include <string.h>
+
+#include "common/alignment.h"
+#include "video_core/textures/bcn.h"
+#include "video_core/textures/workers.h"
+
+namespace Tegra::Texture::BCN {
+
+using BCNCompressor = void(u8* block_output, const u8* block_input, bool any_alpha);
+
+template <u32 BytesPerBlock, bool ThresholdAlpha = false>
+void CompressBCN(std::span<const uint8_t> data, uint32_t width, uint32_t height, uint32_t depth,
+ std::span<uint8_t> output, BCNCompressor f) {
+ constexpr u8 alpha_threshold = 128;
+ constexpr u32 bytes_per_px = 4;
+ const u32 plane_dim = width * height;
+
+ Common::ThreadWorker& workers{GetThreadWorkers()};
+
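+    // Dispatch one task per row of 4x4 blocks and wait for each depth slice to finish.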
+ for (u32 z = 0; z < depth; z++) {
+ for (u32 y = 0; y < height; y += 4) {
+ auto compress_row = [z, y, width, height, plane_dim, f, data, output]() {
+ for (u32 x = 0; x < width; x += 4) {
+ // Gather 4x4 block of RGBA texels
+ u8 input_colors[4][4][4];
+ bool any_alpha = false;
+
+ for (u32 j = 0; j < 4; j++) {
+ for (u32 i = 0; i < 4; i++) {
+ const size_t coord =
+ (z * plane_dim + (y + j) * width + (x + i)) * bytes_per_px;
+
+ if ((x + i < width) && (y + j < height)) {
+ if constexpr (ThresholdAlpha) {
+ if (data[coord + 3] >= alpha_threshold) {
+ input_colors[j][i][0] = data[coord + 0];
+ input_colors[j][i][1] = data[coord + 1];
+ input_colors[j][i][2] = data[coord + 2];
+ input_colors[j][i][3] = 255;
+ } else {
+ any_alpha = true;
+ memset(input_colors[j][i], 0, bytes_per_px);
+ }
+ } else {
+ memcpy(input_colors[j][i], &data[coord], bytes_per_px);
+ }
+ } else {
+ memset(input_colors[j][i], 0, bytes_per_px);
+ }
+ }
+ }
+
+ const u32 bytes_per_row = BytesPerBlock * Common::DivideUp(width, 4U);
+ const u32 bytes_per_plane = bytes_per_row * Common::DivideUp(height, 4U);
+ f(output.data() + z * bytes_per_plane + (y / 4) * bytes_per_row +
+ (x / 4) * BytesPerBlock,
+ reinterpret_cast<u8*>(input_colors), any_alpha);
+ }
+ };
+ workers.QueueWork(std::move(compress_row));
+ }
+ workers.WaitForRequests();
+ }
+}
+
+void CompressBC1(std::span<const uint8_t> data, uint32_t width, uint32_t height, uint32_t depth,
+ std::span<uint8_t> output) {
+ CompressBCN<8, true>(data, width, height, depth, output,
+ [](u8* block_output, const u8* block_input, bool any_alpha) {
+ stb_compress_bc1_block(block_output, block_input, any_alpha,
+ STB_DXT_NORMAL);
+ });
+}
+
+void CompressBC3(std::span<const uint8_t> data, uint32_t width, uint32_t height, uint32_t depth,
+ std::span<uint8_t> output) {
+ CompressBCN<16, false>(data, width, height, depth, output,
+ [](u8* block_output, const u8* block_input, bool any_alpha) {
+ stb_compress_bc3_block(block_output, block_input, STB_DXT_NORMAL);
+ });
+}
+
+} // namespace Tegra::Texture::BCN
diff --git a/src/video_core/textures/bcn.h b/src/video_core/textures/bcn.h
new file mode 100644
index 000000000..6464af885
--- /dev/null
+++ b/src/video_core/textures/bcn.h
@@ -0,0 +1,17 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <span>
+#include <stdint.h>
+
+namespace Tegra::Texture::BCN {
+
+void CompressBC1(std::span<const uint8_t> data, uint32_t width, uint32_t height, uint32_t depth,
+ std::span<uint8_t> output);
+
+void CompressBC3(std::span<const uint8_t> data, uint32_t width, uint32_t height, uint32_t depth,
+ std::span<uint8_t> output);
+
+} // namespace Tegra::Texture::BCN
diff --git a/src/video_core/textures/workers.cpp b/src/video_core/textures/workers.cpp
new file mode 100644
index 000000000..a71c305f4
--- /dev/null
+++ b/src/video_core/textures/workers.cpp
@@ -0,0 +1,15 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "video_core/textures/workers.h"
+
+namespace Tegra::Texture {
+
+Common::ThreadWorker& GetThreadWorkers() {
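+    // Shared worker pool for texture transcoding (ASTC decode, BCn encode), sized to half of the
+    // available hardware threads with a minimum of one worker.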
+ static Common::ThreadWorker workers{std::max(std::thread::hardware_concurrency(), 2U) / 2,
+ "ImageTranscode"};
+
+ return workers;
+}
+
+} // namespace Tegra::Texture
diff --git a/src/video_core/textures/workers.h b/src/video_core/textures/workers.h
new file mode 100644
index 000000000..008dd05b3
--- /dev/null
+++ b/src/video_core/textures/workers.h
@@ -0,0 +1,12 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/thread_worker.h"
+
+namespace Tegra::Texture {
+
+Common::ThreadWorker& GetThreadWorkers();
+
+}
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index 6ffca2af2..aea677cb3 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -406,6 +406,14 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEnable = false;
features.extended_dynamic_state3.extendedDynamicState3ColorBlendEquation = false;
dynamic_state3_blending = false;
+
+ const u32 version = (properties.properties.driverVersion << 3) >> 3;
+ if (version < VK_MAKE_API_VERSION(0, 23, 1, 0)) {
+ LOG_WARNING(Render_Vulkan,
+ "RADV versions older than 23.1.0 have broken depth clamp dynamic state");
+ features.extended_dynamic_state3.extendedDynamicState3DepthClampEnable = false;
+ dynamic_state3_enables = false;
+ }
}
if (extensions.vertex_input_dynamic_state && is_radv) {
// TODO(ameerj): Blacklist only offending driver versions
@@ -463,6 +471,18 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
LOG_WARNING(Render_Vulkan, "ANV driver does not support native BGR format");
must_emulate_bgr565 = true;
}
+ if (extensions.push_descriptor && is_intel_anv) {
+ const u32 version = (properties.properties.driverVersion << 3) >> 3;
+ if (version >= VK_MAKE_API_VERSION(0, 22, 3, 0) &&
+ version < VK_MAKE_API_VERSION(0, 23, 2, 0)) {
+ // Disable VK_KHR_push_descriptor due to
+ // mesa/mesa/-/commit/ff91c5ca42bc80aa411cb3fd8f550aa6fdd16bdc
+ LOG_WARNING(Render_Vulkan,
+ "ANV drivers 22.3.0 to 23.1.0 have broken VK_KHR_push_descriptor");
+ extensions.push_descriptor = false;
+ loaded_extensions.erase(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
+ }
+ }
if (is_mvk) {
LOG_WARNING(Render_Vulkan,
"MVK driver breaks when using more than 16 vertex attributes/bindings");
@@ -982,6 +1002,11 @@ u64 Device::GetDeviceMemoryUsage() const {
}
void Device::CollectPhysicalMemoryInfo() {
+ // Account for resolution scaling in memory limits
+ const size_t normal_memory = 6_GiB;
+ const size_t scaler_memory = 1_GiB * Settings::values.resolution_info.ScaleUp(1);
+
+ // Calculate limits using memory budget
VkPhysicalDeviceMemoryBudgetPropertiesEXT budget{};
budget.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT;
const auto mem_info =
@@ -1009,11 +1034,14 @@ void Device::CollectPhysicalMemoryInfo() {
device_access_memory += mem_properties.memoryHeaps[element].size;
}
if (!is_integrated) {
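+        // Reserve a slice of VRAM for the rest of the system and clamp the budget to the
+        // resolution-scaled limit.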
+ const u64 reserve_memory = std::min<u64>(device_access_memory / 8, 1_GiB);
+ device_access_memory -= reserve_memory;
+ device_access_memory = std::min<u64>(device_access_memory, normal_memory + scaler_memory);
return;
}
const s64 available_memory = static_cast<s64>(device_access_memory - device_initial_usage);
device_access_memory = static_cast<u64>(std::max<s64>(
- std::min<s64>(available_memory - 8_GiB, 4_GiB), static_cast<s64>(local_memory)));
+ std::min<s64>(available_memory - 8_GiB, 4_GiB), std::min<s64>(local_memory, 4_GiB)));
}
void Device::CollectToolingInfo() {
diff --git a/src/video_core/vulkan_common/vulkan_memory_allocator.cpp b/src/video_core/vulkan_common/vulkan_memory_allocator.cpp
index 1732866e0..e28a556f8 100644
--- a/src/video_core/vulkan_common/vulkan_memory_allocator.cpp
+++ b/src/video_core/vulkan_common/vulkan_memory_allocator.cpp
@@ -147,7 +147,7 @@ public:
/// Returns whether this allocation is compatible with the arguments.
[[nodiscard]] bool IsCompatible(VkMemoryPropertyFlags flags, u32 type_mask) const {
- return (flags & property_flags) == property_flags && (type_mask & shifted_memory_type) != 0;
+ return (flags & property_flags) == flags && (type_mask & shifted_memory_type) != 0;
}
private:
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index b94d36838..662651196 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -711,11 +711,13 @@ void Config::ReadRendererValues() {
ReadGlobalSetting(Settings::values.nvdec_emulation);
ReadGlobalSetting(Settings::values.accelerate_astc);
ReadGlobalSetting(Settings::values.async_astc);
+ ReadGlobalSetting(Settings::values.astc_recompression);
ReadGlobalSetting(Settings::values.use_reactive_flushing);
ReadGlobalSetting(Settings::values.shader_backend);
ReadGlobalSetting(Settings::values.use_asynchronous_shaders);
ReadGlobalSetting(Settings::values.use_fast_gpu_time);
ReadGlobalSetting(Settings::values.use_vulkan_driver_pipeline_cache);
+ ReadGlobalSetting(Settings::values.enable_compute_pipelines);
ReadGlobalSetting(Settings::values.bg_red);
ReadGlobalSetting(Settings::values.bg_green);
ReadGlobalSetting(Settings::values.bg_blue);
@@ -1358,6 +1360,10 @@ void Config::SaveRendererValues() {
Settings::values.nvdec_emulation.UsingGlobal());
WriteGlobalSetting(Settings::values.accelerate_astc);
WriteGlobalSetting(Settings::values.async_astc);
+ WriteSetting(QString::fromStdString(Settings::values.astc_recompression.GetLabel()),
+ static_cast<u32>(Settings::values.astc_recompression.GetValue(global)),
+ static_cast<u32>(Settings::values.astc_recompression.GetDefault()),
+ Settings::values.astc_recompression.UsingGlobal());
WriteGlobalSetting(Settings::values.use_reactive_flushing);
WriteSetting(QString::fromStdString(Settings::values.shader_backend.GetLabel()),
static_cast<u32>(Settings::values.shader_backend.GetValue(global)),
@@ -1366,6 +1372,7 @@ void Config::SaveRendererValues() {
WriteGlobalSetting(Settings::values.use_asynchronous_shaders);
WriteGlobalSetting(Settings::values.use_fast_gpu_time);
WriteGlobalSetting(Settings::values.use_vulkan_driver_pipeline_cache);
+ WriteGlobalSetting(Settings::values.enable_compute_pipelines);
WriteGlobalSetting(Settings::values.bg_red);
WriteGlobalSetting(Settings::values.bg_green);
WriteGlobalSetting(Settings::values.bg_blue);
diff --git a/src/yuzu/configuration/config.h b/src/yuzu/configuration/config.h
index 7d26e9ab6..9cb9db6cf 100644
--- a/src/yuzu/configuration/config.h
+++ b/src/yuzu/configuration/config.h
@@ -208,3 +208,4 @@ Q_DECLARE_METATYPE(Settings::ScalingFilter);
Q_DECLARE_METATYPE(Settings::AntiAliasing);
Q_DECLARE_METATYPE(Settings::RendererBackend);
Q_DECLARE_METATYPE(Settings::ShaderBackend);
+Q_DECLARE_METATYPE(Settings::AstcRecompression);
diff --git a/src/yuzu/configuration/configure_dialog.cpp b/src/yuzu/configuration/configure_dialog.cpp
index 2aaefcc05..8e76a819a 100644
--- a/src/yuzu/configuration/configure_dialog.cpp
+++ b/src/yuzu/configuration/configure_dialog.cpp
@@ -36,8 +36,9 @@ ConfigureDialog::ConfigureDialog(QWidget* parent, HotkeyRegistry& registry_,
debug_tab_tab{std::make_unique<ConfigureDebugTab>(system_, this)},
filesystem_tab{std::make_unique<ConfigureFilesystem>(this)},
general_tab{std::make_unique<ConfigureGeneral>(system_, this)},
- graphics_tab{std::make_unique<ConfigureGraphics>(system_, this)},
graphics_advanced_tab{std::make_unique<ConfigureGraphicsAdvanced>(system_, this)},
+ graphics_tab{std::make_unique<ConfigureGraphics>(
+ system_, [&]() { graphics_advanced_tab->ExposeComputeOption(); }, this)},
hotkeys_tab{std::make_unique<ConfigureHotkeys>(system_.HIDCore(), this)},
input_tab{std::make_unique<ConfigureInput>(system_, this)},
network_tab{std::make_unique<ConfigureNetwork>(system_, this)},
diff --git a/src/yuzu/configuration/configure_dialog.h b/src/yuzu/configuration/configure_dialog.h
index 1f724834a..a086a07c4 100644
--- a/src/yuzu/configuration/configure_dialog.h
+++ b/src/yuzu/configuration/configure_dialog.h
@@ -72,8 +72,8 @@ private:
std::unique_ptr<ConfigureDebugTab> debug_tab_tab;
std::unique_ptr<ConfigureFilesystem> filesystem_tab;
std::unique_ptr<ConfigureGeneral> general_tab;
- std::unique_ptr<ConfigureGraphics> graphics_tab;
std::unique_ptr<ConfigureGraphicsAdvanced> graphics_advanced_tab;
+ std::unique_ptr<ConfigureGraphics> graphics_tab;
std::unique_ptr<ConfigureHotkeys> hotkeys_tab;
std::unique_ptr<ConfigureInput> input_tab;
std::unique_ptr<ConfigureNetwork> network_tab;
diff --git a/src/yuzu/configuration/configure_graphics.cpp b/src/yuzu/configuration/configure_graphics.cpp
index 76e5b7499..f316b598c 100644
--- a/src/yuzu/configuration/configure_graphics.cpp
+++ b/src/yuzu/configuration/configure_graphics.cpp
@@ -2,9 +2,11 @@
// SPDX-License-Identifier: GPL-2.0-or-later
// Include this early to include Vulkan headers how we want to
+#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"
#include <algorithm>
+#include <functional>
#include <iosfwd>
#include <iterator>
#include <string>
@@ -74,8 +76,11 @@ static constexpr Settings::VSyncMode PresentModeToSetting(VkPresentModeKHR mode)
}
}
-ConfigureGraphics::ConfigureGraphics(const Core::System& system_, QWidget* parent)
- : QWidget(parent), ui{std::make_unique<Ui::ConfigureGraphics>()}, system{system_} {
+ConfigureGraphics::ConfigureGraphics(const Core::System& system_,
+ const std::function<void()>& expose_compute_option_,
+ QWidget* parent)
+ : QWidget(parent), ui{std::make_unique<Ui::ConfigureGraphics>()},
+ expose_compute_option{expose_compute_option_}, system{system_} {
vulkan_device = Settings::values.vulkan_device.GetValue();
RetrieveVulkanDevices();
@@ -513,8 +518,7 @@ void ConfigureGraphics::RetrieveVulkanDevices() try {
const Common::DynamicLibrary library = OpenLibrary();
const vk::Instance instance = CreateInstance(library, dld, VK_API_VERSION_1_1, wsi.type);
const std::vector<VkPhysicalDevice> physical_devices = instance.EnumeratePhysicalDevices();
- vk::SurfaceKHR surface = //< needed to view present modes for a device
- CreateSurface(instance, wsi);
+ vk::SurfaceKHR surface = CreateSurface(instance, wsi);
vulkan_devices.clear();
vulkan_devices.reserve(physical_devices.size());
@@ -527,6 +531,17 @@ void ConfigureGraphics::RetrieveVulkanDevices() try {
physical_device.GetSurfacePresentModesKHR(*surface);
vulkan_devices.push_back(QString::fromStdString(name));
device_present_modes.push_back(present_modes);
+
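+            // Detect the Intel proprietary Windows driver so the advanced graphics tab can
+            // expose the compute pipelines toggle.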
+ VkPhysicalDeviceDriverProperties driver_properties{};
+ driver_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES;
+ driver_properties.pNext = nullptr;
+ VkPhysicalDeviceProperties2 properties{};
+ properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR;
+ properties.pNext = &driver_properties;
+ dld.vkGetPhysicalDeviceProperties2(physical_device, &properties);
+ if (driver_properties.driverID == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS) {
+ expose_compute_option();
+ }
}
} catch (const Vulkan::vk::Exception& exception) {
LOG_ERROR(Frontend, "Failed to enumerate devices with error: {}", exception.what());
diff --git a/src/yuzu/configuration/configure_graphics.h b/src/yuzu/configuration/configure_graphics.h
index 901f604a5..364b1cac2 100644
--- a/src/yuzu/configuration/configure_graphics.h
+++ b/src/yuzu/configuration/configure_graphics.h
@@ -3,6 +3,7 @@
#pragma once
+#include <functional>
#include <memory>
#include <vector>
#include <QColor>
@@ -37,7 +38,9 @@ class ConfigureGraphics : public QWidget {
Q_OBJECT
public:
- explicit ConfigureGraphics(const Core::System& system_, QWidget* parent = nullptr);
+ explicit ConfigureGraphics(const Core::System& system_,
+ const std::function<void()>& expose_compute_option_,
+ QWidget* parent = nullptr);
~ConfigureGraphics() override;
void ApplyConfiguration();
@@ -81,6 +84,7 @@ private:
// selection in the combobox
u32 vulkan_device{};
Settings::ShaderBackend shader_backend{};
+    const std::function<void()> expose_compute_option;
const Core::System& system;
};
diff --git a/src/yuzu/configuration/configure_graphics_advanced.cpp b/src/yuzu/configuration/configure_graphics_advanced.cpp
index 627ed8b17..896863f87 100644
--- a/src/yuzu/configuration/configure_graphics_advanced.cpp
+++ b/src/yuzu/configuration/configure_graphics_advanced.cpp
@@ -15,6 +15,8 @@ ConfigureGraphicsAdvanced::ConfigureGraphicsAdvanced(const Core::System& system_
SetupPerGameUI();
SetConfiguration();
+
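+    // Hidden by default; ConfigureGraphics reveals it through ExposeComputeOption()
+    // when the Intel proprietary driver is detected.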
+ ui->enable_compute_pipelines_checkbox->setVisible(false);
}
ConfigureGraphicsAdvanced::~ConfigureGraphicsAdvanced() = default;
@@ -25,8 +27,10 @@ void ConfigureGraphicsAdvanced::SetConfiguration() {
ui->async_present->setEnabled(runtime_lock);
ui->renderer_force_max_clock->setEnabled(runtime_lock);
ui->async_astc->setEnabled(runtime_lock);
+ ui->astc_recompression_combobox->setEnabled(runtime_lock);
ui->use_asynchronous_shaders->setEnabled(runtime_lock);
ui->anisotropic_filtering_combobox->setEnabled(runtime_lock);
+ ui->enable_compute_pipelines_checkbox->setEnabled(runtime_lock);
ui->async_present->setChecked(Settings::values.async_presentation.GetValue());
ui->renderer_force_max_clock->setChecked(Settings::values.renderer_force_max_clock.GetValue());
@@ -36,20 +40,28 @@ void ConfigureGraphicsAdvanced::SetConfiguration() {
ui->use_fast_gpu_time->setChecked(Settings::values.use_fast_gpu_time.GetValue());
ui->use_vulkan_driver_pipeline_cache->setChecked(
Settings::values.use_vulkan_driver_pipeline_cache.GetValue());
+ ui->enable_compute_pipelines_checkbox->setChecked(
+ Settings::values.enable_compute_pipelines.GetValue());
if (Settings::IsConfiguringGlobal()) {
ui->gpu_accuracy->setCurrentIndex(
static_cast<int>(Settings::values.gpu_accuracy.GetValue()));
ui->anisotropic_filtering_combobox->setCurrentIndex(
Settings::values.max_anisotropy.GetValue());
+ ui->astc_recompression_combobox->setCurrentIndex(
+ static_cast<int>(Settings::values.astc_recompression.GetValue()));
} else {
ConfigurationShared::SetPerGameSetting(ui->gpu_accuracy, &Settings::values.gpu_accuracy);
ConfigurationShared::SetPerGameSetting(ui->anisotropic_filtering_combobox,
&Settings::values.max_anisotropy);
+ ConfigurationShared::SetPerGameSetting(ui->astc_recompression_combobox,
+ &Settings::values.astc_recompression);
ConfigurationShared::SetHighlight(ui->label_gpu_accuracy,
!Settings::values.gpu_accuracy.UsingGlobal());
ConfigurationShared::SetHighlight(ui->af_label,
!Settings::values.max_anisotropy.UsingGlobal());
+ ConfigurationShared::SetHighlight(ui->label_astc_recompression,
+ !Settings::values.astc_recompression.UsingGlobal());
}
}
@@ -66,6 +78,8 @@ void ConfigureGraphicsAdvanced::ApplyConfiguration() {
ui->use_reactive_flushing, use_reactive_flushing);
ConfigurationShared::ApplyPerGameSetting(&Settings::values.async_astc, ui->async_astc,
async_astc);
+ ConfigurationShared::ApplyPerGameSetting(&Settings::values.astc_recompression,
+ ui->astc_recompression_combobox);
ConfigurationShared::ApplyPerGameSetting(&Settings::values.use_asynchronous_shaders,
ui->use_asynchronous_shaders,
use_asynchronous_shaders);
@@ -74,6 +88,9 @@ void ConfigureGraphicsAdvanced::ApplyConfiguration() {
ConfigurationShared::ApplyPerGameSetting(&Settings::values.use_vulkan_driver_pipeline_cache,
ui->use_vulkan_driver_pipeline_cache,
use_vulkan_driver_pipeline_cache);
+ ConfigurationShared::ApplyPerGameSetting(&Settings::values.enable_compute_pipelines,
+ ui->enable_compute_pipelines_checkbox,
+ enable_compute_pipelines);
}
void ConfigureGraphicsAdvanced::changeEvent(QEvent* event) {
@@ -97,6 +114,8 @@ void ConfigureGraphicsAdvanced::SetupPerGameUI() {
Settings::values.renderer_force_max_clock.UsingGlobal());
ui->use_reactive_flushing->setEnabled(Settings::values.use_reactive_flushing.UsingGlobal());
ui->async_astc->setEnabled(Settings::values.async_astc.UsingGlobal());
+ ui->astc_recompression_combobox->setEnabled(
+ Settings::values.astc_recompression.UsingGlobal());
ui->use_asynchronous_shaders->setEnabled(
Settings::values.use_asynchronous_shaders.UsingGlobal());
ui->use_fast_gpu_time->setEnabled(Settings::values.use_fast_gpu_time.UsingGlobal());
@@ -104,6 +123,8 @@ void ConfigureGraphicsAdvanced::SetupPerGameUI() {
Settings::values.use_vulkan_driver_pipeline_cache.UsingGlobal());
ui->anisotropic_filtering_combobox->setEnabled(
Settings::values.max_anisotropy.UsingGlobal());
+ ui->enable_compute_pipelines_checkbox->setEnabled(
+ Settings::values.enable_compute_pipelines.UsingGlobal());
return;
}
@@ -125,10 +146,20 @@ void ConfigureGraphicsAdvanced::SetupPerGameUI() {
ConfigurationShared::SetColoredTristate(ui->use_vulkan_driver_pipeline_cache,
Settings::values.use_vulkan_driver_pipeline_cache,
use_vulkan_driver_pipeline_cache);
+ ConfigurationShared::SetColoredTristate(ui->enable_compute_pipelines_checkbox,
+ Settings::values.enable_compute_pipelines,
+ enable_compute_pipelines);
ConfigurationShared::SetColoredComboBox(
ui->gpu_accuracy, ui->label_gpu_accuracy,
static_cast<int>(Settings::values.gpu_accuracy.GetValue(true)));
ConfigurationShared::SetColoredComboBox(
ui->anisotropic_filtering_combobox, ui->af_label,
static_cast<int>(Settings::values.max_anisotropy.GetValue(true)));
+ ConfigurationShared::SetColoredComboBox(
+ ui->astc_recompression_combobox, ui->label_astc_recompression,
+ static_cast<int>(Settings::values.astc_recompression.GetValue(true)));
+}
+
+void ConfigureGraphicsAdvanced::ExposeComputeOption() {
+ ui->enable_compute_pipelines_checkbox->setVisible(true);
}
diff --git a/src/yuzu/configuration/configure_graphics_advanced.h b/src/yuzu/configuration/configure_graphics_advanced.h
index ae3c10946..1c7b636b9 100644
--- a/src/yuzu/configuration/configure_graphics_advanced.h
+++ b/src/yuzu/configuration/configure_graphics_advanced.h
@@ -28,6 +28,8 @@ public:
void ApplyConfiguration();
void SetConfiguration();
+ void ExposeComputeOption();
+
private:
void changeEvent(QEvent* event) override;
void RetranslateUI();
@@ -44,6 +46,7 @@ private:
ConfigurationShared::CheckState use_asynchronous_shaders;
ConfigurationShared::CheckState use_fast_gpu_time;
ConfigurationShared::CheckState use_vulkan_driver_pipeline_cache;
+ ConfigurationShared::CheckState enable_compute_pipelines;
const Core::System& system;
};
diff --git a/src/yuzu/configuration/configure_graphics_advanced.ui b/src/yuzu/configuration/configure_graphics_advanced.ui
index 9d8cbea09..37757a918 100644
--- a/src/yuzu/configuration/configure_graphics_advanced.ui
+++ b/src/yuzu/configuration/configure_graphics_advanced.ui
@@ -70,6 +70,50 @@
</widget>
</item>
<item>
+ <widget class="QWidget" name="astc_recompression_layout" native="true">
+ <layout class="QHBoxLayout" name="horizontalLayout_3">
+ <property name="leftMargin">
+ <number>0</number>
+ </property>
+ <property name="topMargin">
+ <number>0</number>
+ </property>
+ <property name="rightMargin">
+ <number>0</number>
+ </property>
+ <property name="bottomMargin">
+ <number>0</number>
+ </property>
+ <item>
+ <widget class="QLabel" name="label_astc_recompression">
+ <property name="text">
+ <string>ASTC recompression:</string>
+ </property>
+ </widget>
+ </item>
+ <item>
+ <widget class="QComboBox" name="astc_recompression_combobox">
+ <item>
+ <property name="text">
+ <string>Uncompressed (Best quality)</string>
+ </property>
+ </item>
+ <item>
+ <property name="text">
+ <string>BC1 (Low quality)</string>
+ </property>
+ </item>
+ <item>
+ <property name="text">
+ <string>BC3 (Medium quality)</string>
+ </property>
+ </item>
+ </widget>
+ </item>
+ </layout>
+ </widget>
+ </item>
+ <item>
<widget class="QCheckBox" name="async_present">
<property name="text">
<string>Enable asynchronous presentation (Vulkan only)</string>
@@ -137,6 +181,17 @@
</widget>
</item>
<item>
+ <widget class="QCheckBox" name="enable_compute_pipelines_checkbox">
+ <property name="toolTip">
+ <string>Enable compute pipelines, required by some games. This setting only exists for Intel proprietary drivers, and may crash if enabled.
+Compute pipelines are always enabled on all other drivers.</string>
+ </property>
+ <property name="text">
+ <string>Enable Compute Pipelines (Intel Vulkan only)</string>
+ </property>
+ </widget>
+ </item>
+ <item>
<widget class="QWidget" name="af_layout" native="true">
<layout class="QHBoxLayout" name="horizontalLayout_1">
<property name="leftMargin">
diff --git a/src/yuzu/configuration/configure_hotkeys.cpp b/src/yuzu/configuration/configure_hotkeys.cpp
index daa77a8f8..0b2a965f8 100644
--- a/src/yuzu/configuration/configure_hotkeys.cpp
+++ b/src/yuzu/configuration/configure_hotkeys.cpp
@@ -48,7 +48,9 @@ ConfigureHotkeys::ConfigureHotkeys(Core::HID::HIDCore& hid_core, QWidget* parent
connect(poll_timer.get(), &QTimer::timeout, [this] {
const auto buttons = controller->GetNpadButtons();
- if (buttons.raw != Core::HID::NpadButton::None) {
+ const auto home_pressed = controller->GetHomeButtons().home != 0;
+ const auto capture_pressed = controller->GetCaptureButtons().capture != 0;
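+        // Only accept the pressed buttons as a hotkey when the Home or Capture
+        // (Screenshot) button is held as a modifier.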
+ if (home_pressed || capture_pressed) {
SetPollingResult(buttons.raw, false);
return;
}
@@ -154,8 +156,10 @@ void ConfigureHotkeys::ConfigureController(QModelIndex index) {
model->setData(index, previous_key);
return;
}
-
- const QString button_string = tr("Home+%1").arg(GetButtonName(button));
+ const auto home_pressed = this->controller->GetHomeButtons().home != 0;
+ const auto capture_pressed = this->controller->GetCaptureButtons().capture != 0;
+ const QString button_string =
+ GetButtonCombinationName(button, home_pressed, capture_pressed);
const auto [key_sequence_used, used_action] = IsUsedControllerKey(button_string);
@@ -174,72 +178,83 @@ void ConfigureHotkeys::ConfigureController(QModelIndex index) {
poll_timer->start(200); // Check for new inputs every 200ms
// We need to disable configuration to be able to read npad buttons
controller->DisableConfiguration();
- controller->DisableSystemButtons();
}
void ConfigureHotkeys::SetPollingResult(Core::HID::NpadButton button, const bool cancel) {
timeout_timer->stop();
poll_timer->stop();
+ (*input_setter)(button, cancel);
// Re-Enable configuration
controller->EnableConfiguration();
- controller->EnableSystemButtons();
-
- (*input_setter)(button, cancel);
input_setter = std::nullopt;
}
-QString ConfigureHotkeys::GetButtonName(Core::HID::NpadButton button) const {
+QString ConfigureHotkeys::GetButtonCombinationName(Core::HID::NpadButton button,
+                                                   const bool home,
+                                                   const bool capture) const {
Core::HID::NpadButtonState state{button};
+ QString button_combination;
+ if (home) {
+ button_combination.append(QStringLiteral("Home+"));
+ }
+ if (capture) {
+ button_combination.append(QStringLiteral("Screenshot+"));
+ }
if (state.a) {
- return QStringLiteral("A");
+ button_combination.append(QStringLiteral("A+"));
}
if (state.b) {
- return QStringLiteral("B");
+ button_combination.append(QStringLiteral("B+"));
}
if (state.x) {
- return QStringLiteral("X");
+ button_combination.append(QStringLiteral("X+"));
}
if (state.y) {
- return QStringLiteral("Y");
+ button_combination.append(QStringLiteral("Y+"));
}
if (state.l || state.right_sl || state.left_sl) {
- return QStringLiteral("L");
+ button_combination.append(QStringLiteral("L+"));
}
if (state.r || state.right_sr || state.left_sr) {
- return QStringLiteral("R");
+ button_combination.append(QStringLiteral("R+"));
}
if (state.zl) {
- return QStringLiteral("ZL");
+ button_combination.append(QStringLiteral("ZL+"));
}
if (state.zr) {
- return QStringLiteral("ZR");
+ button_combination.append(QStringLiteral("ZR+"));
}
if (state.left) {
- return QStringLiteral("Dpad_Left");
+ button_combination.append(QStringLiteral("Dpad_Left+"));
}
if (state.right) {
- return QStringLiteral("Dpad_Right");
+ button_combination.append(QStringLiteral("Dpad_Right+"));
}
if (state.up) {
- return QStringLiteral("Dpad_Up");
+ button_combination.append(QStringLiteral("Dpad_Up+"));
}
if (state.down) {
- return QStringLiteral("Dpad_Down");
+ button_combination.append(QStringLiteral("Dpad_Down+"));
}
if (state.stick_l) {
- return QStringLiteral("Left_Stick");
+ button_combination.append(QStringLiteral("Left_Stick+"));
}
if (state.stick_r) {
- return QStringLiteral("Right_Stick");
+ button_combination.append(QStringLiteral("Right_Stick+"));
}
if (state.minus) {
- return QStringLiteral("Minus");
+ button_combination.append(QStringLiteral("Minus+"));
}
if (state.plus) {
- return QStringLiteral("Plus");
+ button_combination.append(QStringLiteral("Plus+"));
+ }
+ if (button_combination.isEmpty()) {
+ return tr("Invalid");
+ } else {
+ button_combination.chop(1);
+ return button_combination;
}
- return tr("Invalid");
}
std::pair<bool, QString> ConfigureHotkeys::IsUsedKey(QKeySequence key_sequence) const {
diff --git a/src/yuzu/configuration/configure_hotkeys.h b/src/yuzu/configuration/configure_hotkeys.h
index e8e414320..5fd1bcbfe 100644
--- a/src/yuzu/configuration/configure_hotkeys.h
+++ b/src/yuzu/configuration/configure_hotkeys.h
@@ -59,7 +59,7 @@ private:
QStandardItemModel* model;
void SetPollingResult(Core::HID::NpadButton button, bool cancel);
- QString GetButtonName(Core::HID::NpadButton button) const;
+ QString GetButtonCombinationName(Core::HID::NpadButton button, bool home, bool capture) const;
Core::HID::EmulatedController* controller;
std::unique_ptr<QTimer> timeout_timer;
std::unique_ptr<QTimer> poll_timer;
diff --git a/src/yuzu/configuration/configure_per_game.cpp b/src/yuzu/configuration/configure_per_game.cpp
index 7e757eafd..7ac162586 100644
--- a/src/yuzu/configuration/configure_per_game.cpp
+++ b/src/yuzu/configuration/configure_per_game.cpp
@@ -48,8 +48,9 @@ ConfigurePerGame::ConfigurePerGame(QWidget* parent, u64 title_id_, const std::st
audio_tab = std::make_unique<ConfigureAudio>(system_, this);
cpu_tab = std::make_unique<ConfigureCpu>(system_, this);
general_tab = std::make_unique<ConfigureGeneral>(system_, this);
- graphics_tab = std::make_unique<ConfigureGraphics>(system_, this);
graphics_advanced_tab = std::make_unique<ConfigureGraphicsAdvanced>(system_, this);
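+    // The advanced tab is created first so the graphics tab's callback can reveal
+    // its compute pipelines option when an Intel proprietary driver is found.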
+ graphics_tab = std::make_unique<ConfigureGraphics>(
+ system_, [&]() { graphics_advanced_tab->ExposeComputeOption(); }, this);
input_tab = std::make_unique<ConfigureInputPerGame>(system_, game_config.get(), this);
system_tab = std::make_unique<ConfigureSystem>(system_, this);
diff --git a/src/yuzu/configuration/configure_per_game.h b/src/yuzu/configuration/configure_per_game.h
index 4ecc43541..85752f1fa 100644
--- a/src/yuzu/configuration/configure_per_game.h
+++ b/src/yuzu/configuration/configure_per_game.h
@@ -75,8 +75,8 @@ private:
std::unique_ptr<ConfigureAudio> audio_tab;
std::unique_ptr<ConfigureCpu> cpu_tab;
std::unique_ptr<ConfigureGeneral> general_tab;
- std::unique_ptr<ConfigureGraphics> graphics_tab;
std::unique_ptr<ConfigureGraphicsAdvanced> graphics_advanced_tab;
+ std::unique_ptr<ConfigureGraphics> graphics_tab;
std::unique_ptr<ConfigureInputPerGame> input_tab;
std::unique_ptr<ConfigureSystem> system_tab;
};
diff --git a/src/yuzu/game_list.cpp b/src/yuzu/game_list.cpp
index c21828b1d..465084fea 100644
--- a/src/yuzu/game_list.cpp
+++ b/src/yuzu/game_list.cpp
@@ -544,6 +544,7 @@ void GameList::AddGamePopup(QMenu& context_menu, u64 program_id, const std::stri
QAction* remove_update = remove_menu->addAction(tr("Remove Installed Update"));
QAction* remove_dlc = remove_menu->addAction(tr("Remove All Installed DLC"));
QAction* remove_custom_config = remove_menu->addAction(tr("Remove Custom Configuration"));
+ QAction* remove_cache_storage = remove_menu->addAction(tr("Remove Cache Storage"));
QAction* remove_gl_shader_cache = remove_menu->addAction(tr("Remove OpenGL Pipeline Cache"));
QAction* remove_vk_shader_cache = remove_menu->addAction(tr("Remove Vulkan Pipeline Cache"));
remove_menu->addSeparator();
@@ -614,6 +615,9 @@ void GameList::AddGamePopup(QMenu& context_menu, u64 program_id, const std::stri
connect(remove_custom_config, &QAction::triggered, [this, program_id, path]() {
emit RemoveFileRequested(program_id, GameListRemoveTarget::CustomConfiguration, path);
});
+ connect(remove_cache_storage, &QAction::triggered, [this, program_id, path] {
+ emit RemoveFileRequested(program_id, GameListRemoveTarget::CacheStorage, path);
+ });
connect(dump_romfs, &QAction::triggered, [this, program_id, path]() {
emit DumpRomFSRequested(program_id, path, DumpRomFSTarget::Normal);
});
diff --git a/src/yuzu/game_list.h b/src/yuzu/game_list.h
index 64e5af4c1..6c2f75e53 100644
--- a/src/yuzu/game_list.h
+++ b/src/yuzu/game_list.h
@@ -45,6 +45,7 @@ enum class GameListRemoveTarget {
VkShaderCache,
AllShaderCache,
CustomConfiguration,
+ CacheStorage,
};
enum class DumpRomFSTarget {
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index d932e33a7..25cfef6d5 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -1164,7 +1164,8 @@ void GMainWindow::InitializeRecentFileMenuActions() {
UpdateRecentFiles();
}
-void GMainWindow::LinkActionShortcut(QAction* action, const QString& action_name) {
+void GMainWindow::LinkActionShortcut(QAction* action, const QString& action_name,
+ const bool tas_allowed) {
static const QString main_window = QStringLiteral("Main Window");
action->setShortcut(hotkey_registry.GetKeySequence(main_window, action_name));
action->setShortcutContext(hotkey_registry.GetShortcutContext(main_window, action_name));
@@ -1176,7 +1177,14 @@ void GMainWindow::LinkActionShortcut(QAction* action, const QString& action_name
const auto* controller_hotkey =
hotkey_registry.GetControllerHotkey(main_window, action_name, controller);
connect(
- controller_hotkey, &ControllerShortcut::Activated, this, [action] { action->trigger(); },
+ controller_hotkey, &ControllerShortcut::Activated, this,
+ [action, tas_allowed, this] {
+ auto [tas_status, current_tas_frame, total_tas_frames] =
+ input_subsystem->GetTas()->GetStatus();
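+            // Ignore non-TAS hotkeys while a TAS script is running or recording.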
+ if (tas_allowed || tas_status == InputCommon::TasInput::TasState::Stopped) {
+ action->trigger();
+ }
+ },
Qt::QueuedConnection);
}
@@ -1193,9 +1201,9 @@ void GMainWindow::InitializeHotkeys() {
LinkActionShortcut(ui->action_Show_Status_Bar, QStringLiteral("Toggle Status Bar"));
LinkActionShortcut(ui->action_Fullscreen, QStringLiteral("Fullscreen"));
LinkActionShortcut(ui->action_Capture_Screenshot, QStringLiteral("Capture Screenshot"));
- LinkActionShortcut(ui->action_TAS_Start, QStringLiteral("TAS Start/Stop"));
- LinkActionShortcut(ui->action_TAS_Record, QStringLiteral("TAS Record"));
- LinkActionShortcut(ui->action_TAS_Reset, QStringLiteral("TAS Reset"));
+ LinkActionShortcut(ui->action_TAS_Start, QStringLiteral("TAS Start/Stop"), true);
+ LinkActionShortcut(ui->action_TAS_Record, QStringLiteral("TAS Record"), true);
+ LinkActionShortcut(ui->action_TAS_Reset, QStringLiteral("TAS Reset"), true);
static const QString main_window = QStringLiteral("Main Window");
const auto connect_shortcut = [&]<typename Fn>(const QString& action_name, const Fn& function) {
@@ -2315,6 +2323,8 @@ void GMainWindow::OnGameListRemoveFile(u64 program_id, GameListRemoveTarget targ
return tr("Delete All Transferable Shader Caches?");
case GameListRemoveTarget::CustomConfiguration:
return tr("Remove Custom Game Configuration?");
+ case GameListRemoveTarget::CacheStorage:
+ return tr("Remove Cache Storage?");
default:
return QString{};
}
@@ -2338,6 +2348,9 @@ void GMainWindow::OnGameListRemoveFile(u64 program_id, GameListRemoveTarget targ
case GameListRemoveTarget::CustomConfiguration:
RemoveCustomConfiguration(program_id, game_path);
break;
+ case GameListRemoveTarget::CacheStorage:
+ RemoveCacheStorage(program_id);
+ break;
}
}
@@ -2427,6 +2440,21 @@ void GMainWindow::RemoveCustomConfiguration(u64 program_id, const std::string& g
}
}
+void GMainWindow::RemoveCacheStorage(u64 program_id) {
+ const auto nand_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir);
+ auto vfs_nand_dir =
+ vfs->OpenDirectory(Common::FS::PathToUTF8String(nand_dir), FileSys::Mode::Read);
+
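+    // Resolve the cache storage save data path inside the emulated NAND.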
+ const auto cache_storage_path = FileSys::SaveDataFactory::GetFullPath(
+ *system, vfs_nand_dir, FileSys::SaveDataSpaceId::NandUser,
+ FileSys::SaveDataType::CacheStorage, 0 /* program_id */, {}, 0);
+
+ const auto path = Common::FS::ConcatPathSafe(nand_dir, cache_storage_path);
+
+    // Not an error if the directory does not exist or cannot be removed.
+ Common::FS::RemoveDirRecursively(path);
+}
+
void GMainWindow::OnGameListDumpRomFS(u64 program_id, const std::string& game_path,
DumpRomFSTarget target) {
const auto failed = [this] {
diff --git a/src/yuzu/main.h b/src/yuzu/main.h
index 7b23f2a59..6bb70972f 100644
--- a/src/yuzu/main.h
+++ b/src/yuzu/main.h
@@ -214,7 +214,8 @@ public slots:
private:
/// Updates an action's shortcut and text to reflect an updated hotkey from the hotkey registry.
- void LinkActionShortcut(QAction* action, const QString& action_name);
+ void LinkActionShortcut(QAction* action, const QString& action_name,
+ const bool tas_allowed = false);
void RegisterMetaTypes();
@@ -369,6 +370,7 @@ private:
void RemoveVulkanDriverPipelineCache(u64 program_id);
void RemoveAllTransferableShaderCaches(u64 program_id);
void RemoveCustomConfiguration(u64 program_id, const std::string& game_path);
+ void RemoveCacheStorage(u64 program_id);
std::optional<u64> SelectRomFSDumpTarget(const FileSys::ContentProvider&, u64 program_id);
InstallResult InstallNSPXCI(const QString& filename);
InstallResult InstallNCA(const QString& filename);
diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp
index dc9a3d68f..c5bc472ca 100644
--- a/src/yuzu_cmd/config.cpp
+++ b/src/yuzu_cmd/config.cpp
@@ -318,6 +318,7 @@ void Config::ReadValues() {
ReadSetting("Renderer", Settings::values.nvdec_emulation);
ReadSetting("Renderer", Settings::values.accelerate_astc);
ReadSetting("Renderer", Settings::values.async_astc);
+ ReadSetting("Renderer", Settings::values.astc_recompression);
ReadSetting("Renderer", Settings::values.use_fast_gpu_time);
ReadSetting("Renderer", Settings::values.use_vulkan_driver_pipeline_cache);
diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h
index 5e7c3ac04..644a30e59 100644
--- a/src/yuzu_cmd/default_ini.h
+++ b/src/yuzu_cmd/default_ini.h
@@ -360,6 +360,10 @@ accelerate_astc =
# 0 (default): Off, 1: On
async_astc =
+# Recompress ASTC textures to a different format.
+# 0 (default): Uncompressed, 1: BC1 (Low quality), 2: BC3 (Medium quality)
+astc_recompression =
+
# Turns on the speed limiter, which will limit the emulation speed to the desired speed limit value
# 0: Off, 1: On (default)
use_speed_limit =
diff --git a/src/yuzu_cmd/yuzu.cpp b/src/yuzu_cmd/yuzu.cpp
index 5f39ece32..7b6d49c63 100644
--- a/src/yuzu_cmd/yuzu.cpp
+++ b/src/yuzu_cmd/yuzu.cpp
@@ -227,7 +227,7 @@ int main(int argc, char** argv) {
};
while (optind < argc) {
- int arg = getopt_long(argc, argv, "g:fhvp::c:", long_options, &option_index);
+ int arg = getopt_long(argc, argv, "g:fhvp::c:u:", long_options, &option_index);
if (arg != -1) {
switch (static_cast<char>(arg)) {
case 'c':
@@ -283,7 +283,7 @@ int main(int argc, char** argv) {
break;
case 'u':
selected_user = atoi(optarg);
- return 0;
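+                // Keep parsing the remaining options instead of exiting immediately.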
+ break;
case 'v':
PrintVersion();
return 0;