path: root/src/video_core
Diffstat (limited to 'src/video_core')
-rw-r--r--  src/video_core/macro/macro.cpp                  |  6
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp | 11
-rw-r--r--  src/video_core/texture_cache/texture_cache.h    | 63
3 files changed, 41 insertions, 39 deletions
diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp
index 82ad0477d..905505ca1 100644
--- a/src/video_core/macro/macro.cpp
+++ b/src/video_core/macro/macro.cpp
@@ -6,7 +6,7 @@
#include <optional>
#include <span>
-#include <boost/container_hash/hash.hpp>
+#include "common/container_hash.h"
#include <fstream>
#include "common/assert.h"
@@ -89,7 +89,7 @@ void MacroEngine::Execute(u32 method, const std::vector<u32>& parameters) {
if (!mid_method.has_value()) {
cache_info.lle_program = Compile(macro_code->second);
- cache_info.hash = boost::hash_value(macro_code->second);
+ cache_info.hash = Common::HashValue(macro_code->second);
if (Settings::values.dump_macros) {
Dump(cache_info.hash, macro_code->second);
}
@@ -100,7 +100,7 @@ void MacroEngine::Execute(u32 method, const std::vector<u32>& parameters) {
code.resize(macro_cached.size() - rebased_method);
std::memcpy(code.data(), macro_cached.data() + rebased_method,
code.size() * sizeof(u32));
- cache_info.hash = boost::hash_value(code);
+ cache_info.hash = Common::HashValue(code);
cache_info.lle_program = Compile(code);
if (Settings::values.dump_macros) {
Dump(cache_info.hash, code);
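
Note: the replacement header, common/container_hash.h, is not part of this diff. The sketch below shows one way a drop-in stand-in for boost::hash_value over a contiguous container could look; the names HashCombine/HashValue and the mixing constant follow boost's classic hash_combine and are assumptions, not the actual contents of that header.

    // Hypothetical sketch of a boost-style container hash; NOT the actual
    // contents of src/common/container_hash.h, which this diff does not show.
    #include <cstddef>
    #include <functional>
    #include <type_traits>

    namespace Common {

    // Fold one hashed value into an accumulated seed (boost::hash_combine's
    // classic mixing formula).
    inline void HashCombine(std::size_t& seed, std::size_t value) {
        seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2);
    }

    // Hash every element of a contiguous container, e.g. the std::vector<u32>
    // of macro code hashed in MacroEngine::Execute above.
    template <typename Container>
    std::size_t HashValue(const Container& data) {
        std::size_t seed = 0;
        for (const auto& element : data) {
            using Element = std::decay_t<decltype(element)>;
            HashCombine(seed, std::hash<Element>{}(element));
        }
        return seed;
    }

    } // namespace Common
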
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index c636a1625..b264e6ada 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -65,12 +65,13 @@ void Scheduler::WaitWorker() {
DispatchWork();
// Ensure the queue is drained.
- std::unique_lock ql{queue_mutex};
- event_cv.wait(ql, [this] { return work_queue.empty(); });
+ {
+ std::unique_lock ql{queue_mutex};
+ event_cv.wait(ql, [this] { return work_queue.empty(); });
+ }
// Now wait for execution to finish.
- // This needs to be done in the same order as WorkerThread.
- std::unique_lock el{execution_mutex};
+ std::scoped_lock el{execution_mutex};
}
void Scheduler::DispatchWork() {
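
The hunk above scopes the queue lock so it is released before execution_mutex is taken, which is presumably why the old comment about matching WorkerThread's lock order could be dropped. A simplified, self-contained illustration of the resulting pattern, with stand-in members rather than the real Scheduler internals:

    // Simplified illustration of the WaitWorker() locking pattern above.
    #include <condition_variable>
    #include <deque>
    #include <functional>
    #include <mutex>

    class MiniScheduler {
    public:
        void WaitWorker() {
            // Drain the queue while holding only queue_mutex...
            {
                std::unique_lock ql{queue_mutex};
                event_cv.wait(ql, [this] { return work_queue.empty(); });
            } // ...and release it here, so both mutexes are never held at once.

            // Then wait for the worker to finish the item it already popped.
            // Simply acquiring execution_mutex is enough, so scoped_lock
            // suffices and no lock-ordering constraint remains.
            std::scoped_lock el{execution_mutex};
        }

    private:
        std::mutex queue_mutex;
        std::mutex execution_mutex;
        std::condition_variable event_cv;
        std::deque<std::function<void()>> work_queue;
    };
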
@@ -327,7 +328,7 @@ void Scheduler::AcquireNewChunk() {
chunk = std::make_unique<CommandChunk>();
} else {
// Otherwise, we can just take from the reserve.
- chunk = std::make_unique<CommandChunk>();
+ chunk = std::move(chunk_reserve.back());
chunk_reserve.pop_back();
}
}
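
The second hunk fixes the reserve path, which previously allocated a brand-new CommandChunk and discarded the reserved one. A minimal sketch of the intended reuse pattern; the pool and type below are stand-ins for the actual Scheduler members:

    // Minimal sketch of the reserve-reuse pattern fixed above.
    #include <memory>
    #include <utility>
    #include <vector>

    struct CommandChunk {};

    class ChunkPool {
    public:
        std::unique_ptr<CommandChunk> Acquire() {
            if (reserve.empty()) {
                // Nothing to recycle; allocate a fresh chunk.
                return std::make_unique<CommandChunk>();
            }
            // Reuse the last returned chunk instead of allocating a new one:
            // move ownership out of the reserve, then drop the emptied slot.
            std::unique_ptr<CommandChunk> chunk = std::move(reserve.back());
            reserve.pop_back();
            return chunk;
        }

        void Release(std::unique_ptr<CommandChunk> chunk) {
            reserve.push_back(std::move(chunk));
        }

    private:
        std::vector<std::unique_ptr<CommandChunk>> reserve;
    };
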
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 8e8b9a5e6..858449af8 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -1616,37 +1616,38 @@ void TextureCache<P>::ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, s
return;
}
auto& gpu_page_table = gpu_page_table_storage[*storage_id];
- ForEachGPUPage(gpu_addr, size, [this, gpu_page_table, &images, gpu_addr, size, func](u64 page) {
- const auto it = gpu_page_table.find(page);
- if (it == gpu_page_table.end()) {
- if constexpr (BOOL_BREAK) {
- return false;
- } else {
- return;
- }
- }
- for (const ImageId image_id : it->second) {
- Image& image = slot_images[image_id];
- if (True(image.flags & ImageFlagBits::Picked)) {
- continue;
- }
- if (!image.OverlapsGPU(gpu_addr, size)) {
- continue;
- }
- image.flags |= ImageFlagBits::Picked;
- images.push_back(image_id);
- if constexpr (BOOL_BREAK) {
- if (func(image_id, image)) {
- return true;
- }
- } else {
- func(image_id, image);
- }
- }
- if constexpr (BOOL_BREAK) {
- return false;
- }
- });
+ ForEachGPUPage(gpu_addr, size,
+ [this, &gpu_page_table, &images, gpu_addr, size, func](u64 page) {
+ const auto it = gpu_page_table.find(page);
+ if (it == gpu_page_table.end()) {
+ if constexpr (BOOL_BREAK) {
+ return false;
+ } else {
+ return;
+ }
+ }
+ for (const ImageId image_id : it->second) {
+ Image& image = slot_images[image_id];
+ if (True(image.flags & ImageFlagBits::Picked)) {
+ continue;
+ }
+ if (!image.OverlapsGPU(gpu_addr, size)) {
+ continue;
+ }
+ image.flags |= ImageFlagBits::Picked;
+ images.push_back(image_id);
+ if constexpr (BOOL_BREAK) {
+ if (func(image_id, image)) {
+ return true;
+ }
+ } else {
+ func(image_id, image);
+ }
+ }
+ if constexpr (BOOL_BREAK) {
+ return false;
+ }
+ });
for (const ImageId image_id : images) {
slot_images[image_id].flags &= ~ImageFlagBits::Picked;
}
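
The only functional change in this hunk is the capture list: gpu_page_table is now captured by reference instead of being copied into the closure for every call. A small standalone example of the difference, using illustrative names rather than texture-cache code:

    // Capturing a container by value copies it into the closure at creation
    // time, while capturing by reference reads the live table and avoids the copy.
    #include <cstdint>
    #include <iostream>
    #include <unordered_map>
    #include <vector>

    int main() {
        std::unordered_map<std::uint64_t, std::vector<int>> page_table{{0, {1, 2, 3}}};

        // By-value capture: the lambda owns a snapshot copy of page_table.
        auto by_value = [page_table](std::uint64_t page) { return page_table.count(page); };

        // By-reference capture: the lambda searches the caller's table directly.
        auto by_ref = [&page_table](std::uint64_t page) { return page_table.count(page); };

        page_table.emplace(1, std::vector<int>{4});

        std::cout << by_value(1) << '\n'; // 0: the snapshot predates the insertion
        std::cout << by_ref(1) << '\n';   // 1: the reference sees the live table
    }
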