Diffstat (limited to 'src/video_core')
-rw-r--r--  src/video_core/CMakeLists.txt | 5
-rw-r--r--  src/video_core/buffer_cache.h | 299
-rw-r--r--  src/video_core/buffer_cache/buffer_block.h | 76
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h | 447
-rw-r--r--  src/video_core/buffer_cache/map_interval.h | 89
-rw-r--r--  src/video_core/dma_pusher.cpp | 1
-rw-r--r--  src/video_core/engines/fermi_2d.cpp | 3
-rw-r--r--  src/video_core/engines/fermi_2d.h | 3
-rw-r--r--  src/video_core/engines/kepler_memory.cpp | 2
-rw-r--r--  src/video_core/engines/kepler_memory.h | 1
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp | 15
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp | 8
-rw-r--r--  src/video_core/engines/maxwell_dma.h | 9
-rw-r--r--  src/video_core/engines/shader_bytecode.h | 25
-rw-r--r--  src/video_core/gpu.cpp | 76
-rw-r--r--  src/video_core/gpu.h | 49
-rw-r--r--  src/video_core/gpu_asynch.cpp | 9
-rw-r--r--  src/video_core/gpu_asynch.h | 3
-rw-r--r--  src/video_core/gpu_synch.cpp | 2
-rw-r--r--  src/video_core/gpu_synch.h | 4
-rw-r--r--  src/video_core/gpu_thread.cpp | 27
-rw-r--r--  src/video_core/gpu_thread.h | 32
-rw-r--r--  src/video_core/rasterizer_interface.h | 3
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.cpp | 52
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.h | 39
-rw-r--r--  src/video_core/renderer_opengl/gl_device.cpp | 3
-rw-r--r--  src/video_core/renderer_opengl/gl_device.h | 5
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 8
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h | 1
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp | 14
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.h | 1
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.cpp | 67
-rw-r--r--  src/video_core/renderer_opengl/gl_texture_cache.cpp | 5
-rw-r--r--  src/video_core/renderer_opengl/gl_texture_cache.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_decompiler.cpp | 43
-rw-r--r--  src/video_core/shader/control_flow.cpp | 47
-rw-r--r--  src/video_core/shader/control_flow.h | 30
-rw-r--r--  src/video_core/shader/decode.cpp | 5
-rw-r--r--  src/video_core/shader/decode/arithmetic.cpp | 13
-rw-r--r--  src/video_core/shader/decode/arithmetic_half_immediate.cpp | 4
-rw-r--r--  src/video_core/shader/decode/conversion.cpp | 30
-rw-r--r--  src/video_core/shader/decode/ffma.cpp | 10
-rw-r--r--  src/video_core/shader/decode/float_set.cpp | 1
-rw-r--r--  src/video_core/shader/decode/float_set_predicate.cpp | 1
-rw-r--r--  src/video_core/shader/decode/half_set_predicate.cpp | 10
-rw-r--r--  src/video_core/shader/decode/hfma2.cpp | 4
-rw-r--r--  src/video_core/shader/decode/integer_set.cpp | 1
-rw-r--r--  src/video_core/shader/decode/integer_set_predicate.cpp | 1
-rw-r--r--  src/video_core/shader/decode/other.cpp | 13
-rw-r--r--  src/video_core/shader/decode/predicate_set_register.cpp | 1
-rw-r--r--  src/video_core/shader/decode/warp.cpp | 55
-rw-r--r--  src/video_core/shader/node.h | 30
-rw-r--r--  src/video_core/shader/shader_ir.cpp | 5
-rw-r--r--  src/video_core/shader/shader_ir.h | 4
-rw-r--r--  src/video_core/shader/track.cpp | 4
-rw-r--r--  src/video_core/texture_cache/surface_base.cpp | 5
-rw-r--r--  src/video_core/texture_cache/surface_params.h | 1
-rw-r--r--  src/video_core/texture_cache/texture_cache.h | 2
-rw-r--r--  src/video_core/textures/texture.h | 2
59 files changed, 1189 insertions, 518 deletions
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 7c18c27b3..e2f85c5f1 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -1,5 +1,7 @@
add_library(video_core STATIC
- buffer_cache.h
+ buffer_cache/buffer_block.h
+ buffer_cache/buffer_cache.h
+ buffer_cache/map_interval.h
dma_pusher.cpp
dma_pusher.h
debug_utils/debug_utils.cpp
@@ -100,6 +102,7 @@ add_library(video_core STATIC
shader/decode/integer_set.cpp
shader/decode/half_set.cpp
shader/decode/video.cpp
+ shader/decode/warp.cpp
shader/decode/xmad.cpp
shader/decode/other.cpp
shader/control_flow.cpp
diff --git a/src/video_core/buffer_cache.h b/src/video_core/buffer_cache.h
deleted file mode 100644
index 6f868b8b4..000000000
--- a/src/video_core/buffer_cache.h
+++ /dev/null
@@ -1,299 +0,0 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include <array>
-#include <memory>
-#include <mutex>
-#include <unordered_map>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-#include "common/alignment.h"
-#include "common/common_types.h"
-#include "core/core.h"
-#include "video_core/memory_manager.h"
-#include "video_core/rasterizer_cache.h"
-
-namespace VideoCore {
-class RasterizerInterface;
-}
-
-namespace VideoCommon {
-
-template <typename BufferStorageType>
-class CachedBuffer final : public RasterizerCacheObject {
-public:
- explicit CachedBuffer(VAddr cpu_addr, u8* host_ptr)
- : RasterizerCacheObject{host_ptr}, host_ptr{host_ptr}, cpu_addr{cpu_addr} {}
- ~CachedBuffer() override = default;
-
- VAddr GetCpuAddr() const override {
- return cpu_addr;
- }
-
- std::size_t GetSizeInBytes() const override {
- return size;
- }
-
- u8* GetWritableHostPtr() const {
- return host_ptr;
- }
-
- std::size_t GetSize() const {
- return size;
- }
-
- std::size_t GetCapacity() const {
- return capacity;
- }
-
- bool IsInternalized() const {
- return is_internal;
- }
-
- const BufferStorageType& GetBuffer() const {
- return buffer;
- }
-
- void SetSize(std::size_t new_size) {
- size = new_size;
- }
-
- void SetInternalState(bool is_internal_) {
- is_internal = is_internal_;
- }
-
- BufferStorageType ExchangeBuffer(BufferStorageType buffer_, std::size_t new_capacity) {
- capacity = new_capacity;
- std::swap(buffer, buffer_);
- return buffer_;
- }
-
-private:
- u8* host_ptr{};
- VAddr cpu_addr{};
- std::size_t size{};
- std::size_t capacity{};
- bool is_internal{};
- BufferStorageType buffer;
-};
-
-template <typename BufferStorageType, typename BufferType, typename StreamBuffer>
-class BufferCache : public RasterizerCache<std::shared_ptr<CachedBuffer<BufferStorageType>>> {
-public:
- using Buffer = std::shared_ptr<CachedBuffer<BufferStorageType>>;
- using BufferInfo = std::pair<const BufferType*, u64>;
-
- explicit BufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
- std::unique_ptr<StreamBuffer> stream_buffer)
- : RasterizerCache<Buffer>{rasterizer}, system{system},
- stream_buffer{std::move(stream_buffer)}, stream_buffer_handle{
- this->stream_buffer->GetHandle()} {}
- ~BufferCache() = default;
-
- void Unregister(const Buffer& entry) override {
- std::lock_guard lock{RasterizerCache<Buffer>::mutex};
- if (entry->IsInternalized()) {
- internalized_entries.erase(entry->GetCacheAddr());
- }
- ReserveBuffer(entry);
- RasterizerCache<Buffer>::Unregister(entry);
- }
-
- void TickFrame() {
- marked_for_destruction_index =
- (marked_for_destruction_index + 1) % marked_for_destruction_ring_buffer.size();
- MarkedForDestruction().clear();
- }
-
- BufferInfo UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::size_t alignment = 4,
- bool internalize = false, bool is_written = false) {
- std::lock_guard lock{RasterizerCache<Buffer>::mutex};
-
- auto& memory_manager = system.GPU().MemoryManager();
- const auto host_ptr = memory_manager.GetPointer(gpu_addr);
- if (!host_ptr) {
- return {GetEmptyBuffer(size), 0};
- }
- const auto cache_addr = ToCacheAddr(host_ptr);
-
- // Cache management is a big overhead, so only cache entries with a given size.
- // TODO: Figure out which size is the best for given games.
- constexpr std::size_t max_stream_size = 0x800;
- if (!internalize && size < max_stream_size &&
- internalized_entries.find(cache_addr) == internalized_entries.end()) {
- return StreamBufferUpload(host_ptr, size, alignment);
- }
-
- auto entry = RasterizerCache<Buffer>::TryGet(cache_addr);
- if (!entry) {
- return FixedBufferUpload(gpu_addr, host_ptr, size, internalize, is_written);
- }
-
- if (entry->GetSize() < size) {
- IncreaseBufferSize(entry, size);
- }
- if (is_written) {
- entry->MarkAsModified(true, *this);
- }
- return {ToHandle(entry->GetBuffer()), 0};
- }
-
- /// Uploads from a host memory. Returns the OpenGL buffer where it's located and its offset.
- BufferInfo UploadHostMemory(const void* raw_pointer, std::size_t size,
- std::size_t alignment = 4) {
- std::lock_guard lock{RasterizerCache<Buffer>::mutex};
- return StreamBufferUpload(raw_pointer, size, alignment);
- }
-
- void Map(std::size_t max_size) {
- std::tie(buffer_ptr, buffer_offset_base, invalidated) = stream_buffer->Map(max_size, 4);
- buffer_offset = buffer_offset_base;
- }
-
- /// Finishes the upload stream, returns true on bindings invalidation.
- bool Unmap() {
- stream_buffer->Unmap(buffer_offset - buffer_offset_base);
- return std::exchange(invalidated, false);
- }
-
- virtual const BufferType* GetEmptyBuffer(std::size_t size) = 0;
-
-protected:
- void FlushObjectInner(const Buffer& entry) override {
- DownloadBufferData(entry->GetBuffer(), 0, entry->GetSize(), entry->GetWritableHostPtr());
- }
-
- virtual BufferStorageType CreateBuffer(std::size_t size) = 0;
-
- virtual const BufferType* ToHandle(const BufferStorageType& storage) = 0;
-
- virtual void UploadBufferData(const BufferStorageType& buffer, std::size_t offset,
- std::size_t size, const u8* data) = 0;
-
- virtual void DownloadBufferData(const BufferStorageType& buffer, std::size_t offset,
- std::size_t size, u8* data) = 0;
-
- virtual void CopyBufferData(const BufferStorageType& src, const BufferStorageType& dst,
- std::size_t src_offset, std::size_t dst_offset,
- std::size_t size) = 0;
-
-private:
- BufferInfo StreamBufferUpload(const void* raw_pointer, std::size_t size,
- std::size_t alignment) {
- AlignBuffer(alignment);
- const std::size_t uploaded_offset = buffer_offset;
- std::memcpy(buffer_ptr, raw_pointer, size);
-
- buffer_ptr += size;
- buffer_offset += size;
- return {&stream_buffer_handle, uploaded_offset};
- }
-
- BufferInfo FixedBufferUpload(GPUVAddr gpu_addr, u8* host_ptr, std::size_t size,
- bool internalize, bool is_written) {
- auto& memory_manager = Core::System::GetInstance().GPU().MemoryManager();
- const auto cpu_addr = memory_manager.GpuToCpuAddress(gpu_addr);
- ASSERT(cpu_addr);
-
- auto entry = GetUncachedBuffer(*cpu_addr, host_ptr);
- entry->SetSize(size);
- entry->SetInternalState(internalize);
- RasterizerCache<Buffer>::Register(entry);
-
- if (internalize) {
- internalized_entries.emplace(ToCacheAddr(host_ptr));
- }
- if (is_written) {
- entry->MarkAsModified(true, *this);
- }
-
- if (entry->GetCapacity() < size) {
- MarkedForDestruction().push_back(entry->ExchangeBuffer(CreateBuffer(size), size));
- }
-
- UploadBufferData(entry->GetBuffer(), 0, size, host_ptr);
- return {ToHandle(entry->GetBuffer()), 0};
- }
-
- void IncreaseBufferSize(Buffer& entry, std::size_t new_size) {
- const std::size_t old_size = entry->GetSize();
- if (entry->GetCapacity() < new_size) {
- const auto& old_buffer = entry->GetBuffer();
- auto new_buffer = CreateBuffer(new_size);
-
- // Copy bits from the old buffer to the new buffer.
- CopyBufferData(old_buffer, new_buffer, 0, 0, old_size);
- MarkedForDestruction().push_back(
- entry->ExchangeBuffer(std::move(new_buffer), new_size));
-
- // This buffer could have been used
- invalidated = true;
- }
- // Upload the new bits.
- const std::size_t size_diff = new_size - old_size;
- UploadBufferData(entry->GetBuffer(), old_size, size_diff, entry->GetHostPtr() + old_size);
-
- // Update entry's size in the object and in the cache.
- Unregister(entry);
-
- entry->SetSize(new_size);
- RasterizerCache<Buffer>::Register(entry);
- }
-
- Buffer GetUncachedBuffer(VAddr cpu_addr, u8* host_ptr) {
- if (auto entry = TryGetReservedBuffer(host_ptr)) {
- return entry;
- }
- return std::make_shared<CachedBuffer<BufferStorageType>>(cpu_addr, host_ptr);
- }
-
- Buffer TryGetReservedBuffer(u8* host_ptr) {
- const auto it = buffer_reserve.find(ToCacheAddr(host_ptr));
- if (it == buffer_reserve.end()) {
- return {};
- }
- auto& reserve = it->second;
- auto entry = reserve.back();
- reserve.pop_back();
- return entry;
- }
-
- void ReserveBuffer(Buffer entry) {
- buffer_reserve[entry->GetCacheAddr()].push_back(std::move(entry));
- }
-
- void AlignBuffer(std::size_t alignment) {
- // Align the offset, not the mapped pointer
- const std::size_t offset_aligned = Common::AlignUp(buffer_offset, alignment);
- buffer_ptr += offset_aligned - buffer_offset;
- buffer_offset = offset_aligned;
- }
-
- std::vector<BufferStorageType>& MarkedForDestruction() {
- return marked_for_destruction_ring_buffer[marked_for_destruction_index];
- }
-
- Core::System& system;
-
- std::unique_ptr<StreamBuffer> stream_buffer;
- BufferType stream_buffer_handle{};
-
- bool invalidated = false;
-
- u8* buffer_ptr = nullptr;
- u64 buffer_offset = 0;
- u64 buffer_offset_base = 0;
-
- std::size_t marked_for_destruction_index = 0;
- std::array<std::vector<BufferStorageType>, 4> marked_for_destruction_ring_buffer;
-
- std::unordered_set<CacheAddr> internalized_entries;
- std::unordered_map<CacheAddr, std::vector<Buffer>> buffer_reserve;
-};
-
-} // namespace VideoCommon
diff --git a/src/video_core/buffer_cache/buffer_block.h b/src/video_core/buffer_cache/buffer_block.h
new file mode 100644
index 000000000..4b9193182
--- /dev/null
+++ b/src/video_core/buffer_cache/buffer_block.h
@@ -0,0 +1,76 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <unordered_set>
+#include <utility>
+
+#include "common/alignment.h"
+#include "common/common_types.h"
+#include "video_core/gpu.h"
+
+namespace VideoCommon {
+
+class BufferBlock {
+public:
+ bool Overlaps(const CacheAddr start, const CacheAddr end) const {
+ return (cache_addr < end) && (cache_addr_end > start);
+ }
+
+ bool IsInside(const CacheAddr other_start, const CacheAddr other_end) const {
+ return cache_addr <= other_start && other_end <= cache_addr_end;
+ }
+
+ u8* GetWritableHostPtr() const {
+ return FromCacheAddr(cache_addr);
+ }
+
+ u8* GetWritableHostPtr(std::size_t offset) const {
+ return FromCacheAddr(cache_addr + offset);
+ }
+
+ std::size_t GetOffset(const CacheAddr in_addr) {
+ return static_cast<std::size_t>(in_addr - cache_addr);
+ }
+
+ CacheAddr GetCacheAddr() const {
+ return cache_addr;
+ }
+
+ CacheAddr GetCacheAddrEnd() const {
+ return cache_addr_end;
+ }
+
+ void SetCacheAddr(const CacheAddr new_addr) {
+ cache_addr = new_addr;
+ cache_addr_end = new_addr + size;
+ }
+
+ std::size_t GetSize() const {
+ return size;
+ }
+
+ void SetEpoch(u64 new_epoch) {
+ epoch = new_epoch;
+ }
+
+ u64 GetEpoch() {
+ return epoch;
+ }
+
+protected:
+ explicit BufferBlock(CacheAddr cache_addr, const std::size_t size) : size{size} {
+ SetCacheAddr(cache_addr);
+ }
+ ~BufferBlock() = default;
+
+private:
+ CacheAddr cache_addr{};
+ CacheAddr cache_addr_end{};
+ std::size_t size{};
+ u64 epoch{};
+};
+
+} // namespace VideoCommon
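
For orientation, BufferBlock above is the backend-agnostic half of the new cache: it only tracks a cache-address range and an eviction epoch, while a derived class owns the actual GPU buffer object. A minimal sketch of such a specialization follows; the class name is hypothetical, and the real OpenGL version (CachedBufferBlock) appears further down in this same patch.

    // Hypothetical backend block, sketched only to show how BufferBlock is meant to be
    // derived from; see CachedBufferBlock in gl_buffer_cache.h below for the version
    // this patch actually adds.
    #include "video_core/buffer_cache/buffer_block.h"

    class ExampleBufferBlock : public VideoCommon::BufferBlock {
    public:
        explicit ExampleBufferBlock(CacheAddr cache_addr, std::size_t size)
            : VideoCommon::BufferBlock{cache_addr, size} {
            // A real backend would create its API buffer object of `size` bytes here.
        }
        ~ExampleBufferBlock() = default;
    };
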
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
new file mode 100644
index 000000000..2442ddfd6
--- /dev/null
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -0,0 +1,447 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <algorithm>
+#include <array>
+#include <cstring>
+#include <list>
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include <boost/icl/interval_map.hpp>
+#include <boost/icl/interval_set.hpp>
+#include <boost/range/iterator_range.hpp>
+
+#include "common/alignment.h"
+#include "common/common_types.h"
+#include "core/core.h"
+#include "video_core/buffer_cache/buffer_block.h"
+#include "video_core/buffer_cache/map_interval.h"
+#include "video_core/memory_manager.h"
+#include "video_core/rasterizer_interface.h"
+
+namespace VideoCommon {
+
+using MapInterval = std::shared_ptr<MapIntervalBase>;
+
+template <typename TBuffer, typename TBufferType, typename StreamBuffer>
+class BufferCache {
+public:
+ using BufferInfo = std::pair<const TBufferType*, u64>;
+
+ BufferInfo UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::size_t alignment = 4,
+ bool is_written = false) {
+ std::lock_guard lock{mutex};
+
+ auto& memory_manager = system.GPU().MemoryManager();
+ const auto host_ptr = memory_manager.GetPointer(gpu_addr);
+ if (!host_ptr) {
+ return {GetEmptyBuffer(size), 0};
+ }
+ const auto cache_addr = ToCacheAddr(host_ptr);
+
+ // Cache management is a big overhead, so only cache entries with a given size.
+ // TODO: Figure out which size is the best for given games.
+ constexpr std::size_t max_stream_size = 0x800;
+ if (size < max_stream_size) {
+ if (!is_written && !IsRegionWritten(cache_addr, cache_addr + size - 1)) {
+ return StreamBufferUpload(host_ptr, size, alignment);
+ }
+ }
+
+ auto block = GetBlock(cache_addr, size);
+ auto map = MapAddress(block, gpu_addr, cache_addr, size);
+ if (is_written) {
+ map->MarkAsModified(true, GetModifiedTicks());
+ if (!map->IsWritten()) {
+ map->MarkAsWritten(true);
+ MarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
+ }
+ } else {
+ if (map->IsWritten()) {
+ WriteBarrier();
+ }
+ }
+
+ const u64 offset = static_cast<u64>(block->GetOffset(cache_addr));
+
+ return {ToHandle(block), offset};
+ }
+
+ /// Uploads from a host memory. Returns the OpenGL buffer where it's located and its offset.
+ BufferInfo UploadHostMemory(const void* raw_pointer, std::size_t size,
+ std::size_t alignment = 4) {
+ std::lock_guard lock{mutex};
+ return StreamBufferUpload(raw_pointer, size, alignment);
+ }
+
+ void Map(std::size_t max_size) {
+ std::lock_guard lock{mutex};
+
+ std::tie(buffer_ptr, buffer_offset_base, invalidated) = stream_buffer->Map(max_size, 4);
+ buffer_offset = buffer_offset_base;
+ }
+
+ /// Finishes the upload stream, returns true on bindings invalidation.
+ bool Unmap() {
+ std::lock_guard lock{mutex};
+
+ stream_buffer->Unmap(buffer_offset - buffer_offset_base);
+ return std::exchange(invalidated, false);
+ }
+
+ void TickFrame() {
+ ++epoch;
+ while (!pending_destruction.empty()) {
+ if (pending_destruction.front()->GetEpoch() + 1 > epoch) {
+ break;
+ }
+ pending_destruction.pop_front();
+ }
+ }
+
+ /// Write any cached resources overlapping the specified region back to memory
+ void FlushRegion(CacheAddr addr, std::size_t size) {
+ std::lock_guard lock{mutex};
+
+ std::vector<MapInterval> objects = GetMapsInRange(addr, size);
+ std::sort(objects.begin(), objects.end(), [](const MapInterval& a, const MapInterval& b) {
+ return a->GetModificationTick() < b->GetModificationTick();
+ });
+ for (auto& object : objects) {
+ if (object->IsModified() && object->IsRegistered()) {
+ FlushMap(object);
+ }
+ }
+ }
+
+ /// Mark the specified region as being invalidated
+ void InvalidateRegion(CacheAddr addr, u64 size) {
+ std::lock_guard lock{mutex};
+
+ std::vector<MapInterval> objects = GetMapsInRange(addr, size);
+ for (auto& object : objects) {
+ if (object->IsRegistered()) {
+ Unregister(object);
+ }
+ }
+ }
+
+ virtual const TBufferType* GetEmptyBuffer(std::size_t size) = 0;
+
+protected:
+ explicit BufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
+ std::unique_ptr<StreamBuffer> stream_buffer)
+ : rasterizer{rasterizer}, system{system}, stream_buffer{std::move(stream_buffer)},
+ stream_buffer_handle{this->stream_buffer->GetHandle()} {}
+
+ ~BufferCache() = default;
+
+ virtual const TBufferType* ToHandle(const TBuffer& storage) = 0;
+
+ virtual void WriteBarrier() = 0;
+
+ virtual TBuffer CreateBlock(CacheAddr cache_addr, std::size_t size) = 0;
+
+ virtual void UploadBlockData(const TBuffer& buffer, std::size_t offset, std::size_t size,
+ const u8* data) = 0;
+
+ virtual void DownloadBlockData(const TBuffer& buffer, std::size_t offset, std::size_t size,
+ u8* data) = 0;
+
+ virtual void CopyBlock(const TBuffer& src, const TBuffer& dst, std::size_t src_offset,
+ std::size_t dst_offset, std::size_t size) = 0;
+
+ /// Register an object into the cache
+ void Register(const MapInterval& new_map, bool inherit_written = false) {
+ const CacheAddr cache_ptr = new_map->GetStart();
+ const std::optional<VAddr> cpu_addr =
+ system.GPU().MemoryManager().GpuToCpuAddress(new_map->GetGpuAddress());
+ if (!cache_ptr || !cpu_addr) {
+ LOG_CRITICAL(HW_GPU, "Failed to register buffer with unmapped gpu_address 0x{:016x}",
+ new_map->GetGpuAddress());
+ return;
+ }
+ const std::size_t size = new_map->GetEnd() - new_map->GetStart();
+ new_map->SetCpuAddress(*cpu_addr);
+ new_map->MarkAsRegistered(true);
+ const IntervalType interval{new_map->GetStart(), new_map->GetEnd()};
+ mapped_addresses.insert({interval, new_map});
+ rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1);
+ if (inherit_written) {
+ MarkRegionAsWritten(new_map->GetStart(), new_map->GetEnd() - 1);
+ new_map->MarkAsWritten(true);
+ }
+ }
+
+ /// Unregisters an object from the cache
+ void Unregister(MapInterval& map) {
+ const std::size_t size = map->GetEnd() - map->GetStart();
+ rasterizer.UpdatePagesCachedCount(map->GetCpuAddress(), size, -1);
+ map->MarkAsRegistered(false);
+ if (map->IsWritten()) {
+ UnmarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
+ }
+ const IntervalType delete_interval{map->GetStart(), map->GetEnd()};
+ mapped_addresses.erase(delete_interval);
+ }
+
+private:
+ MapInterval CreateMap(const CacheAddr start, const CacheAddr end, const GPUVAddr gpu_addr) {
+ return std::make_shared<MapIntervalBase>(start, end, gpu_addr);
+ }
+
+ MapInterval MapAddress(const TBuffer& block, const GPUVAddr gpu_addr,
+ const CacheAddr cache_addr, const std::size_t size) {
+
+ std::vector<MapInterval> overlaps = GetMapsInRange(cache_addr, size);
+ if (overlaps.empty()) {
+ const CacheAddr cache_addr_end = cache_addr + size;
+ MapInterval new_map = CreateMap(cache_addr, cache_addr_end, gpu_addr);
+ u8* host_ptr = FromCacheAddr(cache_addr);
+ UploadBlockData(block, block->GetOffset(cache_addr), size, host_ptr);
+ Register(new_map);
+ return new_map;
+ }
+
+ const CacheAddr cache_addr_end = cache_addr + size;
+ if (overlaps.size() == 1) {
+ MapInterval& current_map = overlaps[0];
+ if (current_map->IsInside(cache_addr, cache_addr_end)) {
+ return current_map;
+ }
+ }
+ CacheAddr new_start = cache_addr;
+ CacheAddr new_end = cache_addr_end;
+ bool write_inheritance = false;
+ bool modified_inheritance = false;
+ // Calculate new buffer parameters
+ for (auto& overlap : overlaps) {
+ new_start = std::min(overlap->GetStart(), new_start);
+ new_end = std::max(overlap->GetEnd(), new_end);
+ write_inheritance |= overlap->IsWritten();
+ modified_inheritance |= overlap->IsModified();
+ }
+ GPUVAddr new_gpu_addr = gpu_addr + new_start - cache_addr;
+ for (auto& overlap : overlaps) {
+ Unregister(overlap);
+ }
+ UpdateBlock(block, new_start, new_end, overlaps);
+ MapInterval new_map = CreateMap(new_start, new_end, new_gpu_addr);
+ if (modified_inheritance) {
+ new_map->MarkAsModified(true, GetModifiedTicks());
+ }
+ Register(new_map, write_inheritance);
+ return new_map;
+ }
+
+ void UpdateBlock(const TBuffer& block, CacheAddr start, CacheAddr end,
+ std::vector<MapInterval>& overlaps) {
+ const IntervalType base_interval{start, end};
+ IntervalSet interval_set{};
+ interval_set.add(base_interval);
+ for (auto& overlap : overlaps) {
+ const IntervalType subtract{overlap->GetStart(), overlap->GetEnd()};
+ interval_set.subtract(subtract);
+ }
+ for (auto& interval : interval_set) {
+ std::size_t size = interval.upper() - interval.lower();
+ if (size > 0) {
+ u8* host_ptr = FromCacheAddr(interval.lower());
+ UploadBlockData(block, block->GetOffset(interval.lower()), size, host_ptr);
+ }
+ }
+ }
+
+ std::vector<MapInterval> GetMapsInRange(CacheAddr addr, std::size_t size) {
+ if (size == 0) {
+ return {};
+ }
+
+ std::vector<MapInterval> objects{};
+ const IntervalType interval{addr, addr + size};
+ for (auto& pair : boost::make_iterator_range(mapped_addresses.equal_range(interval))) {
+ objects.push_back(pair.second);
+ }
+
+ return objects;
+ }
+
+ /// Returns a ticks counter used for tracking when cached objects were last modified
+ u64 GetModifiedTicks() {
+ return ++modified_ticks;
+ }
+
+ void FlushMap(MapInterval map) {
+ std::size_t size = map->GetEnd() - map->GetStart();
+ TBuffer block = blocks[map->GetStart() >> block_page_bits];
+ u8* host_ptr = FromCacheAddr(map->GetStart());
+ DownloadBlockData(block, block->GetOffset(map->GetStart()), size, host_ptr);
+ map->MarkAsModified(false, 0);
+ }
+
+ BufferInfo StreamBufferUpload(const void* raw_pointer, std::size_t size,
+ std::size_t alignment) {
+ AlignBuffer(alignment);
+ const std::size_t uploaded_offset = buffer_offset;
+ std::memcpy(buffer_ptr, raw_pointer, size);
+
+ buffer_ptr += size;
+ buffer_offset += size;
+ return {&stream_buffer_handle, uploaded_offset};
+ }
+
+ void AlignBuffer(std::size_t alignment) {
+ // Align the offset, not the mapped pointer
+ const std::size_t offset_aligned = Common::AlignUp(buffer_offset, alignment);
+ buffer_ptr += offset_aligned - buffer_offset;
+ buffer_offset = offset_aligned;
+ }
+
+ TBuffer EnlargeBlock(TBuffer buffer) {
+ const std::size_t old_size = buffer->GetSize();
+ const std::size_t new_size = old_size + block_page_size;
+ const CacheAddr cache_addr = buffer->GetCacheAddr();
+ TBuffer new_buffer = CreateBlock(cache_addr, new_size);
+ CopyBlock(buffer, new_buffer, 0, 0, old_size);
+ buffer->SetEpoch(epoch);
+ pending_destruction.push_back(buffer);
+ const CacheAddr cache_addr_end = cache_addr + new_size - 1;
+ u64 page_start = cache_addr >> block_page_bits;
+ const u64 page_end = cache_addr_end >> block_page_bits;
+ while (page_start <= page_end) {
+ blocks[page_start] = new_buffer;
+ ++page_start;
+ }
+ return new_buffer;
+ }
+
+ TBuffer MergeBlocks(TBuffer first, TBuffer second) {
+ const std::size_t size_1 = first->GetSize();
+ const std::size_t size_2 = second->GetSize();
+ const CacheAddr first_addr = first->GetCacheAddr();
+ const CacheAddr second_addr = second->GetCacheAddr();
+ const CacheAddr new_addr = std::min(first_addr, second_addr);
+ const std::size_t new_size = size_1 + size_2;
+ TBuffer new_buffer = CreateBlock(new_addr, new_size);
+ CopyBlock(first, new_buffer, 0, new_buffer->GetOffset(first_addr), size_1);
+ CopyBlock(second, new_buffer, 0, new_buffer->GetOffset(second_addr), size_2);
+ first->SetEpoch(epoch);
+ second->SetEpoch(epoch);
+ pending_destruction.push_back(first);
+ pending_destruction.push_back(second);
+ const CacheAddr cache_addr_end = new_addr + new_size - 1;
+ u64 page_start = new_addr >> block_page_bits;
+ const u64 page_end = cache_addr_end >> block_page_bits;
+ while (page_start <= page_end) {
+ blocks[page_start] = new_buffer;
+ ++page_start;
+ }
+ return new_buffer;
+ }
+
+ TBuffer GetBlock(const CacheAddr cache_addr, const std::size_t size) {
+ TBuffer found{};
+ const CacheAddr cache_addr_end = cache_addr + size - 1;
+ u64 page_start = cache_addr >> block_page_bits;
+ const u64 page_end = cache_addr_end >> block_page_bits;
+ while (page_start <= page_end) {
+ auto it = blocks.find(page_start);
+ if (it == blocks.end()) {
+ if (found) {
+ found = EnlargeBlock(found);
+ } else {
+ const CacheAddr start_addr = (page_start << block_page_bits);
+ found = CreateBlock(start_addr, block_page_size);
+ blocks[page_start] = found;
+ }
+ } else {
+ if (found) {
+ if (found == it->second) {
+ ++page_start;
+ continue;
+ }
+ found = MergeBlocks(found, it->second);
+ } else {
+ found = it->second;
+ }
+ }
+ ++page_start;
+ }
+ return found;
+ }
+
+ void MarkRegionAsWritten(const CacheAddr start, const CacheAddr end) {
+ u64 page_start = start >> write_page_bit;
+ const u64 page_end = end >> write_page_bit;
+ while (page_start <= page_end) {
+ auto it = written_pages.find(page_start);
+ if (it != written_pages.end()) {
+ it->second = it->second + 1;
+ } else {
+ written_pages[page_start] = 1;
+ }
+ page_start++;
+ }
+ }
+
+ void UnmarkRegionAsWritten(const CacheAddr start, const CacheAddr end) {
+ u64 page_start = start >> write_page_bit;
+ const u64 page_end = end >> write_page_bit;
+ while (page_start <= page_end) {
+ auto it = written_pages.find(page_start);
+ if (it != written_pages.end()) {
+ if (it->second > 1) {
+ it->second = it->second - 1;
+ } else {
+ written_pages.erase(it);
+ }
+ }
+ page_start++;
+ }
+ }
+
+ bool IsRegionWritten(const CacheAddr start, const CacheAddr end) const {
+ u64 page_start = start >> write_page_bit;
+ const u64 page_end = end >> write_page_bit;
+ while (page_start <= page_end) {
+ if (written_pages.count(page_start) > 0) {
+ return true;
+ }
+ page_start++;
+ }
+ return false;
+ }
+
+ VideoCore::RasterizerInterface& rasterizer;
+ Core::System& system;
+ std::unique_ptr<StreamBuffer> stream_buffer;
+
+ TBufferType stream_buffer_handle{};
+
+ bool invalidated = false;
+
+ u8* buffer_ptr = nullptr;
+ u64 buffer_offset = 0;
+ u64 buffer_offset_base = 0;
+
+ using IntervalSet = boost::icl::interval_set<CacheAddr>;
+ using IntervalCache = boost::icl::interval_map<CacheAddr, MapInterval>;
+ using IntervalType = typename IntervalCache::interval_type;
+ IntervalCache mapped_addresses{};
+
+ static constexpr u64 write_page_bit{11};
+ std::unordered_map<u64, u32> written_pages{};
+
+ static constexpr u64 block_page_bits{21};
+ static constexpr u64 block_page_size{1 << block_page_bits};
+ std::unordered_map<u64, TBuffer> blocks{};
+
+ std::list<TBuffer> pending_destruction{};
+ u64 epoch{};
+ u64 modified_ticks{};
+
+ std::recursive_mutex mutex;
+};
+
+} // namespace VideoCommon
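
As a quick sanity check on the two granularities used above: written-region tracking shifts by write_page_bit = 11 (2 KiB pages keyed into written_pages), while block lookup shifts by block_page_bits = 21 (2 MiB pages keyed into blocks, which is also the growth step EnlargeBlock uses). A small worked example, with an address chosen purely for illustration:

    // Illustration only; the address is hypothetical.
    constexpr u64 write_page_bit = 11;   // 1 << 11 = 2 KiB written-page granularity
    constexpr u64 block_page_bits = 21;  // 1 << 21 = 2 MiB block granularity
    const CacheAddr addr = 0x12345678;
    const u64 written_page = addr >> write_page_bit;   // 0x2468A, key into written_pages
    const u64 block_page   = addr >> block_page_bits;  // 0x91, key into blocks
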
diff --git a/src/video_core/buffer_cache/map_interval.h b/src/video_core/buffer_cache/map_interval.h
new file mode 100644
index 000000000..3a104d5cd
--- /dev/null
+++ b/src/video_core/buffer_cache/map_interval.h
@@ -0,0 +1,89 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+#include "video_core/gpu.h"
+
+namespace VideoCommon {
+
+class MapIntervalBase {
+public:
+ MapIntervalBase(const CacheAddr start, const CacheAddr end, const GPUVAddr gpu_addr)
+ : start{start}, end{end}, gpu_addr{gpu_addr} {}
+
+ void SetCpuAddress(VAddr new_cpu_addr) {
+ cpu_addr = new_cpu_addr;
+ }
+
+ VAddr GetCpuAddress() const {
+ return cpu_addr;
+ }
+
+ GPUVAddr GetGpuAddress() const {
+ return gpu_addr;
+ }
+
+ bool IsInside(const CacheAddr other_start, const CacheAddr other_end) const {
+ return (start <= other_start && other_end <= end);
+ }
+
+ bool operator==(const MapIntervalBase& rhs) const {
+ return std::tie(start, end) == std::tie(rhs.start, rhs.end);
+ }
+
+ bool operator!=(const MapIntervalBase& rhs) const {
+ return !operator==(rhs);
+ }
+
+ void MarkAsRegistered(const bool registered) {
+ is_registered = registered;
+ }
+
+ bool IsRegistered() const {
+ return is_registered;
+ }
+
+ CacheAddr GetStart() const {
+ return start;
+ }
+
+ CacheAddr GetEnd() const {
+ return end;
+ }
+
+ void MarkAsModified(const bool is_modified_, const u64 tick) {
+ is_modified = is_modified_;
+ ticks = tick;
+ }
+
+ bool IsModified() const {
+ return is_modified;
+ }
+
+ u64 GetModificationTick() const {
+ return ticks;
+ }
+
+ void MarkAsWritten(const bool is_written_) {
+ is_written = is_written_;
+ }
+
+ bool IsWritten() const {
+ return is_written;
+ }
+
+private:
+ CacheAddr start;
+ CacheAddr end;
+ GPUVAddr gpu_addr;
+ VAddr cpu_addr{};
+ bool is_written{};
+ bool is_modified{};
+ bool is_registered{};
+ u64 ticks{};
+};
+
+} // namespace VideoCommon
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index bd036cbe8..0094fd715 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -31,6 +31,7 @@ void DmaPusher::DispatchCalls() {
break;
}
}
+ gpu.FlushCommands();
}
bool DmaPusher::Step() {
diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp
index 0ee228e28..98a8b5337 100644
--- a/src/video_core/engines/fermi_2d.cpp
+++ b/src/video_core/engines/fermi_2d.cpp
@@ -10,8 +10,7 @@
namespace Tegra::Engines {
-Fermi2D::Fermi2D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager)
- : rasterizer{rasterizer}, memory_manager{memory_manager} {}
+Fermi2D::Fermi2D(VideoCore::RasterizerInterface& rasterizer) : rasterizer{rasterizer} {}
void Fermi2D::CallMethod(const GPU::MethodCall& method_call) {
ASSERT_MSG(method_call.method < Regs::NUM_REGS,
diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h
index 05421d185..0901cf2fa 100644
--- a/src/video_core/engines/fermi_2d.h
+++ b/src/video_core/engines/fermi_2d.h
@@ -33,7 +33,7 @@ namespace Tegra::Engines {
class Fermi2D final {
public:
- explicit Fermi2D(VideoCore::RasterizerInterface& rasterizer, MemoryManager& memory_manager);
+ explicit Fermi2D(VideoCore::RasterizerInterface& rasterizer);
~Fermi2D() = default;
/// Write the value to the register identified by method.
@@ -145,7 +145,6 @@ public:
private:
VideoCore::RasterizerInterface& rasterizer;
- MemoryManager& memory_manager;
/// Performs the copy from the source surface to the destination surface as configured in the
/// registers.
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index 44279de00..fa4a7c5c1 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -15,7 +15,7 @@
namespace Tegra::Engines {
KeplerMemory::KeplerMemory(Core::System& system, MemoryManager& memory_manager)
- : system{system}, memory_manager{memory_manager}, upload_state{memory_manager, regs.upload} {}
+ : system{system}, upload_state{memory_manager, regs.upload} {}
KeplerMemory::~KeplerMemory() = default;
diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h
index f3bc675a9..e0e25c321 100644
--- a/src/video_core/engines/kepler_memory.h
+++ b/src/video_core/engines/kepler_memory.h
@@ -65,7 +65,6 @@ public:
private:
Core::System& system;
- MemoryManager& memory_manager;
Upload::State upload_state;
};
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 74c46ec04..6a5a4f5c4 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -524,9 +524,10 @@ void Maxwell3D::ProcessQueryCondition() {
void Maxwell3D::ProcessSyncPoint() {
const u32 sync_point = regs.sync_info.sync_point.Value();
const u32 increment = regs.sync_info.increment.Value();
- const u32 cache_flush = regs.sync_info.unknown.Value();
- LOG_DEBUG(HW_GPU, "Syncpoint set {}, increment: {}, unk: {}", sync_point, increment,
- cache_flush);
+ [[maybe_unused]] const u32 cache_flush = regs.sync_info.unknown.Value();
+ if (increment) {
+ system.GPU().IncrementSyncPoint(sync_point);
+ }
}
void Maxwell3D::DrawArrays() {
@@ -625,10 +626,10 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
Texture::TICEntry tic_entry;
memory_manager.ReadBlockUnsafe(tic_address_gpu, &tic_entry, sizeof(Texture::TICEntry));
- const auto r_type{tic_entry.r_type.Value()};
- const auto g_type{tic_entry.g_type.Value()};
- const auto b_type{tic_entry.b_type.Value()};
- const auto a_type{tic_entry.a_type.Value()};
+ [[maybe_unused]] const auto r_type{tic_entry.r_type.Value()};
+ [[maybe_unused]] const auto g_type{tic_entry.g_type.Value()};
+ [[maybe_unused]] const auto b_type{tic_entry.b_type.Value()};
+ [[maybe_unused]] const auto a_type{tic_entry.a_type.Value()};
// TODO(Subv): Different data types for separate components are not supported
DEBUG_ASSERT(r_type == g_type && r_type == b_type && r_type == a_type);
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index a71d98e36..ad8453c5f 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -9,15 +9,13 @@
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/maxwell_dma.h"
#include "video_core/memory_manager.h"
-#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_base.h"
#include "video_core/textures/decoders.h"
namespace Tegra::Engines {
-MaxwellDMA::MaxwellDMA(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
- MemoryManager& memory_manager)
- : system{system}, rasterizer{rasterizer}, memory_manager{memory_manager} {}
+MaxwellDMA::MaxwellDMA(Core::System& system, MemoryManager& memory_manager)
+ : system{system}, memory_manager{memory_manager} {}
void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) {
ASSERT_MSG(method_call.method < Regs::NUM_REGS,
@@ -39,7 +37,7 @@ void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) {
}
void MaxwellDMA::HandleCopy() {
- LOG_WARNING(HW_GPU, "Requested a DMA copy");
+ LOG_TRACE(HW_GPU, "Requested a DMA copy");
const GPUVAddr source = regs.src_address.Address();
const GPUVAddr dest = regs.dst_address.Address();
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index 17b015ca7..93808a9bb 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -20,10 +20,6 @@ namespace Tegra {
class MemoryManager;
}
-namespace VideoCore {
-class RasterizerInterface;
-}
-
namespace Tegra::Engines {
/**
@@ -33,8 +29,7 @@ namespace Tegra::Engines {
class MaxwellDMA final {
public:
- explicit MaxwellDMA(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
- MemoryManager& memory_manager);
+ explicit MaxwellDMA(Core::System& system, MemoryManager& memory_manager);
~MaxwellDMA() = default;
/// Write the value to the register identified by method.
@@ -180,8 +175,6 @@ public:
private:
Core::System& system;
- VideoCore::RasterizerInterface& rasterizer;
-
MemoryManager& memory_manager;
std::vector<u8> read_buffer;
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index 8520a0143..bc8c2a1c5 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -538,6 +538,12 @@ enum class PhysicalAttributeDirection : u64 {
Output = 1,
};
+enum class VoteOperation : u64 {
+ All = 0, // allThreadsNV
+ Any = 1, // anyThreadNV
+ Eq = 2, // allThreadsEqualNV
+};
+
union Instruction {
Instruction& operator=(const Instruction& instr) {
value = instr.value;
@@ -560,6 +566,18 @@ union Instruction {
BitField<48, 16, u64> opcode;
union {
+ BitField<8, 5, ConditionCode> cc;
+ BitField<13, 1, u64> trigger;
+ } nop;
+
+ union {
+ BitField<48, 2, VoteOperation> operation;
+ BitField<45, 3, u64> dest_pred;
+ BitField<39, 3, u64> value;
+ BitField<42, 1, u64> negate_value;
+ } vote;
+
+ union {
BitField<8, 8, Register> gpr;
BitField<20, 24, s64> offset;
} gmem;
@@ -1018,8 +1036,6 @@ union Instruction {
} f2i;
union {
- BitField<8, 2, Register::Size> src_size;
- BitField<10, 2, Register::Size> dst_size;
BitField<39, 4, u64> rounding;
// H0, H1 extract for F16 missing
BitField<41, 1, u64> selector; // Guessed as some games set it, TODO: reverse this value
@@ -1484,6 +1500,7 @@ public:
SYNC,
BRK,
DEPBAR,
+ VOTE,
BFE_C,
BFE_R,
BFE_IMM,
@@ -1516,6 +1533,7 @@ public:
TMML, // Texture Mip Map Level
SUST, // Surface Store
EXIT,
+ NOP,
IPA,
OUT_R, // Emit vertex/primitive
ISBERD,
@@ -1645,6 +1663,7 @@ public:
Hfma2,
Flow,
Synch,
+ Warp,
Memory,
Texture,
Image,
@@ -1771,6 +1790,7 @@ private:
INST("111000110100---", Id::BRK, Type::Flow, "BRK"),
INST("111000110000----", Id::EXIT, Type::Flow, "EXIT"),
INST("1111000011110---", Id::DEPBAR, Type::Synch, "DEPBAR"),
+ INST("0101000011011---", Id::VOTE, Type::Warp, "VOTE"),
INST("1110111111011---", Id::LD_A, Type::Memory, "LD_A"),
INST("1110111101001---", Id::LD_S, Type::Memory, "LD_S"),
INST("1110111101000---", Id::LD_L, Type::Memory, "LD_L"),
@@ -1795,6 +1815,7 @@ private:
INST("110111110110----", Id::TMML_B, Type::Texture, "TMML_B"),
INST("1101111101011---", Id::TMML, Type::Texture, "TMML"),
INST("11101011001-----", Id::SUST, Type::Image, "SUST"),
+ INST("0101000010110---", Id::NOP, Type::Trivial, "NOP"),
INST("11100000--------", Id::IPA, Type::Trivial, "IPA"),
INST("1111101111100---", Id::OUT_R, Type::Trivial, "OUT_R"),
INST("1110111111010---", Id::ISBERD, Type::Trivial, "ISBERD"),
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index e25754e37..8d9db45f5 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -29,14 +29,15 @@ u32 FramebufferConfig::BytesPerPixel(PixelFormat format) {
UNREACHABLE();
}
-GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer) : renderer{renderer} {
+GPU::GPU(Core::System& system, VideoCore::RendererBase& renderer, bool is_async)
+ : system{system}, renderer{renderer}, is_async{is_async} {
auto& rasterizer{renderer.Rasterizer()};
memory_manager = std::make_unique<Tegra::MemoryManager>(system, rasterizer);
dma_pusher = std::make_unique<Tegra::DmaPusher>(*this);
maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, rasterizer, *memory_manager);
- fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer, *memory_manager);
+ fermi_2d = std::make_unique<Engines::Fermi2D>(rasterizer);
kepler_compute = std::make_unique<Engines::KeplerCompute>(system, rasterizer, *memory_manager);
- maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, rasterizer, *memory_manager);
+ maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager);
kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager);
}
@@ -74,6 +75,55 @@ const DmaPusher& GPU::DmaPusher() const {
return *dma_pusher;
}
+void GPU::IncrementSyncPoint(const u32 syncpoint_id) {
+ syncpoints[syncpoint_id]++;
+ std::lock_guard lock{sync_mutex};
+ if (!syncpt_interrupts[syncpoint_id].empty()) {
+ u32 value = syncpoints[syncpoint_id].load();
+ auto it = syncpt_interrupts[syncpoint_id].begin();
+ while (it != syncpt_interrupts[syncpoint_id].end()) {
+ if (value >= *it) {
+ TriggerCpuInterrupt(syncpoint_id, *it);
+ it = syncpt_interrupts[syncpoint_id].erase(it);
+ continue;
+ }
+ it++;
+ }
+ }
+}
+
+u32 GPU::GetSyncpointValue(const u32 syncpoint_id) const {
+ return syncpoints[syncpoint_id].load();
+}
+
+void GPU::RegisterSyncptInterrupt(const u32 syncpoint_id, const u32 value) {
+ auto& interrupt = syncpt_interrupts[syncpoint_id];
+ bool contains = std::any_of(interrupt.begin(), interrupt.end(),
+ [value](u32 in_value) { return in_value == value; });
+ if (contains) {
+ return;
+ }
+ syncpt_interrupts[syncpoint_id].emplace_back(value);
+}
+
+bool GPU::CancelSyncptInterrupt(const u32 syncpoint_id, const u32 value) {
+ std::lock_guard lock{sync_mutex};
+ auto& interrupt = syncpt_interrupts[syncpoint_id];
+ const auto iter =
+ std::find_if(interrupt.begin(), interrupt.end(),
+ [value](u32 interrupt_value) { return value == interrupt_value; });
+
+ if (iter == interrupt.end()) {
+ return false;
+ }
+ interrupt.erase(iter);
+ return true;
+}
+
+void GPU::FlushCommands() {
+ renderer.Rasterizer().FlushCommands();
+}
+
u32 RenderTargetBytesPerPixel(RenderTargetFormat format) {
ASSERT(format != RenderTargetFormat::NONE);
@@ -151,12 +201,12 @@ enum class BufferMethods {
NotifyIntr = 0x8,
WrcacheFlush = 0x9,
Unk28 = 0xA,
- Unk2c = 0xB,
+ UnkCacheFlush = 0xB,
RefCnt = 0x14,
SemaphoreAcquire = 0x1A,
SemaphoreRelease = 0x1B,
- Unk70 = 0x1C,
- Unk74 = 0x1D,
+ FenceValue = 0x1C,
+ FenceAction = 0x1D,
Unk78 = 0x1E,
Unk7c = 0x1F,
Yield = 0x20,
@@ -202,6 +252,10 @@ void GPU::CallPullerMethod(const MethodCall& method_call) {
case BufferMethods::SemaphoreAddressLow:
case BufferMethods::SemaphoreSequence:
case BufferMethods::RefCnt:
+ case BufferMethods::UnkCacheFlush:
+ case BufferMethods::WrcacheFlush:
+ case BufferMethods::FenceValue:
+ case BufferMethods::FenceAction:
break;
case BufferMethods::SemaphoreTrigger: {
ProcessSemaphoreTriggerMethod();
@@ -212,21 +266,11 @@ void GPU::CallPullerMethod(const MethodCall& method_call) {
LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented");
break;
}
- case BufferMethods::WrcacheFlush: {
- // TODO(Kmather73): Research and implement this method.
- LOG_ERROR(HW_GPU, "Special puller engine method WrcacheFlush not implemented");
- break;
- }
case BufferMethods::Unk28: {
// TODO(Kmather73): Research and implement this method.
LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented");
break;
}
- case BufferMethods::Unk2c: {
- // TODO(Kmather73): Research and implement this method.
- LOG_ERROR(HW_GPU, "Special puller engine method Unk2c not implemented");
- break;
- }
case BufferMethods::SemaphoreAcquire: {
ProcessSemaphoreAcquire();
break;
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index 0ace0ff4f..544340ecd 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -5,8 +5,12 @@
#pragma once
#include <array>
+#include <atomic>
+#include <list>
#include <memory>
+#include <mutex>
#include "common/common_types.h"
+#include "core/hle/service/nvdrv/nvdata.h"
#include "core/hle/service/nvflinger/buffer_queue.h"
#include "video_core/dma_pusher.h"
@@ -15,6 +19,10 @@ inline CacheAddr ToCacheAddr(const void* host_ptr) {
return reinterpret_cast<CacheAddr>(host_ptr);
}
+inline u8* FromCacheAddr(CacheAddr cache_addr) {
+ return reinterpret_cast<u8*>(cache_addr);
+}
+
namespace Core {
class System;
}
@@ -127,7 +135,7 @@ class MemoryManager;
class GPU {
public:
- explicit GPU(Core::System& system, VideoCore::RendererBase& renderer);
+ explicit GPU(Core::System& system, VideoCore::RendererBase& renderer, bool is_async);
virtual ~GPU();
@@ -149,6 +157,8 @@ public:
/// Calls a GPU method.
void CallMethod(const MethodCall& method_call);
+ void FlushCommands();
+
/// Returns a reference to the Maxwell3D GPU engine.
Engines::Maxwell3D& Maxwell3D();
@@ -170,6 +180,22 @@ public:
/// Returns a reference to the GPU DMA pusher.
Tegra::DmaPusher& DmaPusher();
+ void IncrementSyncPoint(u32 syncpoint_id);
+
+ u32 GetSyncpointValue(u32 syncpoint_id) const;
+
+ void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value);
+
+ bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value);
+
+ std::unique_lock<std::mutex> LockSync() {
+ return std::unique_lock{sync_mutex};
+ }
+
+ bool IsAsync() const {
+ return is_async;
+ }
+
/// Returns a const reference to the GPU DMA pusher.
const Tegra::DmaPusher& DmaPusher() const;
@@ -200,7 +226,12 @@ public:
u32 semaphore_acquire;
u32 semaphore_release;
- INSERT_PADDING_WORDS(0xE4);
+ u32 fence_value;
+ union {
+ BitField<4, 4, u32> operation;
+ BitField<8, 8, u32> id;
+ } fence_action;
+ INSERT_PADDING_WORDS(0xE2);
// Puller state
u32 acquire_mode;
@@ -234,6 +265,9 @@ public:
/// Notify rasterizer that any caches of the specified region should be flushed and invalidated
virtual void FlushAndInvalidateRegion(CacheAddr addr, u64 size) = 0;
+protected:
+ virtual void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const = 0;
+
private:
void ProcessBindMethod(const MethodCall& method_call);
void ProcessSemaphoreTriggerMethod();
@@ -251,6 +285,7 @@ private:
protected:
std::unique_ptr<Tegra::DmaPusher> dma_pusher;
+ Core::System& system;
VideoCore::RendererBase& renderer;
private:
@@ -268,6 +303,14 @@ private:
std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
/// Inline memory engine
std::unique_ptr<Engines::KeplerMemory> kepler_memory;
+
+ std::array<std::atomic<u32>, Service::Nvidia::MaxSyncPoints> syncpoints{};
+
+ std::array<std::list<u32>, Service::Nvidia::MaxSyncPoints> syncpt_interrupts;
+
+ std::mutex sync_mutex;
+
+ const bool is_async;
};
#define ASSERT_REG_POSITION(field_name, position) \
@@ -280,6 +323,8 @@ ASSERT_REG_POSITION(semaphore_trigger, 0x7);
ASSERT_REG_POSITION(reference_count, 0x14);
ASSERT_REG_POSITION(semaphore_acquire, 0x1A);
ASSERT_REG_POSITION(semaphore_release, 0x1B);
+ASSERT_REG_POSITION(fence_value, 0x1C);
+ASSERT_REG_POSITION(fence_action, 0x1D);
ASSERT_REG_POSITION(acquire_mode, 0x100);
ASSERT_REG_POSITION(acquire_source, 0x101);
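
The syncpoint API added to GPU above is driven from the CPU side (in the real code by the nvdrv service, which is outside this diff). A hedged sketch of the intended flow, with the caller-side function name made up for illustration:

    // Hypothetical caller; the real consumer is the nvhost/nvdrv service code.
    #include "video_core/gpu.h"

    void ExampleArmSyncpointInterrupt(Tegra::GPU& gpu, u32 syncpoint_id, u32 threshold) {
        auto lock = gpu.LockSync(); // presumably held while touching the interrupt lists
        if (gpu.GetSyncpointValue(syncpoint_id) >= threshold) {
            return; // already past the threshold, nothing to arm
        }
        // Once Maxwell3D::ProcessSyncPoint() drives IncrementSyncPoint() past `threshold`,
        // TriggerCpuInterrupt(syncpoint_id, threshold) fires and the pending entry is erased.
        gpu.RegisterSyncptInterrupt(syncpoint_id, threshold);
    }
    // An abandoned wait can be taken back later (outside the lock) with:
    //     gpu.CancelSyncptInterrupt(syncpoint_id, threshold);
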
diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp
index d4e2553a9..ea67be831 100644
--- a/src/video_core/gpu_asynch.cpp
+++ b/src/video_core/gpu_asynch.cpp
@@ -2,6 +2,8 @@
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
+#include "core/core.h"
+#include "core/hardware_interrupt_manager.h"
#include "video_core/gpu_asynch.h"
#include "video_core/gpu_thread.h"
#include "video_core/renderer_base.h"
@@ -9,7 +11,7 @@
namespace VideoCommon {
GPUAsynch::GPUAsynch(Core::System& system, VideoCore::RendererBase& renderer)
- : GPU(system, renderer), gpu_thread{system} {}
+ : GPU(system, renderer, true), gpu_thread{system} {}
GPUAsynch::~GPUAsynch() = default;
@@ -38,4 +40,9 @@ void GPUAsynch::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
gpu_thread.FlushAndInvalidateRegion(addr, size);
}
+void GPUAsynch::TriggerCpuInterrupt(const u32 syncpoint_id, const u32 value) const {
+ auto& interrupt_manager = system.InterruptManager();
+ interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value);
+}
+
} // namespace VideoCommon
diff --git a/src/video_core/gpu_asynch.h b/src/video_core/gpu_asynch.h
index 30be74cba..36377d677 100644
--- a/src/video_core/gpu_asynch.h
+++ b/src/video_core/gpu_asynch.h
@@ -27,6 +27,9 @@ public:
void InvalidateRegion(CacheAddr addr, u64 size) override;
void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
+protected:
+ void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const override;
+
private:
GPUThread::ThreadManager gpu_thread;
};
diff --git a/src/video_core/gpu_synch.cpp b/src/video_core/gpu_synch.cpp
index 45e43b1dc..d4ead9c47 100644
--- a/src/video_core/gpu_synch.cpp
+++ b/src/video_core/gpu_synch.cpp
@@ -8,7 +8,7 @@
namespace VideoCommon {
GPUSynch::GPUSynch(Core::System& system, VideoCore::RendererBase& renderer)
- : GPU(system, renderer) {}
+ : GPU(system, renderer, false) {}
GPUSynch::~GPUSynch() = default;
diff --git a/src/video_core/gpu_synch.h b/src/video_core/gpu_synch.h
index 3031fcf72..07bcc47f1 100644
--- a/src/video_core/gpu_synch.h
+++ b/src/video_core/gpu_synch.h
@@ -25,6 +25,10 @@ public:
void FlushRegion(CacheAddr addr, u64 size) override;
void InvalidateRegion(CacheAddr addr, u64 size) override;
void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
+
+protected:
+ void TriggerCpuInterrupt([[maybe_unused]] u32 syncpoint_id,
+ [[maybe_unused]] u32 value) const override {}
};
} // namespace VideoCommon
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 3f0939ec9..b441e92b0 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -21,7 +21,8 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p
MicroProfileOnThreadCreate("GpuThread");
// Wait for first GPU command before acquiring the window context
- state.WaitForCommands();
+ while (state.queue.Empty())
+ ;
// If emulation was stopped during disk shader loading, abort before trying to acquire context
if (!state.is_running) {
@@ -32,7 +33,6 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p
CommandDataContainer next;
while (state.is_running) {
- state.WaitForCommands();
while (!state.queue.Empty()) {
state.queue.Pop(next);
if (const auto submit_list = std::get_if<SubmitListCommand>(&next.data)) {
@@ -49,8 +49,7 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p
} else {
UNREACHABLE();
}
- state.signaled_fence = next.fence;
- state.TrySynchronize();
+ state.signaled_fence.store(next.fence);
}
}
}
@@ -89,12 +88,7 @@ void ThreadManager::FlushRegion(CacheAddr addr, u64 size) {
}
void ThreadManager::InvalidateRegion(CacheAddr addr, u64 size) {
- if (state.queue.Empty()) {
- // It's quicker to invalidate a single region on the CPU if the queue is already empty
- system.Renderer().Rasterizer().InvalidateRegion(addr, size);
- } else {
- PushCommand(InvalidateRegionCommand(addr, size));
- }
+ system.Renderer().Rasterizer().InvalidateRegion(addr, size);
}
void ThreadManager::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
@@ -105,22 +99,13 @@ void ThreadManager::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
u64 ThreadManager::PushCommand(CommandData&& command_data) {
const u64 fence{++state.last_fence};
state.queue.Push(CommandDataContainer(std::move(command_data), fence));
- state.SignalCommands();
return fence;
}
MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
void SynchState::WaitForSynchronization(u64 fence) {
- if (signaled_fence >= fence) {
- return;
- }
-
- // Wait for the GPU to be idle (all commands to be executed)
- {
- MICROPROFILE_SCOPE(GPU_wait);
- std::unique_lock lock{synchronization_mutex};
- synchronization_condition.wait(lock, [this, fence] { return signaled_fence >= fence; });
- }
+ while (signaled_fence.load() < fence)
+ ;
}
} // namespace VideoCommon::GPUThread
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index 05a168a72..1d9d0c39e 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -88,41 +88,9 @@ struct CommandDataContainer {
/// Struct used to synchronize the GPU thread
struct SynchState final {
std::atomic_bool is_running{true};
- std::atomic_int queued_frame_count{};
- std::mutex synchronization_mutex;
- std::mutex commands_mutex;
- std::condition_variable commands_condition;
- std::condition_variable synchronization_condition;
-
- /// Returns true if the gap in GPU commands is small enough that we can consider the CPU and GPU
- /// synchronized. This is entirely empirical.
- bool IsSynchronized() const {
- constexpr std::size_t max_queue_gap{5};
- return queue.Size() <= max_queue_gap;
- }
-
- void TrySynchronize() {
- if (IsSynchronized()) {
- std::lock_guard lock{synchronization_mutex};
- synchronization_condition.notify_one();
- }
- }
void WaitForSynchronization(u64 fence);
- void SignalCommands() {
- if (queue.Empty()) {
- return;
- }
-
- commands_condition.notify_one();
- }
-
- void WaitForCommands() {
- std::unique_lock lock{commands_mutex};
- commands_condition.wait(lock, [this] { return !queue.Empty(); });
- }
-
using CommandQueue = Common::SPSCQueue<CommandDataContainer>;
CommandQueue queue;
u64 last_fence{};
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index 9881df0d5..6b3f2d50a 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -50,6 +50,9 @@ public:
/// and invalidated
virtual void FlushAndInvalidateRegion(CacheAddr addr, u64 size) = 0;
+ /// Notify the rasterizer to send all written commands to the host GPU.
+ virtual void FlushCommands() = 0;
+
/// Notify rasterizer that a frame is about to finish
virtual void TickFrame() = 0;
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index 2a9b523f5..f8a807c84 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -7,28 +7,41 @@
#include <glad/glad.h>
#include "common/assert.h"
+#include "common/microprofile.h"
+#include "video_core/rasterizer_interface.h"
#include "video_core/renderer_opengl/gl_buffer_cache.h"
#include "video_core/renderer_opengl/gl_rasterizer.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
namespace OpenGL {
+MICROPROFILE_DEFINE(OpenGL_Buffer_Download, "OpenGL", "Buffer Download", MP_RGB(192, 192, 128));
+
+CachedBufferBlock::CachedBufferBlock(CacheAddr cache_addr, const std::size_t size)
+ : VideoCommon::BufferBlock{cache_addr, size} {
+ gl_buffer.Create();
+ glNamedBufferData(gl_buffer.handle, static_cast<GLsizeiptr>(size), nullptr, GL_DYNAMIC_DRAW);
+}
+
+CachedBufferBlock::~CachedBufferBlock() = default;
+
OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, Core::System& system,
std::size_t stream_size)
- : VideoCommon::BufferCache<OGLBuffer, GLuint, OGLStreamBuffer>{
+ : VideoCommon::BufferCache<Buffer, GLuint, OGLStreamBuffer>{
rasterizer, system, std::make_unique<OGLStreamBuffer>(stream_size, true)} {}
OGLBufferCache::~OGLBufferCache() = default;
-OGLBuffer OGLBufferCache::CreateBuffer(std::size_t size) {
- OGLBuffer buffer;
- buffer.Create();
- glNamedBufferData(buffer.handle, static_cast<GLsizeiptr>(size), nullptr, GL_DYNAMIC_DRAW);
- return buffer;
+Buffer OGLBufferCache::CreateBlock(CacheAddr cache_addr, std::size_t size) {
+ return std::make_shared<CachedBufferBlock>(cache_addr, size);
+}
+
+void OGLBufferCache::WriteBarrier() {
+ glMemoryBarrier(GL_ALL_BARRIER_BITS);
}
-const GLuint* OGLBufferCache::ToHandle(const OGLBuffer& buffer) {
- return &buffer.handle;
+const GLuint* OGLBufferCache::ToHandle(const Buffer& buffer) {
+ return buffer->GetHandle();
}
const GLuint* OGLBufferCache::GetEmptyBuffer(std::size_t) {
@@ -36,23 +49,24 @@ const GLuint* OGLBufferCache::GetEmptyBuffer(std::size_t) {
return &null_buffer;
}
-void OGLBufferCache::UploadBufferData(const OGLBuffer& buffer, std::size_t offset, std::size_t size,
- const u8* data) {
- glNamedBufferSubData(buffer.handle, static_cast<GLintptr>(offset),
+void OGLBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
+ const u8* data) {
+ glNamedBufferSubData(*buffer->GetHandle(), static_cast<GLintptr>(offset),
static_cast<GLsizeiptr>(size), data);
}
-void OGLBufferCache::DownloadBufferData(const OGLBuffer& buffer, std::size_t offset,
- std::size_t size, u8* data) {
- glGetNamedBufferSubData(buffer.handle, static_cast<GLintptr>(offset),
+void OGLBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
+ u8* data) {
+ MICROPROFILE_SCOPE(OpenGL_Buffer_Download);
+ glGetNamedBufferSubData(*buffer->GetHandle(), static_cast<GLintptr>(offset),
static_cast<GLsizeiptr>(size), data);
}
-void OGLBufferCache::CopyBufferData(const OGLBuffer& src, const OGLBuffer& dst,
- std::size_t src_offset, std::size_t dst_offset,
- std::size_t size) {
- glCopyNamedBufferSubData(src.handle, dst.handle, static_cast<GLintptr>(src_offset),
- static_cast<GLintptr>(dst_offset), static_cast<GLsizeiptr>(size));
+void OGLBufferCache::CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset,
+ std::size_t dst_offset, std::size_t size) {
+ glCopyNamedBufferSubData(*src->GetHandle(), *dst->GetHandle(),
+ static_cast<GLintptr>(src_offset), static_cast<GLintptr>(dst_offset),
+ static_cast<GLsizeiptr>(size));
}
} // namespace OpenGL
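
A minimal stand-alone sketch (not part of this patch) of the indirection the file now uses: blocks are passed around as Buffer, i.e. std::shared_ptr<CachedBufferBlock>, and every GL call dereferences GetHandle() to reach the underlying buffer name. FakeBlock below is a hypothetical stand-in for the real class, kept only to show the pattern:

    #include <memory>

    struct FakeBlock {
        unsigned handle = 42;                                  // stand-in for OGLBuffer::handle
        const unsigned* GetHandle() const { return &handle; }  // mirrors CachedBufferBlock::GetHandle
    };

    int main() {
        const auto buffer = std::make_shared<FakeBlock>();
        const unsigned gl_name = *buffer->GetHandle();         // value UploadBlockData would hand to GL
        return gl_name == 42 ? 0 : 1;
    }
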
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index 8c8ac4038..022e7bfa9 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -7,7 +7,7 @@
#include <memory>
#include "common/common_types.h"
-#include "video_core/buffer_cache.h"
+#include "video_core/buffer_cache/buffer_cache.h"
#include "video_core/rasterizer_cache.h"
#include "video_core/renderer_opengl/gl_resource_manager.h"
#include "video_core/renderer_opengl/gl_stream_buffer.h"
@@ -21,7 +21,24 @@ namespace OpenGL {
class OGLStreamBuffer;
class RasterizerOpenGL;
-class OGLBufferCache final : public VideoCommon::BufferCache<OGLBuffer, GLuint, OGLStreamBuffer> {
+class CachedBufferBlock;
+
+using Buffer = std::shared_ptr<CachedBufferBlock>;
+
+class CachedBufferBlock : public VideoCommon::BufferBlock {
+public:
+ explicit CachedBufferBlock(CacheAddr cache_addr, const std::size_t size);
+ ~CachedBufferBlock();
+
+ const GLuint* GetHandle() const {
+ return &gl_buffer.handle;
+ }
+
+private:
+ OGLBuffer gl_buffer{};
+};
+
+class OGLBufferCache final : public VideoCommon::BufferCache<Buffer, GLuint, OGLStreamBuffer> {
public:
explicit OGLBufferCache(RasterizerOpenGL& rasterizer, Core::System& system,
std::size_t stream_size);
@@ -30,18 +47,20 @@ public:
const GLuint* GetEmptyBuffer(std::size_t) override;
protected:
- OGLBuffer CreateBuffer(std::size_t size) override;
+ Buffer CreateBlock(CacheAddr cache_addr, std::size_t size) override;
+
+ void WriteBarrier() override;
- const GLuint* ToHandle(const OGLBuffer& buffer) override;
+ const GLuint* ToHandle(const Buffer& buffer) override;
- void UploadBufferData(const OGLBuffer& buffer, std::size_t offset, std::size_t size,
- const u8* data) override;
+ void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
+ const u8* data) override;
- void DownloadBufferData(const OGLBuffer& buffer, std::size_t offset, std::size_t size,
- u8* data) override;
+ void DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
+ u8* data) override;
- void CopyBufferData(const OGLBuffer& src, const OGLBuffer& dst, std::size_t src_offset,
- std::size_t dst_offset, std::size_t size) override;
+ void CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset,
+ std::size_t dst_offset, std::size_t size) override;
};
} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 85424a4c9..03d434b28 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -27,6 +27,8 @@ Device::Device() {
shader_storage_alignment = GetInteger<std::size_t>(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT);
max_vertex_attributes = GetInteger<u32>(GL_MAX_VERTEX_ATTRIBS);
max_varyings = GetInteger<u32>(GL_MAX_VARYING_VECTORS);
+ has_warp_intrinsics = GLAD_GL_NV_gpu_shader5 && GLAD_GL_NV_shader_thread_group &&
+ GLAD_GL_NV_shader_thread_shuffle;
has_vertex_viewport_layer = GLAD_GL_ARB_shader_viewport_layer_array;
has_variable_aoffi = TestVariableAoffi();
has_component_indexing_bug = TestComponentIndexingBug();
@@ -36,6 +38,7 @@ Device::Device(std::nullptr_t) {
uniform_buffer_alignment = 0;
max_vertex_attributes = 16;
max_varyings = 15;
+ has_warp_intrinsics = true;
has_vertex_viewport_layer = true;
has_variable_aoffi = true;
has_component_indexing_bug = false;
diff --git a/src/video_core/renderer_opengl/gl_device.h b/src/video_core/renderer_opengl/gl_device.h
index dc883722d..3ef7c6dd8 100644
--- a/src/video_core/renderer_opengl/gl_device.h
+++ b/src/video_core/renderer_opengl/gl_device.h
@@ -30,6 +30,10 @@ public:
return max_varyings;
}
+ bool HasWarpIntrinsics() const {
+ return has_warp_intrinsics;
+ }
+
bool HasVertexViewportLayer() const {
return has_vertex_viewport_layer;
}
@@ -50,6 +54,7 @@ private:
std::size_t shader_storage_alignment{};
u32 max_vertex_attributes{};
u32 max_varyings{};
+ bool has_warp_intrinsics{};
bool has_vertex_viewport_layer{};
bool has_variable_aoffi{};
bool has_component_indexing_bug{};
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index c28ae795c..bb09ecd52 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -708,8 +708,6 @@ void RasterizerOpenGL::DrawArrays() {
return;
}
- const auto& regs = gpu.regs;
-
SyncColorMask();
SyncFragmentColorClampState();
SyncMultiSampleState();
@@ -863,6 +861,10 @@ void RasterizerOpenGL::FlushAndInvalidateRegion(CacheAddr addr, u64 size) {
InvalidateRegion(addr, size);
}
+void RasterizerOpenGL::FlushCommands() {
+ glFlush();
+}
+
void RasterizerOpenGL::TickFrame() {
buffer_cache.TickFrame();
}
@@ -976,7 +978,7 @@ void RasterizerOpenGL::SetupGlobalMemory(const GLShader::GlobalMemoryEntry& entr
GPUVAddr gpu_addr, std::size_t size) {
const auto alignment{device.GetShaderStorageBufferAlignment()};
const auto [ssbo, buffer_offset] =
- buffer_cache.UploadMemory(gpu_addr, size, alignment, true, entry.IsWritten());
+ buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.IsWritten());
bind_ssbo_pushbuffer.Push(ssbo, buffer_offset, static_cast<GLsizeiptr>(size));
}
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 8b123c48d..9d20a4fbf 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -63,6 +63,7 @@ public:
void FlushRegion(CacheAddr addr, u64 size) override;
void InvalidateRegion(CacheAddr addr, u64 size) override;
void FlushAndInvalidateRegion(CacheAddr addr, u64 size) override;
+ void FlushCommands() override;
void TickFrame() override;
bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
const Tegra::Engines::Fermi2D::Regs::Surface& dst,
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 1c90facc3..cf6a5cddf 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -212,7 +212,9 @@ CachedProgram SpecializeShader(const std::string& code, const GLShader::ShaderEn
const auto texture_buffer_usage{variant.texture_buffer_usage};
std::string source = "#version 430 core\n"
- "#extension GL_ARB_separate_shader_objects : enable\n";
+ "#extension GL_ARB_separate_shader_objects : enable\n"
+ "#extension GL_NV_gpu_shader5 : enable\n"
+ "#extension GL_NV_shader_thread_group : enable\n";
if (entries.shader_viewport_layer_array) {
source += "#extension GL_ARB_shader_viewport_layer_array : enable\n";
}
@@ -247,20 +249,24 @@ CachedProgram SpecializeShader(const std::string& code, const GLShader::ShaderEn
if (!texture_buffer_usage.test(i)) {
continue;
}
- source += fmt::format("#define SAMPLER_{}_IS_BUFFER", i);
+ source += fmt::format("#define SAMPLER_{}_IS_BUFFER\n", i);
+ }
+ if (texture_buffer_usage.any()) {
+ source += '\n';
}
if (program_type == ProgramType::Geometry) {
const auto [glsl_topology, debug_name, max_vertices] =
GetPrimitiveDescription(primitive_mode);
- source += "layout (" + std::string(glsl_topology) + ") in;\n";
+ source += "layout (" + std::string(glsl_topology) + ") in;\n\n";
source += "#define MAX_VERTEX_INPUT " + std::to_string(max_vertices) + '\n';
}
if (program_type == ProgramType::Compute) {
source += "layout (local_size_variable) in;\n";
}
+ source += '\n';
source += code;
OGLShader shader;
@@ -289,7 +295,7 @@ std::set<GLenum> GetSupportedFormats() {
CachedShader::CachedShader(const ShaderParameters& params, ProgramType program_type,
GLShader::ProgramResult result)
- : RasterizerCacheObject{params.host_ptr}, host_ptr{params.host_ptr}, cpu_addr{params.cpu_addr},
+ : RasterizerCacheObject{params.host_ptr}, cpu_addr{params.cpu_addr},
unique_identifier{params.unique_identifier}, program_type{program_type},
disk_cache{params.disk_cache}, precompiled_programs{params.precompiled_programs},
entries{result.second}, code{std::move(result.first)}, shader_length{entries.shader_length} {}
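
Worth spelling out why the added '\n' in the SAMPLER_{}_IS_BUFFER line matters: without it, every define after the first was appended to the same generated source line, so only the first buffer sampler was actually flagged. A tiny stand-alone sketch using hypothetical sampler indices 0 and 2 and the same fmt call as the patch:

    #include <fmt/format.h>

    #include <string>

    int main() {
        std::string source;
        for (const int i : {0, 2}) {  // hypothetical sampler indices flagged as buffers
            source += fmt::format("#define SAMPLER_{}_IS_BUFFER\n", i);
        }
        // Two separate preprocessor lines; without the '\n' this would have been
        // "#define SAMPLER_0_IS_BUFFER#define SAMPLER_2_IS_BUFFER" on a single line.
        fmt::print("{}", source);
        return 0;
    }
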
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index a3106a0ff..2c8faf855 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -106,7 +106,6 @@ private:
ShaderDiskCacheUsage GetUsage(const ProgramVariant& variant) const;
- u8* host_ptr{};
VAddr cpu_addr{};
u64 unique_identifier{};
ProgramType program_type{};
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index ffe26b241..359d58cbe 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -565,7 +565,7 @@ private:
case Tegra::Shader::ImageType::Texture1D:
return "image1D";
case Tegra::Shader::ImageType::TextureBuffer:
- return "bufferImage";
+ return "imageBuffer";
case Tegra::Shader::ImageType::Texture1DArray:
return "image1DArray";
case Tegra::Shader::ImageType::Texture2D:
@@ -1136,6 +1136,16 @@ private:
Type::Float);
}
+ std::string FCastHalf0(Operation operation) {
+ const std::string op_a = VisitOperand(operation, 0, Type::HalfFloat);
+ return fmt::format("({})[0]", op_a);
+ }
+
+ std::string FCastHalf1(Operation operation) {
+ const std::string op_a = VisitOperand(operation, 0, Type::HalfFloat);
+ return fmt::format("({})[1]", op_a);
+ }
+
template <Type type>
std::string Min(Operation operation) {
return GenerateBinaryCall(operation, "min", type, type, type);
@@ -1292,6 +1302,11 @@ private:
return ApplyPrecise(operation, BitwiseCastResult(clamped, Type::HalfFloat));
}
+ std::string HCastFloat(Operation operation) {
+ const std::string op_a = VisitOperand(operation, 0, Type::Float);
+ return fmt::format("fromHalf2(vec2({}, 0.0f))", op_a);
+ }
+
std::string HUnpack(Operation operation) {
const std::string operand{VisitOperand(operation, 0, Type::HalfFloat)};
const auto value = [&]() -> std::string {
@@ -1720,6 +1735,48 @@ private:
return "utof(gl_WorkGroupID"s + GetSwizzle(element) + ')';
}
+ std::string BallotThread(Operation operation) {
+ const std::string value = VisitOperand(operation, 0, Type::Bool);
+ if (!device.HasWarpIntrinsics()) {
+ LOG_ERROR(Render_OpenGL,
+                      "Nvidia warp intrinsics are not available but are required by a shader");
+ // Stub on non-Nvidia devices by simulating all threads voting the same as the active
+ // one.
+ return fmt::format("utof({} ? 0xFFFFFFFFU : 0U)", value);
+ }
+ return fmt::format("utof(ballotThreadNV({}))", value);
+ }
+
+ std::string Vote(Operation operation, const char* func) {
+ const std::string value = VisitOperand(operation, 0, Type::Bool);
+ if (!device.HasWarpIntrinsics()) {
+ LOG_ERROR(Render_OpenGL,
+                      "Nvidia vote intrinsics are not available but are required by a shader");
+ // Stub with a warp size of one.
+ return value;
+ }
+ return fmt::format("{}({})", func, value);
+ }
+
+ std::string VoteAll(Operation operation) {
+ return Vote(operation, "allThreadsNV");
+ }
+
+ std::string VoteAny(Operation operation) {
+ return Vote(operation, "anyThreadNV");
+ }
+
+ std::string VoteEqual(Operation operation) {
+ if (!device.HasWarpIntrinsics()) {
+ LOG_ERROR(Render_OpenGL,
+                      "Nvidia vote intrinsics are not available but are required by a shader");
+ // We must return true here since a stub for a theoretical warp size of 1 will always
+ // return an equal result for all its votes.
+ return "true";
+ }
+ return Vote(operation, "allThreadsEqualNV");
+ }
+
static constexpr std::array operation_decompilers = {
&GLSLDecompiler::Assign,
@@ -1732,6 +1789,8 @@ private:
&GLSLDecompiler::Negate<Type::Float>,
&GLSLDecompiler::Absolute<Type::Float>,
&GLSLDecompiler::FClamp,
+ &GLSLDecompiler::FCastHalf0,
+ &GLSLDecompiler::FCastHalf1,
&GLSLDecompiler::Min<Type::Float>,
&GLSLDecompiler::Max<Type::Float>,
&GLSLDecompiler::FCos,
@@ -1792,6 +1851,7 @@ private:
&GLSLDecompiler::Absolute<Type::HalfFloat>,
&GLSLDecompiler::HNegate,
&GLSLDecompiler::HClamp,
+ &GLSLDecompiler::HCastFloat,
&GLSLDecompiler::HUnpack,
&GLSLDecompiler::HMergeF32,
&GLSLDecompiler::HMergeH0,
@@ -1867,6 +1927,11 @@ private:
&GLSLDecompiler::WorkGroupId<0>,
&GLSLDecompiler::WorkGroupId<1>,
&GLSLDecompiler::WorkGroupId<2>,
+
+ &GLSLDecompiler::BallotThread,
+ &GLSLDecompiler::VoteAll,
+ &GLSLDecompiler::VoteAny,
+ &GLSLDecompiler::VoteEqual,
};
static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount));
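
For devices without the NV warp extensions, the stubs above model a warp of size one. A stand-alone sketch (illustrative only, not part of the patch) of the semantics that fallback encodes:

    #include <cstdint>

    // Warp-size-one model of the stubbed intrinsics above (illustrative only).
    std::uint32_t BallotStub(bool value) {
        return value ? 0xFFFFFFFFu : 0u;  // mirrors utof({} ? 0xFFFFFFFFU : 0U) in the generated GLSL
    }
    bool VoteAllStub(bool value) {
        return value;  // the lone thread agrees with itself iff its own predicate is true
    }
    bool VoteAnyStub(bool value) {
        return value;
    }
    bool VoteEqualStub(bool) {
        return true;  // a single thread's votes always match
    }
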
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index 8fcd39a69..4f135fe03 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -137,7 +137,6 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> tex_format
const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType component_type) {
ASSERT(static_cast<std::size_t>(pixel_format) < tex_format_tuples.size());
const auto& format{tex_format_tuples[static_cast<std::size_t>(pixel_format)]};
- ASSERT(component_type == format.component_type);
return format;
}
@@ -185,6 +184,9 @@ GLint GetSwizzleSource(SwizzleSource source) {
}
void ApplyTextureDefaults(const SurfaceParams& params, GLuint texture) {
+ if (params.IsBuffer()) {
+ return;
+ }
glTextureParameteri(texture, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTextureParameteri(texture, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTextureParameteri(texture, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
@@ -209,6 +211,7 @@ OGLTexture CreateTexture(const SurfaceParams& params, GLenum target, GLenum inte
glNamedBufferStorage(texture_buffer.handle, params.width * params.GetBytesPerPixel(),
nullptr, GL_DYNAMIC_STORAGE_BIT);
glTextureBuffer(texture.handle, internal_format, texture_buffer.handle);
+ break;
case SurfaceTarget::Texture2D:
case SurfaceTarget::TextureCubemap:
glTextureStorage2D(texture.handle, params.emulated_levels, internal_format, params.width,
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h
index ff6ab6988..21324488a 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.h
+++ b/src/video_core/renderer_opengl/gl_texture_cache.h
@@ -51,7 +51,7 @@ public:
}
protected:
- void DecorateSurfaceName();
+ void DecorateSurfaceName() override;
View CreateView(const ViewParams& view_key) override;
View CreateViewInner(const ViewParams& view_key, bool is_proxy);
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index d267712c9..a35b45c9c 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -735,6 +735,16 @@ private:
return {};
}
+ Id FCastHalf0(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id FCastHalf1(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
Id HNegate(Operation operation) {
UNIMPLEMENTED();
return {};
@@ -745,6 +755,11 @@ private:
return {};
}
+ Id HCastFloat(Operation operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
Id HUnpack(Operation operation) {
UNIMPLEMENTED();
return {};
@@ -1057,6 +1072,26 @@ private:
return {};
}
+ Id BallotThread(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id VoteAll(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id VoteAny(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
+ Id VoteEqual(Operation) {
+ UNIMPLEMENTED();
+ return {};
+ }
+
Id DeclareBuiltIn(spv::BuiltIn builtin, spv::StorageClass storage, Id type,
const std::string& name) {
const Id id = OpVariable(type, storage);
@@ -1210,6 +1245,8 @@ private:
&SPIRVDecompiler::Unary<&Module::OpFNegate, Type::Float>,
&SPIRVDecompiler::Unary<&Module::OpFAbs, Type::Float>,
&SPIRVDecompiler::Ternary<&Module::OpFClamp, Type::Float>,
+ &SPIRVDecompiler::FCastHalf0,
+ &SPIRVDecompiler::FCastHalf1,
&SPIRVDecompiler::Binary<&Module::OpFMin, Type::Float>,
&SPIRVDecompiler::Binary<&Module::OpFMax, Type::Float>,
&SPIRVDecompiler::Unary<&Module::OpCos, Type::Float>,
@@ -1270,6 +1307,7 @@ private:
&SPIRVDecompiler::Unary<&Module::OpFAbs, Type::HalfFloat>,
&SPIRVDecompiler::HNegate,
&SPIRVDecompiler::HClamp,
+ &SPIRVDecompiler::HCastFloat,
&SPIRVDecompiler::HUnpack,
&SPIRVDecompiler::HMergeF32,
&SPIRVDecompiler::HMergeH0,
@@ -1346,6 +1384,11 @@ private:
&SPIRVDecompiler::WorkGroupId<0>,
&SPIRVDecompiler::WorkGroupId<1>,
&SPIRVDecompiler::WorkGroupId<2>,
+
+ &SPIRVDecompiler::BallotThread,
+ &SPIRVDecompiler::VoteAll,
+ &SPIRVDecompiler::VoteAny,
+ &SPIRVDecompiler::VoteEqual,
};
static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount));
diff --git a/src/video_core/shader/control_flow.cpp b/src/video_core/shader/control_flow.cpp
index fdcc970ff..ec3a76690 100644
--- a/src/video_core/shader/control_flow.cpp
+++ b/src/video_core/shader/control_flow.cpp
@@ -15,7 +15,7 @@
#include "video_core/shader/shader_ir.h"
namespace VideoCommon::Shader {
-
+namespace {
using Tegra::Shader::Instruction;
using Tegra::Shader::OpCode;
@@ -29,8 +29,7 @@ struct Query {
struct BlockStack {
BlockStack() = default;
- BlockStack(const BlockStack& b) = default;
- BlockStack(const Query& q) : ssy_stack{q.ssy_stack}, pbk_stack{q.pbk_stack} {}
+ explicit BlockStack(const Query& q) : ssy_stack{q.ssy_stack}, pbk_stack{q.pbk_stack} {}
std::stack<u32> ssy_stack{};
std::stack<u32> pbk_stack{};
};
@@ -58,7 +57,7 @@ struct BlockInfo {
struct CFGRebuildState {
explicit CFGRebuildState(const ProgramCode& program_code, const std::size_t program_size,
const u32 start)
- : program_code{program_code}, program_size{program_size}, start{start} {}
+ : start{start}, program_code{program_code}, program_size{program_size} {}
u32 start{};
std::vector<BlockInfo> block_info{};
@@ -85,7 +84,7 @@ std::pair<BlockCollision, u32> TryGetBlock(CFGRebuildState& state, u32 address)
return {BlockCollision::Inside, index};
}
}
- return {BlockCollision::None, -1};
+ return {BlockCollision::None, 0xFFFFFFFF};
}
struct ParseInfo {
@@ -365,27 +364,29 @@ bool TryQuery(CFGRebuildState& state) {
const auto gather_end = labels.upper_bound(block.end);
while (gather_start != gather_end) {
cc.push(gather_start->second);
- gather_start++;
+ ++gather_start;
}
};
if (state.queries.empty()) {
return false;
}
+
Query& q = state.queries.front();
const u32 block_index = state.registered[q.address];
BlockInfo& block = state.block_info[block_index];
- // If the block is visted, check if the stacks match, else gather the ssy/pbk
+ // If the block is visited, check if the stacks match, else gather the ssy/pbk
// labels into the current stack and look if the branch at the end of the block
// consumes a label. Schedule new queries accordingly
if (block.visited) {
BlockStack& stack = state.stacks[q.address];
- const bool all_okay = (stack.ssy_stack.size() == 0 || q.ssy_stack == stack.ssy_stack) &&
- (stack.pbk_stack.size() == 0 || q.pbk_stack == stack.pbk_stack);
+ const bool all_okay = (stack.ssy_stack.empty() || q.ssy_stack == stack.ssy_stack) &&
+ (stack.pbk_stack.empty() || q.pbk_stack == stack.pbk_stack);
state.queries.pop_front();
return all_okay;
}
block.visited = true;
- state.stacks[q.address] = BlockStack{q};
+ state.stacks.insert_or_assign(q.address, BlockStack{q});
+
Query q2(q);
state.queries.pop_front();
gather_labels(q2.ssy_stack, state.ssy_labels, block);
@@ -394,6 +395,7 @@ bool TryQuery(CFGRebuildState& state) {
q2.address = block.end + 1;
state.queries.push_back(q2);
}
+
Query conditional_query{q2};
if (block.branch.is_sync) {
if (block.branch.address == unassigned_branch) {
@@ -408,13 +410,15 @@ bool TryQuery(CFGRebuildState& state) {
conditional_query.pbk_stack.pop();
}
conditional_query.address = block.branch.address;
- state.queries.push_back(conditional_query);
+ state.queries.push_back(std::move(conditional_query));
return true;
}
+} // Anonymous namespace
-std::optional<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, u32 program_size,
- u32 start_address) {
+std::optional<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code,
+ std::size_t program_size, u32 start_address) {
CFGRebuildState state{program_code, program_size, start_address};
+
// Inspect Code and generate blocks
state.labels.clear();
state.labels.emplace(start_address);
@@ -424,10 +428,9 @@ std::optional<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, u
return {};
}
}
+
// Decompile Stacks
- Query start_query{};
- start_query.address = state.start;
- state.queries.push_back(start_query);
+ state.queries.push_back(Query{state.start, {}, {}});
bool decompiled = true;
while (!state.queries.empty()) {
if (!TryQuery(state)) {
@@ -435,14 +438,15 @@ std::optional<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, u
break;
}
}
+
// Sort and organize results
std::sort(state.block_info.begin(), state.block_info.end(),
- [](const BlockInfo& a, const BlockInfo& b) -> bool { return a.start < b.start; });
+ [](const BlockInfo& a, const BlockInfo& b) { return a.start < b.start; });
ShaderCharacteristics result_out{};
result_out.decompilable = decompiled;
result_out.start = start_address;
result_out.end = start_address;
- for (auto& block : state.block_info) {
+ for (const auto& block : state.block_info) {
ShaderBlock new_block{};
new_block.start = block.start;
new_block.end = block.end;
@@ -457,8 +461,9 @@ std::optional<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, u
}
if (result_out.decompilable) {
result_out.labels = std::move(state.labels);
- return {result_out};
+ return {std::move(result_out)};
}
+
// If it's not decompilable, merge the unlabelled blocks together
auto back = result_out.blocks.begin();
auto next = std::next(back);
@@ -469,8 +474,8 @@ std::optional<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, u
continue;
}
back = next;
- next++;
+ ++next;
}
- return {result_out};
+ return {std::move(result_out)};
}
} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/control_flow.h b/src/video_core/shader/control_flow.h
index 5e8ea3271..b0a5e4f8c 100644
--- a/src/video_core/shader/control_flow.h
+++ b/src/video_core/shader/control_flow.h
@@ -4,7 +4,6 @@
#pragma once
-#include <cstring>
#include <list>
#include <optional>
#include <unordered_set>
@@ -26,27 +25,44 @@ struct Condition {
bool IsUnconditional() const {
return predicate == Pred::UnusedIndex && cc == ConditionCode::T;
}
+
bool operator==(const Condition& other) const {
return std::tie(predicate, cc) == std::tie(other.predicate, other.cc);
}
+
+ bool operator!=(const Condition& other) const {
+ return !operator==(other);
+ }
};
struct ShaderBlock {
- u32 start{};
- u32 end{};
- bool ignore_branch{};
struct Branch {
Condition cond{};
bool kills{};
s32 address{};
+
bool operator==(const Branch& b) const {
return std::tie(cond, kills, address) == std::tie(b.cond, b.kills, b.address);
}
- } branch{};
+
+ bool operator!=(const Branch& b) const {
+ return !operator==(b);
+ }
+ };
+
+ u32 start{};
+ u32 end{};
+ bool ignore_branch{};
+ Branch branch{};
+
bool operator==(const ShaderBlock& sb) const {
return std::tie(start, end, ignore_branch, branch) ==
std::tie(sb.start, sb.end, sb.ignore_branch, sb.branch);
}
+
+ bool operator!=(const ShaderBlock& sb) const {
+ return !operator==(sb);
+ }
};
struct ShaderCharacteristics {
@@ -57,7 +73,7 @@ struct ShaderCharacteristics {
std::unordered_set<u32> labels{};
};
-std::optional<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, u32 program_size,
- u32 start_address);
+std::optional<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code,
+ std::size_t program_size, u32 start_address);
} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp
index afffd157f..47a9fd961 100644
--- a/src/video_core/shader/decode.cpp
+++ b/src/video_core/shader/decode.cpp
@@ -47,14 +47,14 @@ void ShaderIR::Decode() {
if (shader_info.decompilable) {
disable_flow_stack = true;
const auto insert_block = [this](NodeBlock& nodes, u32 label) {
- if (label == exit_branch) {
+ if (label == static_cast<u32>(exit_branch)) {
return;
}
basic_blocks.insert({label, nodes});
};
const auto& blocks = shader_info.blocks;
NodeBlock current_block;
- u32 current_label = exit_branch;
+ u32 current_label = static_cast<u32>(exit_branch);
for (auto& block : blocks) {
if (shader_info.labels.count(block.start) != 0) {
insert_block(current_block, current_label);
@@ -176,6 +176,7 @@ u32 ShaderIR::DecodeInstr(NodeBlock& bb, u32 pc) {
{OpCode::Type::Ffma, &ShaderIR::DecodeFfma},
{OpCode::Type::Hfma2, &ShaderIR::DecodeHfma2},
{OpCode::Type::Conversion, &ShaderIR::DecodeConversion},
+ {OpCode::Type::Warp, &ShaderIR::DecodeWarp},
{OpCode::Type::Memory, &ShaderIR::DecodeMemory},
{OpCode::Type::Texture, &ShaderIR::DecodeTexture},
{OpCode::Type::Image, &ShaderIR::DecodeImage},
diff --git a/src/video_core/shader/decode/arithmetic.cpp b/src/video_core/shader/decode/arithmetic.cpp
index 87d8fecaa..1473c282a 100644
--- a/src/video_core/shader/decode/arithmetic.cpp
+++ b/src/video_core/shader/decode/arithmetic.cpp
@@ -42,11 +42,14 @@ u32 ShaderIR::DecodeArithmetic(NodeBlock& bb, u32 pc) {
case OpCode::Id::FMUL_R:
case OpCode::Id::FMUL_IMM: {
// FMUL does not have 'abs' bits and only the second operand has a 'neg' bit.
- UNIMPLEMENTED_IF_MSG(instr.fmul.tab5cb8_2 != 0, "FMUL tab5cb8_2({}) is not implemented",
- instr.fmul.tab5cb8_2.Value());
- UNIMPLEMENTED_IF_MSG(
- instr.fmul.tab5c68_0 != 1, "FMUL tab5cb8_0({}) is not implemented",
- instr.fmul.tab5c68_0.Value()); // SMO typical sends 1 here which seems to be the default
+ if (instr.fmul.tab5cb8_2 != 0) {
+ LOG_WARNING(HW_GPU, "FMUL tab5cb8_2({}) is not implemented",
+ instr.fmul.tab5cb8_2.Value());
+ }
+ if (instr.fmul.tab5c68_0 != 1) {
+            LOG_WARNING(HW_GPU, "FMUL tab5c68_0({}) is not implemented",
+ instr.fmul.tab5c68_0.Value());
+ }
op_b = GetOperandAbsNegFloat(op_b, false, instr.fmul.negate_b);
diff --git a/src/video_core/shader/decode/arithmetic_half_immediate.cpp b/src/video_core/shader/decode/arithmetic_half_immediate.cpp
index 7bcf38f23..6466fc011 100644
--- a/src/video_core/shader/decode/arithmetic_half_immediate.cpp
+++ b/src/video_core/shader/decode/arithmetic_half_immediate.cpp
@@ -23,7 +23,9 @@ u32 ShaderIR::DecodeArithmeticHalfImmediate(NodeBlock& bb, u32 pc) {
LOG_WARNING(HW_GPU, "{} FTZ not implemented", opcode->get().GetName());
}
} else {
- UNIMPLEMENTED_IF(instr.alu_half_imm.precision != Tegra::Shader::HalfPrecision::None);
+ if (instr.alu_half_imm.precision != Tegra::Shader::HalfPrecision::None) {
+ LOG_WARNING(HW_GPU, "{} FTZ not implemented", opcode->get().GetName());
+ }
}
Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.alu_half_imm.type_a);
diff --git a/src/video_core/shader/decode/conversion.cpp b/src/video_core/shader/decode/conversion.cpp
index 4221f0c58..8973fbefa 100644
--- a/src/video_core/shader/decode/conversion.cpp
+++ b/src/video_core/shader/decode/conversion.cpp
@@ -57,7 +57,7 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
case OpCode::Id::I2F_R:
case OpCode::Id::I2F_C:
case OpCode::Id::I2F_IMM: {
- UNIMPLEMENTED_IF(instr.conversion.dst_size != Register::Size::Word);
+ UNIMPLEMENTED_IF(instr.conversion.dst_size == Register::Size::Long);
UNIMPLEMENTED_IF(instr.conversion.selector);
UNIMPLEMENTED_IF_MSG(instr.generates_cc,
"Condition codes generation in I2F is not implemented");
@@ -82,14 +82,19 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
value = GetOperandAbsNegFloat(value, false, instr.conversion.negate_a);
SetInternalFlagsFromFloat(bb, value, instr.generates_cc);
+
+ if (instr.conversion.dst_size == Register::Size::Short) {
+ value = Operation(OperationCode::HCastFloat, PRECISE, value);
+ }
+
SetRegister(bb, instr.gpr0, value);
break;
}
case OpCode::Id::F2F_R:
case OpCode::Id::F2F_C:
case OpCode::Id::F2F_IMM: {
- UNIMPLEMENTED_IF(instr.conversion.f2f.dst_size != Register::Size::Word);
- UNIMPLEMENTED_IF(instr.conversion.f2f.src_size != Register::Size::Word);
+ UNIMPLEMENTED_IF(instr.conversion.dst_size == Register::Size::Long);
+ UNIMPLEMENTED_IF(instr.conversion.src_size == Register::Size::Long);
UNIMPLEMENTED_IF_MSG(instr.generates_cc,
"Condition codes generation in F2F is not implemented");
@@ -107,6 +112,11 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
}
}();
+ if (instr.conversion.src_size == Register::Size::Short) {
+            // TODO: figure out where the extract is set in the encoding
+ value = Operation(OperationCode::FCastHalf0, PRECISE, value);
+ }
+
value = GetOperandAbsNegFloat(value, instr.conversion.abs_a, instr.conversion.negate_a);
value = [&]() {
@@ -124,19 +134,24 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
default:
UNIMPLEMENTED_MSG("Unimplemented F2F rounding mode {}",
static_cast<u32>(instr.conversion.f2f.rounding.Value()));
- return Immediate(0);
+ return value;
}
}();
value = GetSaturatedFloat(value, instr.alu.saturate_d);
SetInternalFlagsFromFloat(bb, value, instr.generates_cc);
+
+ if (instr.conversion.dst_size == Register::Size::Short) {
+ value = Operation(OperationCode::HCastFloat, PRECISE, value);
+ }
+
SetRegister(bb, instr.gpr0, value);
break;
}
case OpCode::Id::F2I_R:
case OpCode::Id::F2I_C:
case OpCode::Id::F2I_IMM: {
- UNIMPLEMENTED_IF(instr.conversion.src_size != Register::Size::Word);
+ UNIMPLEMENTED_IF(instr.conversion.src_size == Register::Size::Long);
UNIMPLEMENTED_IF_MSG(instr.generates_cc,
"Condition codes generation in F2I is not implemented");
Node value = [&]() {
@@ -153,6 +168,11 @@ u32 ShaderIR::DecodeConversion(NodeBlock& bb, u32 pc) {
}
}();
+ if (instr.conversion.src_size == Register::Size::Short) {
+            // TODO: figure out where the extract is set in the encoding
+ value = Operation(OperationCode::FCastHalf0, PRECISE, value);
+ }
+
value = GetOperandAbsNegFloat(value, instr.conversion.abs_a, instr.conversion.negate_a);
value = [&]() {
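
The conversion changes above route 16-bit sources through FCastHalf0 and 16-bit destinations through HCastFloat. A simplified stand-alone model of those two nodes, treating the packed f16vec2 as a pair of floats (the real decompiler keeps half precision and emits fromHalf2(vec2(a, 0.0f)) for the pack); names and types here are illustrative only:

    // Scalar model of the new cast nodes used by F2F/F2I/I2F above (types simplified).
    struct Half2 {
        float h0;  // low component of the packed pair
        float h1;  // high component
    };

    float FCastHalf0(Half2 v) {
        return v.h0;  // read a short source: take the low half
    }
    float FCastHalf1(Half2 v) {
        return v.h1;
    }
    Half2 HCastFloat(float a) {
        return Half2{a, 0.0f};  // write a short destination: pack into the low half, zero the high half
    }
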
diff --git a/src/video_core/shader/decode/ffma.cpp b/src/video_core/shader/decode/ffma.cpp
index 29be25ca3..ca2f39e8d 100644
--- a/src/video_core/shader/decode/ffma.cpp
+++ b/src/video_core/shader/decode/ffma.cpp
@@ -18,10 +18,12 @@ u32 ShaderIR::DecodeFfma(NodeBlock& bb, u32 pc) {
const auto opcode = OpCode::Decode(instr);
UNIMPLEMENTED_IF_MSG(instr.ffma.cc != 0, "FFMA cc not implemented");
- UNIMPLEMENTED_IF_MSG(instr.ffma.tab5980_0 != 1, "FFMA tab5980_0({}) not implemented",
- instr.ffma.tab5980_0.Value()); // Seems to be 1 by default based on SMO
- UNIMPLEMENTED_IF_MSG(instr.ffma.tab5980_1 != 0, "FFMA tab5980_1({}) not implemented",
- instr.ffma.tab5980_1.Value());
+ if (instr.ffma.tab5980_0 != 1) {
+ LOG_WARNING(HW_GPU, "FFMA tab5980_0({}) not implemented", instr.ffma.tab5980_0.Value());
+ }
+ if (instr.ffma.tab5980_1 != 0) {
+ LOG_WARNING(HW_GPU, "FFMA tab5980_1({}) not implemented", instr.ffma.tab5980_1.Value());
+ }
const Node op_a = GetRegister(instr.gpr8);
diff --git a/src/video_core/shader/decode/float_set.cpp b/src/video_core/shader/decode/float_set.cpp
index f5013e44a..5614e8a0d 100644
--- a/src/video_core/shader/decode/float_set.cpp
+++ b/src/video_core/shader/decode/float_set.cpp
@@ -15,7 +15,6 @@ using Tegra::Shader::OpCode;
u32 ShaderIR::DecodeFloatSet(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
- const auto opcode = OpCode::Decode(instr);
const Node op_a = GetOperandAbsNegFloat(GetRegister(instr.gpr8), instr.fset.abs_a != 0,
instr.fset.neg_a != 0);
diff --git a/src/video_core/shader/decode/float_set_predicate.cpp b/src/video_core/shader/decode/float_set_predicate.cpp
index 2323052b0..34854fcca 100644
--- a/src/video_core/shader/decode/float_set_predicate.cpp
+++ b/src/video_core/shader/decode/float_set_predicate.cpp
@@ -16,7 +16,6 @@ using Tegra::Shader::Pred;
u32 ShaderIR::DecodeFloatSetPredicate(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
- const auto opcode = OpCode::Decode(instr);
const Node op_a = GetOperandAbsNegFloat(GetRegister(instr.gpr8), instr.fsetp.abs_a != 0,
instr.fsetp.neg_a != 0);
diff --git a/src/video_core/shader/decode/half_set_predicate.cpp b/src/video_core/shader/decode/half_set_predicate.cpp
index ad180d6df..afea33e5f 100644
--- a/src/video_core/shader/decode/half_set_predicate.cpp
+++ b/src/video_core/shader/decode/half_set_predicate.cpp
@@ -18,7 +18,7 @@ u32 ShaderIR::DecodeHalfSetPredicate(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
const auto opcode = OpCode::Decode(instr);
- UNIMPLEMENTED_IF(instr.hsetp2.ftz != 0);
+ DEBUG_ASSERT(instr.hsetp2.ftz == 0);
Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.hsetp2.type_a);
op_a = GetOperandAbsNegHalf(op_a, instr.hsetp2.abs_a, instr.hsetp2.negate_a);
@@ -30,7 +30,7 @@ u32 ShaderIR::DecodeHalfSetPredicate(NodeBlock& bb, u32 pc) {
case OpCode::Id::HSETP2_C:
cond = instr.hsetp2.cbuf_and_imm.cond;
h_and = instr.hsetp2.cbuf_and_imm.h_and;
- op_b = GetOperandAbsNegHalf(GetConstBuffer(instr.cbuf34.index, instr.cbuf34.offset),
+ op_b = GetOperandAbsNegHalf(GetConstBuffer(instr.cbuf34.index, instr.cbuf34.GetOffset()),
instr.hsetp2.cbuf.abs_b, instr.hsetp2.cbuf.negate_b);
break;
case OpCode::Id::HSETP2_IMM:
@@ -52,15 +52,15 @@ u32 ShaderIR::DecodeHalfSetPredicate(NodeBlock& bb, u32 pc) {
}
const OperationCode combiner = GetPredicateCombiner(instr.hsetp2.op);
- const Node pred39 = GetPredicate(instr.hsetp2.pred39, instr.hsetp2.neg_pred);
+ const Node combined_pred = GetPredicate(instr.hsetp2.pred3, instr.hsetp2.neg_pred);
const auto Write = [&](u64 dest, Node src) {
- SetPredicate(bb, dest, Operation(combiner, std::move(src), pred39));
+ SetPredicate(bb, dest, Operation(combiner, std::move(src), combined_pred));
};
const Node comparison = GetPredicateComparisonHalf(cond, op_a, op_b);
const u64 first = instr.hsetp2.pred0;
- const u64 second = instr.hsetp2.pred3;
+ const u64 second = instr.hsetp2.pred39;
if (h_and) {
const Node joined = Operation(OperationCode::LogicalAnd2, comparison);
Write(first, joined);
diff --git a/src/video_core/shader/decode/hfma2.cpp b/src/video_core/shader/decode/hfma2.cpp
index c3bcf1ae9..5b44cb79c 100644
--- a/src/video_core/shader/decode/hfma2.cpp
+++ b/src/video_core/shader/decode/hfma2.cpp
@@ -22,9 +22,9 @@ u32 ShaderIR::DecodeHfma2(NodeBlock& bb, u32 pc) {
const auto opcode = OpCode::Decode(instr);
if (opcode->get().GetId() == OpCode::Id::HFMA2_RR) {
- UNIMPLEMENTED_IF(instr.hfma2.rr.precision != HalfPrecision::None);
+ DEBUG_ASSERT(instr.hfma2.rr.precision == HalfPrecision::None);
} else {
- UNIMPLEMENTED_IF(instr.hfma2.precision != HalfPrecision::None);
+ DEBUG_ASSERT(instr.hfma2.precision == HalfPrecision::None);
}
constexpr auto identity = HalfType::H0_H1;
diff --git a/src/video_core/shader/decode/integer_set.cpp b/src/video_core/shader/decode/integer_set.cpp
index 46e3d5905..59809bcd8 100644
--- a/src/video_core/shader/decode/integer_set.cpp
+++ b/src/video_core/shader/decode/integer_set.cpp
@@ -14,7 +14,6 @@ using Tegra::Shader::OpCode;
u32 ShaderIR::DecodeIntegerSet(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
- const auto opcode = OpCode::Decode(instr);
const Node op_a = GetRegister(instr.gpr8);
const Node op_b = [&]() {
diff --git a/src/video_core/shader/decode/integer_set_predicate.cpp b/src/video_core/shader/decode/integer_set_predicate.cpp
index dd20775d7..25e48fef8 100644
--- a/src/video_core/shader/decode/integer_set_predicate.cpp
+++ b/src/video_core/shader/decode/integer_set_predicate.cpp
@@ -16,7 +16,6 @@ using Tegra::Shader::Pred;
u32 ShaderIR::DecodeIntegerSetPredicate(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
- const auto opcode = OpCode::Decode(instr);
const Node op_a = GetRegister(instr.gpr8);
diff --git a/src/video_core/shader/decode/other.cpp b/src/video_core/shader/decode/other.cpp
index c0f64d7a0..d46e0f823 100644
--- a/src/video_core/shader/decode/other.cpp
+++ b/src/video_core/shader/decode/other.cpp
@@ -22,6 +22,12 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
const auto opcode = OpCode::Decode(instr);
switch (opcode->get().GetId()) {
+ case OpCode::Id::NOP: {
+ UNIMPLEMENTED_IF(instr.nop.cc != Tegra::Shader::ConditionCode::T);
+ UNIMPLEMENTED_IF(instr.nop.trigger != 0);
+ // With the previous preconditions, this instruction is a no-operation.
+ break;
+ }
case OpCode::Id::EXIT: {
const Tegra::Shader::ConditionCode cc = instr.flow_condition_code;
UNIMPLEMENTED_IF_MSG(cc != Tegra::Shader::ConditionCode::T, "EXIT condition code used: {}",
@@ -68,6 +74,13 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
case SystemVariable::InvocationInfo:
LOG_WARNING(HW_GPU, "MOV_SYS instruction with InvocationInfo is incomplete");
return Immediate(0u);
+ case SystemVariable::Tid: {
+ Node value = Immediate(0);
+ value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdX), 0, 9);
+ value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdY), 16, 9);
+ value = BitfieldInsert(value, Operation(OperationCode::LocalInvocationIdZ), 26, 5);
+ return value;
+ }
case SystemVariable::TidX:
return Operation(OperationCode::LocalInvocationIdX);
case SystemVariable::TidY:
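
The new SystemVariable::Tid case above packs the three local invocation IDs into a single register: x in bits 0-8, y in bits 16-24 and z in bits 26-30, which is what the three BitfieldInsert calls express. A self-contained sketch of that layout with hypothetical thread coordinates:

    #include <cstdint>

    // Illustrative helper only: reproduces the bit layout of the three BitfieldInsert calls.
    std::uint32_t PackTid(std::uint32_t x, std::uint32_t y, std::uint32_t z) {
        std::uint32_t value = 0;
        value |= (x & 0x1FFu);        // 9 bits inserted at offset 0
        value |= (y & 0x1FFu) << 16;  // 9 bits inserted at offset 16
        value |= (z & 0x1Fu) << 26;   // 5 bits inserted at offset 26
        return value;
    }
    // PackTid(3, 2, 1) == 0x04020003
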
diff --git a/src/video_core/shader/decode/predicate_set_register.cpp b/src/video_core/shader/decode/predicate_set_register.cpp
index febbfeb50..84dbc50fe 100644
--- a/src/video_core/shader/decode/predicate_set_register.cpp
+++ b/src/video_core/shader/decode/predicate_set_register.cpp
@@ -15,7 +15,6 @@ using Tegra::Shader::OpCode;
u32 ShaderIR::DecodePredicateSetRegister(NodeBlock& bb, u32 pc) {
const Instruction instr = {program_code[pc]};
- const auto opcode = OpCode::Decode(instr);
UNIMPLEMENTED_IF_MSG(instr.generates_cc,
"Condition codes generation in PSET is not implemented");
diff --git a/src/video_core/shader/decode/warp.cpp b/src/video_core/shader/decode/warp.cpp
new file mode 100644
index 000000000..04ca74f46
--- /dev/null
+++ b/src/video_core/shader/decode/warp.cpp
@@ -0,0 +1,55 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "video_core/engines/shader_bytecode.h"
+#include "video_core/shader/node_helper.h"
+#include "video_core/shader/shader_ir.h"
+
+namespace VideoCommon::Shader {
+
+using Tegra::Shader::Instruction;
+using Tegra::Shader::OpCode;
+using Tegra::Shader::Pred;
+using Tegra::Shader::VoteOperation;
+
+namespace {
+OperationCode GetOperationCode(VoteOperation vote_op) {
+ switch (vote_op) {
+ case VoteOperation::All:
+ return OperationCode::VoteAll;
+ case VoteOperation::Any:
+ return OperationCode::VoteAny;
+ case VoteOperation::Eq:
+ return OperationCode::VoteEqual;
+ default:
+ UNREACHABLE_MSG("Invalid vote operation={}", static_cast<u64>(vote_op));
+ return OperationCode::VoteAll;
+ }
+}
+} // Anonymous namespace
+
+u32 ShaderIR::DecodeWarp(NodeBlock& bb, u32 pc) {
+ const Instruction instr = {program_code[pc]};
+ const auto opcode = OpCode::Decode(instr);
+
+ switch (opcode->get().GetId()) {
+ case OpCode::Id::VOTE: {
+ const Node value = GetPredicate(instr.vote.value, instr.vote.negate_value != 0);
+ const Node active = Operation(OperationCode::BallotThread, value);
+ const Node vote = Operation(GetOperationCode(instr.vote.operation), value);
+ SetRegister(bb, instr.gpr0, active);
+ SetPredicate(bb, instr.vote.dest_pred, vote);
+ break;
+ }
+ default:
+ UNIMPLEMENTED_MSG("Unhandled warp instruction: {}", opcode->get().GetName());
+ break;
+ }
+
+ return pc;
+}
+
+} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h
index 715184d67..5db9313c4 100644
--- a/src/video_core/shader/node.h
+++ b/src/video_core/shader/node.h
@@ -30,6 +30,8 @@ enum class OperationCode {
FNegate, /// (MetaArithmetic, float a) -> float
FAbsolute, /// (MetaArithmetic, float a) -> float
FClamp, /// (MetaArithmetic, float value, float min, float max) -> float
+ FCastHalf0, /// (MetaArithmetic, f16vec2 a) -> float
+ FCastHalf1, /// (MetaArithmetic, f16vec2 a) -> float
FMin, /// (MetaArithmetic, float a, float b) -> float
FMax, /// (MetaArithmetic, float a, float b) -> float
FCos, /// (MetaArithmetic, float a) -> float
@@ -83,17 +85,18 @@ enum class OperationCode {
UBitfieldExtract, /// (MetaArithmetic, uint value, int offset, int offset) -> uint
UBitCount, /// (MetaArithmetic, uint) -> uint
- HAdd, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2
- HMul, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2
- HFma, /// (MetaArithmetic, f16vec2 a, f16vec2 b, f16vec2 c) -> f16vec2
- HAbsolute, /// (f16vec2 a) -> f16vec2
- HNegate, /// (f16vec2 a, bool first, bool second) -> f16vec2
- HClamp, /// (f16vec2 src, float min, float max) -> f16vec2
- HUnpack, /// (Tegra::Shader::HalfType, T value) -> f16vec2
- HMergeF32, /// (f16vec2 src) -> float
- HMergeH0, /// (f16vec2 dest, f16vec2 src) -> f16vec2
- HMergeH1, /// (f16vec2 dest, f16vec2 src) -> f16vec2
- HPack2, /// (float a, float b) -> f16vec2
+ HAdd, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2
+ HMul, /// (MetaArithmetic, f16vec2 a, f16vec2 b) -> f16vec2
+ HFma, /// (MetaArithmetic, f16vec2 a, f16vec2 b, f16vec2 c) -> f16vec2
+ HAbsolute, /// (f16vec2 a) -> f16vec2
+ HNegate, /// (f16vec2 a, bool first, bool second) -> f16vec2
+ HClamp, /// (f16vec2 src, float min, float max) -> f16vec2
+ HCastFloat, /// (MetaArithmetic, float a) -> f16vec2
+ HUnpack, /// (Tegra::Shader::HalfType, T value) -> f16vec2
+ HMergeF32, /// (f16vec2 src) -> float
+ HMergeH0, /// (f16vec2 dest, f16vec2 src) -> f16vec2
+ HMergeH1, /// (f16vec2 dest, f16vec2 src) -> f16vec2
+ HPack2, /// (float a, float b) -> f16vec2
LogicalAssign, /// (bool& dst, bool src) -> void
LogicalAnd, /// (bool a, bool b) -> bool
@@ -165,6 +168,11 @@ enum class OperationCode {
WorkGroupIdY, /// () -> uint
WorkGroupIdZ, /// () -> uint
+ BallotThread, /// (bool) -> uint
+ VoteAll, /// (bool) -> bool
+ VoteAny, /// (bool) -> bool
+ VoteEqual, /// (bool) -> bool
+
Amount,
};
diff --git a/src/video_core/shader/shader_ir.cpp b/src/video_core/shader/shader_ir.cpp
index 5e91fe129..1e5c7f660 100644
--- a/src/video_core/shader/shader_ir.cpp
+++ b/src/video_core/shader/shader_ir.cpp
@@ -405,4 +405,9 @@ Node ShaderIR::BitfieldExtract(Node value, u32 offset, u32 bits) {
Immediate(offset), Immediate(bits));
}
+Node ShaderIR::BitfieldInsert(Node base, Node insert, u32 offset, u32 bits) {
+ return Operation(OperationCode::UBitfieldInsert, NO_PRECISE, base, insert, Immediate(offset),
+ Immediate(bits));
+}
+
} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index 59a083d90..bcc9b79b6 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -167,6 +167,7 @@ private:
u32 DecodeFfma(NodeBlock& bb, u32 pc);
u32 DecodeHfma2(NodeBlock& bb, u32 pc);
u32 DecodeConversion(NodeBlock& bb, u32 pc);
+ u32 DecodeWarp(NodeBlock& bb, u32 pc);
u32 DecodeMemory(NodeBlock& bb, u32 pc);
u32 DecodeTexture(NodeBlock& bb, u32 pc);
u32 DecodeImage(NodeBlock& bb, u32 pc);
@@ -279,6 +280,9 @@ private:
/// Extracts a sequence of bits from a node
Node BitfieldExtract(Node value, u32 offset, u32 bits);
+ /// Inserts a sequence of bits from a node
+ Node BitfieldInsert(Node base, Node insert, u32 offset, u32 bits);
+
void WriteTexInstructionFloat(NodeBlock& bb, Tegra::Shader::Instruction instr,
const Node4& components);
diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp
index a53e02253..55f5949e4 100644
--- a/src/video_core/shader/track.cpp
+++ b/src/video_core/shader/track.cpp
@@ -59,8 +59,8 @@ std::tuple<Node, u32, u32> ShaderIR::TrackCbuf(Node tracked, const NodeBlock& co
return TrackCbuf(source, code, new_cursor);
}
if (const auto operation = std::get_if<OperationNode>(&*tracked)) {
- for (std::size_t i = 0; i < operation->GetOperandsCount(); ++i) {
- if (auto found = TrackCbuf((*operation)[i], code, cursor); std::get<0>(found)) {
+ for (std::size_t i = operation->GetOperandsCount(); i > 0; --i) {
+ if (auto found = TrackCbuf((*operation)[i - 1], code, cursor); std::get<0>(found)) {
// Cbuf found in operand.
return found;
}
diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp
index 6af9044ca..683c49207 100644
--- a/src/video_core/texture_cache/surface_base.cpp
+++ b/src/video_core/texture_cache/surface_base.cpp
@@ -24,9 +24,8 @@ StagingCache::StagingCache() = default;
StagingCache::~StagingCache() = default;
SurfaceBaseImpl::SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params)
- : params{params}, mipmap_sizes(params.num_levels),
- mipmap_offsets(params.num_levels), gpu_addr{gpu_addr}, host_memory_size{
- params.GetHostSizeInBytes()} {
+ : params{params}, host_memory_size{params.GetHostSizeInBytes()}, gpu_addr{gpu_addr},
+ mipmap_sizes(params.num_levels), mipmap_offsets(params.num_levels) {
std::size_t offset = 0;
for (u32 level = 0; level < params.num_levels; ++level) {
const std::size_t mipmap_size{params.GetGuestMipmapSize(level)};
diff --git a/src/video_core/texture_cache/surface_params.h b/src/video_core/texture_cache/surface_params.h
index 358d6757c..e7ef66ee2 100644
--- a/src/video_core/texture_cache/surface_params.h
+++ b/src/video_core/texture_cache/surface_params.h
@@ -58,7 +58,6 @@ public:
std::size_t GetHostSizeInBytes() const {
std::size_t host_size_in_bytes;
if (GetCompressionType() == SurfaceCompression::Converted) {
- constexpr std::size_t rgb8_bpp = 4ULL;
            // ASTC is decompressed in software and emulated as RGBA8
host_size_in_bytes = 0;
for (u32 level = 0; level < num_levels; ++level) {
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index a3a3770a7..2ec0203d1 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -308,8 +308,6 @@ protected:
if (!guard_render_targets && surface->IsRenderTarget()) {
ManageRenderTargetUnregister(surface);
}
- const GPUVAddr gpu_addr = surface->GetGpuAddr();
- const CacheAddr cache_ptr = surface->GetCacheAddr();
const std::size_t size = surface->GetSizeInBytes();
const VAddr cpu_addr = surface->GetCpuAddr();
rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1);
diff --git a/src/video_core/textures/texture.h b/src/video_core/textures/texture.h
index e3be018b9..e36bc2c04 100644
--- a/src/video_core/textures/texture.h
+++ b/src/video_core/textures/texture.h
@@ -213,7 +213,7 @@ struct TICEntry {
if (header_version != TICHeaderVersion::OneDBuffer) {
return width_minus_1 + 1;
}
- return (buffer_high_width_minus_one << 16) | buffer_low_width_minus_one;
+ return ((buffer_high_width_minus_one << 16) | buffer_low_width_minus_one) + 1;
}
u32 Height() const {
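
The texture.h change is an off-by-one fix for 1D buffer textures: the descriptor stores the width minus one, split across a low and a high field, so the decoded value needs the trailing + 1. A quick stand-alone check with hypothetical field values:

    #include <cstdint>

    // Illustrative helper only, mirroring the corrected expression.
    std::uint32_t BufferWidth(std::uint32_t low_minus_one, std::uint32_t high_minus_one) {
        return ((high_minus_one << 16) | low_minus_one) + 1;
    }
    // BufferWidth(0xFFFF, 0) == 65536; the old expression returned 65535 for the same
    // descriptor, one texel short.
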