author     Fernando Sahmkow <fsahmkow27@gmail.com>    2020-04-08 18:08:06 +0200
committer  Fernando Sahmkow <fsahmkow27@gmail.com>    2020-04-08 18:08:06 +0200
commit     e00d992848d8c7a0117b79f1fe9c521c333bf6f7 (patch)
tree       1b7cd21d9b0dc68fb75df763490315e950566047 /src/video_core
parent     Clang Format. (diff)
Diffstat (limited to 'src/video_core')
 src/video_core/buffer_cache/buffer_cache.h |  6
 src/video_core/memory_manager.cpp          | 91
 src/video_core/memory_manager.h            |  5
 3 files changed, 47 insertions(+), 55 deletions(-)
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 262d0fc6e..eb2f9ecf2 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -53,7 +53,7 @@ public:
         if (!is_written && !IsRegionWritten(cpu_addr, cpu_addr + size - 1)) {
             auto& memory_manager = system.GPU().MemoryManager();
             if (use_fast_cbuf) {
-                if (Tegra::MemoryManager::IsGranularRange(gpu_addr, size)) {
+                if (memory_manager.IsGranularRange(gpu_addr, size)) {
                     const auto host_ptr = memory_manager.GetPointer(gpu_addr);
                     return ConstBufferUpload(host_ptr, size);
                 } else {
@@ -62,7 +62,7 @@ public:
                     return ConstBufferUpload(staging_buffer.data(), size);
                 }
             } else {
-                if (Tegra::MemoryManager::IsGranularRange(gpu_addr, size)) {
+                if (memory_manager.IsGranularRange(gpu_addr, size)) {
                     const auto host_ptr = memory_manager.GetPointer(gpu_addr);
                     return StreamBufferUpload(host_ptr, size, alignment);
                 } else {
@@ -228,7 +228,7 @@ private:
         auto& memory_manager = system.GPU().MemoryManager();
         const VAddr cpu_addr_end = cpu_addr + size;
         MapInterval new_map = CreateMap(cpu_addr, cpu_addr_end, gpu_addr);
-        if (Tegra::MemoryManager::IsGranularRange(gpu_addr, size)) {
+        if (memory_manager.IsGranularRange(gpu_addr, size)) {
             u8* host_ptr = memory_manager.GetPointer(gpu_addr);
             UploadBlockData(block, block->GetOffset(cpu_addr), size, host_ptr);
         } else {
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index cddef8d86..eb934ad5e 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -139,11 +139,11 @@ T MemoryManager::Read(GPUVAddr addr) const {
         return {};
     }

-    const u8* page_pointer{page_table.pointers[addr >> page_bits]};
+    const u8* page_pointer{GetPointer(addr)};
     if (page_pointer) {
         // NOTE: Avoid adding any extra logic to this fast-path block
         T value;
-        std::memcpy(&value, &page_pointer[addr & page_mask], sizeof(T));
+        std::memcpy(&value, page_pointer, sizeof(T));
         return value;
     }

@@ -166,10 +166,10 @@ void MemoryManager::Write(GPUVAddr addr, T data) {
         return;
     }

-    u8* page_pointer{page_table.pointers[addr >> page_bits]};
+    u8* page_pointer{GetPointer(addr)};
     if (page_pointer) {
         // NOTE: Avoid adding any extra logic to this fast-path block
-        std::memcpy(&page_pointer[addr & page_mask], &data, sizeof(T));
+        std::memcpy(page_pointer, &data, sizeof(T));
         return;
     }

@@ -200,9 +200,12 @@ u8* MemoryManager::GetPointer(GPUVAddr addr) {
         return {};
     }

-    u8* const page_pointer{page_table.pointers[addr >> page_bits]};
-    if (page_pointer != nullptr) {
-        return page_pointer + (addr & page_mask);
+    auto& memory = system.Memory();
+
+    const VAddr page_addr{page_table.backing_addr[addr >> page_bits]};
+
+    if (page_addr != 0) {
+        return memory.GetPointer(page_addr + (addr & page_mask));
     }

     LOG_ERROR(HW_GPU, "Unknown GetPointer @ 0x{:016X}", addr);
@@ -214,9 +217,12 @@ const u8* MemoryManager::GetPointer(GPUVAddr addr) const {
         return {};
     }

-    const u8* const page_pointer{page_table.pointers[addr >> page_bits]};
-    if (page_pointer != nullptr) {
-        return page_pointer + (addr & page_mask);
+    const auto& memory = system.Memory();
+
+    const VAddr page_addr{page_table.backing_addr[addr >> page_bits]};
+
+    if (page_addr != 0) {
+        return memory.GetPointer(page_addr + (addr & page_mask));
     }

     LOG_ERROR(HW_GPU, "Unknown GetPointer @ 0x{:016X}", addr);
@@ -237,17 +243,19 @@ void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::s
     std::size_t page_index{src_addr >> page_bits};
     std::size_t page_offset{src_addr & page_mask};

+    auto& memory = system.Memory();
+
     while (remaining_size > 0) {
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};

         switch (page_table.attributes[page_index]) {
         case Common::PageType::Memory: {
-            const u8* src_ptr{page_table.pointers[page_index] + page_offset};
+            const VAddr src_addr{page_table.backing_addr[page_index] + page_offset};
             // Flush must happen on the rasterizer interface, such that memory is always synchronous
             // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu.
-            rasterizer.FlushRegion(page_table.backing_addr[page_index] + page_offset, copy_amount);
-            std::memcpy(dest_buffer, src_ptr, copy_amount);
+            rasterizer.FlushRegion(src_addr, copy_amount);
+            memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
             break;
         }
         default:
@@ -267,13 +275,15 @@ void MemoryManager::ReadBlockUnsafe(GPUVAddr src_addr, void* dest_buffer,
     std::size_t page_index{src_addr >> page_bits};
     std::size_t page_offset{src_addr & page_mask};

+    auto& memory = system.Memory();
+
     while (remaining_size > 0) {
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
         const u8* page_pointer = page_table.pointers[page_index];
         if (page_pointer) {
-            const u8* src_ptr{page_pointer + page_offset};
-            std::memcpy(dest_buffer, src_ptr, copy_amount);
+            const VAddr src_addr{page_table.backing_addr[page_index] + page_offset};
+            memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
         } else {
             std::memset(dest_buffer, 0, copy_amount);
         }
@@ -289,18 +299,19 @@ void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const
     std::size_t page_index{dest_addr >> page_bits};
     std::size_t page_offset{dest_addr & page_mask};

+    auto& memory = system.Memory();
+
     while (remaining_size > 0) {
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};

         switch (page_table.attributes[page_index]) {
         case Common::PageType::Memory: {
-            u8* dest_ptr{page_table.pointers[page_index] + page_offset};
+            const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset};
             // Invalidate must happen on the rasterizer interface, such that memory is always
             // synchronous when it is written (even when in asynchronous GPU mode).
-            rasterizer.InvalidateRegion(page_table.backing_addr[page_index] + page_offset,
-                                        copy_amount);
-            std::memcpy(dest_ptr, src_buffer, copy_amount);
+            rasterizer.InvalidateRegion(dest_addr, copy_amount);
+            memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
             break;
         }
         default:
@@ -320,13 +331,15 @@ void MemoryManager::WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer,
     std::size_t page_index{dest_addr >> page_bits};
     std::size_t page_offset{dest_addr & page_mask};

+    auto& memory = system.Memory();
+
     while (remaining_size > 0) {
         const std::size_t copy_amount{
             std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
         u8* page_pointer = page_table.pointers[page_index];
         if (page_pointer) {
-            u8* dest_ptr{page_pointer + page_offset};
-            std::memcpy(dest_ptr, src_buffer, copy_amount);
+            const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset};
+            memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
         }
         page_index++;
         page_offset = 0;
@@ -336,33 +349,9 @@ void MemoryManager::WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer,
 }

 void MemoryManager::CopyBlock(GPUVAddr dest_addr, GPUVAddr src_addr, const std::size_t size) {
-    std::size_t remaining_size{size};
-    std::size_t page_index{src_addr >> page_bits};
-    std::size_t page_offset{src_addr & page_mask};
-
-    while (remaining_size > 0) {
-        const std::size_t copy_amount{
-            std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)};
-
-        switch (page_table.attributes[page_index]) {
-        case Common::PageType::Memory: {
-            // Flush must happen on the rasterizer interface, such that memory is always synchronous
-            // when it is copied (even when in asynchronous GPU mode).
-            const u8* src_ptr{page_table.pointers[page_index] + page_offset};
-            rasterizer.FlushRegion(page_table.backing_addr[page_index] + page_offset, copy_amount);
-            WriteBlock(dest_addr, src_ptr, copy_amount);
-            break;
-        }
-        default:
-            UNREACHABLE();
-        }
-
-        page_index++;
-        page_offset = 0;
-        dest_addr += static_cast<VAddr>(copy_amount);
-        src_addr += static_cast<VAddr>(copy_amount);
-        remaining_size -= copy_amount;
-    }
+    std::vector<u8> tmp_buffer(size);
+    ReadBlock(src_addr, tmp_buffer.data(), size);
+    WriteBlock(dest_addr, tmp_buffer.data(), size);
 }

 void MemoryManager::CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, const std::size_t size) {
@@ -371,6 +360,12 @@ void MemoryManager::CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, const
     WriteBlockUnsafe(dest_addr, tmp_buffer.data(), size);
 }

+bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) {
+    const VAddr addr = page_table.backing_addr[gpu_addr >> page_bits];
+    const std::size_t page = (addr & Memory::PAGE_MASK) + size;
+    return page <= Memory::PAGE_SIZE;
+}
+
 void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type,
                              VAddr backing_addr) {
     LOG_DEBUG(HW_GPU, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * page_size,
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index f4ec77a3d..987400fdd 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -97,10 +97,7 @@ public:
     void WriteBlockUnsafe(GPUVAddr dest_addr, const void* src_buffer, std::size_t size);
     void CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, std::size_t size);

-    static bool IsGranularRange(GPUVAddr gpu_addr, std::size_t size) {
-        const std::size_t page = (gpu_addr & page_mask) + size;
-        return page <= page_size;
-    }
+    bool IsGranularRange(GPUVAddr gpu_addr, std::size_t size);

 private:
     using VMAMap = std::map<GPUVAddr, VirtualMemoryArea>;
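The most visible interface change is in memory_manager.h: IsGranularRange is no longer a static check on the GPU virtual address, but an instance method that consults page_table.backing_addr, so a range counts as granular only when the host (CPU) address backing it stays inside a single host page. A minimal standalone sketch of that check follows; the page sizes and the BackingAddr() lookup are assumptions standing in for Tegra::MemoryManager's page table, not yuzu code.

// Minimal sketch of the post-change granularity check (assumed constants, toy lookup).
#include <cstddef>
#include <cstdint>
#include <cstdio>

namespace sketch {

constexpr std::uint64_t kHostPageSize = 0x1000;      // assumed host (CPU) page size
constexpr std::uint64_t kHostPageMask = kHostPageSize - 1;
constexpr std::uint64_t kGpuPageBits = 16;           // assumed GPU page-table granularity (64 KiB)

// Hypothetical stand-in for page_table.backing_addr[]: maps a GPU page to the
// host virtual address that backs it.
std::uint64_t BackingAddr(std::uint64_t gpu_addr) {
    return 0x7f0000000800ULL + (gpu_addr >> kGpuPageBits) * kHostPageSize;
}

// A range is granular when its backing host address does not cross a host page
// boundary, so a caller may read or write it through one raw pointer.
bool IsGranularRange(std::uint64_t gpu_addr, std::size_t size) {
    const std::uint64_t addr = BackingAddr(gpu_addr);
    return (addr & kHostPageMask) + size <= kHostPageSize;
}

} // namespace sketch

int main() {
    // The backing address sits at offset 0x800 inside its host page, so a
    // 0x700-byte range fits but a 0x900-byte range crosses into the next page.
    std::printf("%d\n", sketch::IsGranularRange(0x10000, 0x700)); // 1: single-pointer fast path
    std::printf("%d\n", sketch::IsGranularRange(0x10000, 0x900)); // 0: fall back to staging copy
    return 0;
}

This mirrors how the buffer cache uses the result above: a granular range is uploaded straight from GetPointer(), while anything that may span host pages goes through the staging buffer path.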