author | Fernando Sahmkow <fsahmkow27@gmail.com> | 2021-06-13 03:34:06 +0200 |
---|---|---|
committer | Fernando Sahmkow <fsahmkow27@gmail.com> | 2021-07-04 22:32:35 +0200 |
commit | fd98fcf7f00d096322ccfaa1e35a314b4d698efd (patch) | |
tree | 90fe2a180845d1c28ad3996264188f6ae4aa9f11 /src/video_core/memory_manager.cpp | |
parent | Texture Cache: Initial Implementation of Sparse Textures. (diff) | |
download | yuzu-fd98fcf7f00d096322ccfaa1e35a314b4d698efd.tar yuzu-fd98fcf7f00d096322ccfaa1e35a314b4d698efd.tar.gz yuzu-fd98fcf7f00d096322ccfaa1e35a314b4d698efd.tar.bz2 yuzu-fd98fcf7f00d096322ccfaa1e35a314b4d698efd.tar.lz yuzu-fd98fcf7f00d096322ccfaa1e35a314b4d698efd.tar.xz yuzu-fd98fcf7f00d096322ccfaa1e35a314b4d698efd.tar.zst yuzu-fd98fcf7f00d096322ccfaa1e35a314b4d698efd.zip |
Diffstat (limited to 'src/video_core/memory_manager.cpp')
-rw-r--r-- | src/video_core/memory_manager.cpp | 105 |
1 file changed, 100 insertions, 5 deletions
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index 3589c72ea..e66af4443 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -69,11 +69,17 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
     } else {
         UNREACHABLE_MSG("Unmapping non-existent GPU address=0x{:x}", gpu_addr);
     }

-    // Flush and invalidate through the GPU interface, to be asynchronous if possible.
-    const std::optional<VAddr> cpu_addr = GpuToCpuAddress(gpu_addr);
-    ASSERT(cpu_addr);
-    rasterizer->UnmapMemory(*cpu_addr, size);
+    const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
+
+    for (const auto& map : submapped_ranges) {
+        // Flush and invalidate through the GPU interface, to be asynchronous if possible.
+        const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map.first);
+        ASSERT(cpu_addr);
+
+        rasterizer->UnmapMemory(*cpu_addr, map.second);
+    }
+
     UpdateRange(gpu_addr, PageEntry::State::Unmapped, size);
 }

@@ -128,7 +134,8 @@ void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::s
     //// Lock the new page
     // TryLockPage(page_entry, size);
     auto& current_page = page_table[PageEntryIndex(gpu_addr)];
-    if (current_page.IsValid() != page_entry.IsValid() ||
+
+    if ((!current_page.IsValid() && page_entry.IsValid()) ||
         current_page.ToAddress() != page_entry.ToAddress()) {
         rasterizer->ModifyGPUMemory(gpu_addr, size);
     }
@@ -179,6 +186,19 @@ std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
     return page_entry.ToAddress() + (gpu_addr & page_mask);
 }

+std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const {
+    size_t page_index{addr >> page_bits};
+    const size_t page_last{(addr + size + page_size - 1) >> page_bits};
+    while (page_index < page_last) {
+        const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
+        if (page_addr && *page_addr != 0) {
+            return page_addr;
+        }
+        ++page_index;
+    }
+    return std::nullopt;
+}
+
 template <typename T>
 T MemoryManager::Read(GPUVAddr addr) const {
     if (auto page_pointer{GetPointer(addr)}; page_pointer) {
@@ -375,4 +395,79 @@ bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
     return page <= Core::Memory::PAGE_SIZE;
 }

+bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const {
+    size_t page_index{gpu_addr >> page_bits};
+    const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits};
+    std::optional<VAddr> old_page_addr{};
+    while (page_index != page_last) {
+        const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
+        if (!page_addr || *page_addr == 0) {
+            return false;
+        }
+        if (old_page_addr) {
+            if (*old_page_addr + page_size != *page_addr) {
+                return false;
+            }
+        }
+        old_page_addr = page_addr;
+        ++page_index;
+    }
+    return true;
+}
+
+bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const {
+    size_t page_index{gpu_addr >> page_bits};
+    const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits};
+    while (page_index < page_last) {
+        if (!page_table[page_index].IsValid() || page_table[page_index].ToAddress() == 0) {
+            return false;
+        }
+        ++page_index;
+    }
+    return true;
+}
+
+std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange(
+    GPUVAddr gpu_addr, std::size_t size) const {
+    std::vector<std::pair<GPUVAddr, std::size_t>> result{};
+    size_t page_index{gpu_addr >> page_bits};
+    size_t remaining_size{size};
+    size_t page_offset{gpu_addr & page_mask};
+    std::optional<std::pair<GPUVAddr, std::size_t>> last_segment{};
+    std::optional<VAddr> old_page_addr{};
+    const auto extend_size = [this, &last_segment, &page_index](std::size_t bytes) {
+        if (!last_segment) {
+            GPUVAddr new_base_addr = page_index << page_bits;
+            last_segment = {new_base_addr, bytes};
+        } else {
+            last_segment->second += bytes;
+        }
+    };
+    const auto split = [this, &last_segment, &result] {
+        if (last_segment) {
+            result.push_back(*last_segment);
+            last_segment = std::nullopt;
+        }
+    };
+    while (remaining_size > 0) {
+        const size_t num_bytes{std::min(page_size - page_offset, remaining_size)};
+        const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
+        if (!page_addr) {
+            split();
+        } else if (old_page_addr) {
+            if (*old_page_addr + page_size != *page_addr) {
+                split();
+            }
+            extend_size(num_bytes);
+        } else {
+            extend_size(num_bytes);
+        }
+        ++page_index;
+        page_offset = 0;
+        remaining_size -= num_bytes;
+    }
+    split();
+    return result;
+}
+
 } // namespace Tegra
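The core of this change is the range-splitting walk in `GetSubmappedRange`: a run of pages that is both mapped and CPU-contiguous is accumulated into a segment, and the segment is flushed into the result whenever a hole or a CPU-address discontinuity is hit, so `Unmap` only forwards backed sub-ranges to `rasterizer->UnmapMemory`. Below is a standalone sketch of that walk against a toy page table; it is not code from the commit, and the 64 KiB page size, the `PageTable` map, the helper names, and `main` are all assumptions made for illustration.

```cpp
// Sketch only: splits a GPU range into the sub-ranges actually backed by a CPU
// mapping, mirroring the shape of GetSubmappedRange in the diff above.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <optional>
#include <unordered_map>
#include <utility>
#include <vector>

using GPUVAddr = std::uint64_t;
using VAddr = std::uint64_t;

constexpr std::size_t page_bits = 16;  // assumed 64 KiB pages for this sketch
constexpr std::size_t page_size = std::size_t{1} << page_bits;
constexpr std::size_t page_mask = page_size - 1;

// Toy page table: GPU page index -> CPU address of that page (missing == unmapped).
using PageTable = std::unordered_map<std::size_t, VAddr>;

std::optional<VAddr> GpuToCpu(const PageTable& table, GPUVAddr addr) {
    const auto it = table.find(static_cast<std::size_t>(addr >> page_bits));
    if (it == table.end()) {
        return std::nullopt;
    }
    return it->second + (addr & page_mask);
}

// Returns {gpu_addr, size} pairs covering only the mapped, CPU-contiguous pieces.
std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRanges(const PageTable& table,
                                                                 GPUVAddr gpu_addr,
                                                                 std::size_t size) {
    std::vector<std::pair<GPUVAddr, std::size_t>> result;
    std::size_t page_index = gpu_addr >> page_bits;
    std::size_t page_offset = gpu_addr & page_mask;
    std::size_t remaining = size;
    std::optional<std::pair<GPUVAddr, std::size_t>> segment;
    std::optional<VAddr> prev_page_addr;

    const auto split = [&] {
        if (segment) {
            result.push_back(*segment);
            segment.reset();
        }
    };

    while (remaining > 0) {
        const std::size_t bytes = std::min(page_size - page_offset, remaining);
        const GPUVAddr page_base = static_cast<GPUVAddr>(page_index) << page_bits;
        const auto page_addr = GpuToCpu(table, page_base);
        if (!page_addr) {
            split();  // hole: close the current segment
        } else {
            if (prev_page_addr && *prev_page_addr + page_size != *page_addr) {
                split();  // mapped, but not CPU-contiguous with the previous page
            }
            if (!segment) {
                segment = {page_base + page_offset, bytes};
            } else {
                segment->second += bytes;
            }
        }
        prev_page_addr = page_addr;
        ++page_index;
        page_offset = 0;
        remaining -= bytes;
    }
    split();
    return result;
}

int main() {
    PageTable table;
    table[0] = 0x100000;  // pages 0 and 1 are CPU-contiguous
    table[1] = 0x110000;
    // page 2 is unmapped (a hole)
    table[3] = 0x300000;  // page 3 is mapped elsewhere

    for (const auto& [addr, len] : GetSubmappedRanges(table, 0, 4 * page_size)) {
        std::printf("gpu=0x%llx size=0x%zx\n", static_cast<unsigned long long>(addr), len);
    }
    // Expected: one segment covering pages 0-1, one covering page 3.
}
```

Run as written, the sketch prints a segment for pages 0-1 and a separate segment for page 3: the hole at page 2 and the non-contiguous CPU backing both force a split, which is exactly the behaviour the reworked `Unmap` relies on.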