author     liamwhite <liamwhite@users.noreply.github.com>   2022-08-21 18:59:01 +0200
committer  GitHub <noreply@github.com>                      2022-08-21 18:59:01 +0200
commit     064f836d1c4f6de7d7c100b4aff700bf66448e22 (patch)
tree       7f087cacbde745c7d64f205618b80624c8599074 /src/core
parent     Merge pull request #8790 from liamwhite/too-many-ways-to-name-a-byte-string (diff)
parent     code: dodge PAGE_SIZE #define (diff)
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.cpp   4
-rw-r--r--  src/core/loader/kip.cpp                      2
-rw-r--r--  src/core/loader/nro.cpp                      2
-rw-r--r--  src/core/loader/nso.cpp                      2
-rw-r--r--  src/core/memory.cpp                         81
-rw-r--r--  src/core/memory.h                            6
6 files changed, 50 insertions(+), 47 deletions(-)
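
As the parent commit message ("code: dodge PAGE_SIZE #define") indicates, the rename exists because PAGE_SIZE is a preprocessor macro on some platforms (for example via <sys/user.h> on certain glibc-based systems), so an identifier with that exact name is rewritten before the compiler ever sees it. The sketch below simulates the clash with a local #define so it compiles anywhere; the macro value and the header mentioned are illustrative assumptions, not taken from this diff.

```cpp
// Illustrative only: PAGE_SIZE is #define'd locally to stand in for a
// platform header that provides it as a macro.
#include <cstddef>
#include <cstdint>

#define PAGE_SIZE 4096  // stand-in for a platform-provided macro

namespace Core::Memory {
// "constexpr u64 PAGE_SIZE = ...;" would expand to "constexpr u64 4096 = ...;"
// and fail to compile. Project-prefixed names are immune to the macro.
constexpr std::size_t YUZU_PAGEBITS = 12;
constexpr std::uint64_t YUZU_PAGESIZE = 1ULL << YUZU_PAGEBITS;
constexpr std::uint64_t YUZU_PAGEMASK = YUZU_PAGESIZE - 1;
} // namespace Core::Memory

static_assert(Core::Memory::YUZU_PAGESIZE == PAGE_SIZE);

int main() {
    return 0;
}
```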
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 3b8b43994..d1e70f19d 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -190,8 +190,8 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
config.callbacks = cb.get();
config.coprocessors[15] = cp15;
config.define_unpredictable_behaviour = true;
- static constexpr std::size_t PAGE_BITS = 12;
- static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS);
+ static constexpr std::size_t YUZU_PAGEBITS = 12;
+ static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - YUZU_PAGEBITS);
if (page_table) {
config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
page_table->pointers.data());
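
The dynarmic hunk above only renames the local constant; the arithmetic is unchanged: with 12 page bits, a 32-bit guest address space needs 1 << (32 - 12) = 1,048,576 page-table entries. A standalone check of that sizing (names local to the example, not yuzu's):

```cpp
// Standalone check of the page-table sizing used in MakeJit above.
#include <cstddef>

static constexpr std::size_t YUZU_PAGEBITS = 12;
static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - YUZU_PAGEBITS);

// 32-bit address space / 4 KiB pages -> 2^20 entries.
static_assert(NUM_PAGE_TABLE_ENTRIES == 1'048'576);

int main() {
    return 0;
}
```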
diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp
index 9af46a0f7..d8a1bf82a 100644
--- a/src/core/loader/kip.cpp
+++ b/src/core/loader/kip.cpp
@@ -14,7 +14,7 @@ namespace Loader {
namespace {
constexpr u32 PageAlignSize(u32 size) {
- return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
+ return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
}
} // Anonymous namespace
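
All three loader hunks (kip.cpp here, plus nro.cpp and nso.cpp below) touch the same PageAlignSize idiom: adding the page mask and then clearing the low bits rounds a size up to the next 4 KiB boundary. A minimal standalone sketch, with local stand-ins for the Core::Memory constants:

```cpp
// Round-up-to-page idiom from the loader hunks, written as a self-contained
// example. kPageMask mirrors Core::Memory::YUZU_PAGEMASK.
#include <cstdint>

constexpr std::uint32_t kPageSize = 1u << 12;      // 0x1000
constexpr std::uint32_t kPageMask = kPageSize - 1; // 0x0FFF

constexpr std::uint32_t PageAlignSize(std::uint32_t size) {
    return (size + kPageMask) & ~kPageMask;
}

// Sizes already on a page boundary are unchanged; everything else rounds up.
static_assert(PageAlignSize(0x0000) == 0x0000);
static_assert(PageAlignSize(0x0001) == 0x1000);
static_assert(PageAlignSize(0x1000) == 0x1000);
static_assert(PageAlignSize(0x1001) == 0x2000);

int main() {
    return 0;
}
```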
diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp
index 1b0bb0876..73d04d7ee 100644
--- a/src/core/loader/nro.cpp
+++ b/src/core/loader/nro.cpp
@@ -125,7 +125,7 @@ FileType AppLoader_NRO::IdentifyType(const FileSys::VirtualFile& nro_file) {
}
static constexpr u32 PageAlignSize(u32 size) {
- return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
+ return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
}
static bool LoadNroImpl(Kernel::KProcess& process, const std::vector<u8>& data) {
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index 8dd956fc6..4c3b3c655 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -45,7 +45,7 @@ std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data,
}
constexpr u32 PageAlignSize(u32 size) {
- return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
+ return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
}
} // Anonymous namespace
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 1b44280b5..34ad7cadd 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -36,10 +36,11 @@ struct Memory::Impl {
}
void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
- ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
- ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+ ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+ ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", target);
- MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
+ MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
+ Common::PageType::Memory);
if (Settings::IsFastmemEnabled()) {
system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
@@ -47,9 +48,10 @@ struct Memory::Impl {
}
void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
- ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
- ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
- MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped);
+ ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+ ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
+ MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
+ Common::PageType::Unmapped);
if (Settings::IsFastmemEnabled()) {
system.DeviceMemory().buffer.Unmap(base, size);
@@ -57,7 +59,7 @@ struct Memory::Impl {
}
[[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const {
- const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
+ const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
if (!paddr) {
return {};
@@ -67,7 +69,7 @@ struct Memory::Impl {
}
[[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
- const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
+ const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
if (paddr == 0) {
return {};
@@ -176,13 +178,14 @@ struct Memory::Impl {
auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) {
const auto& page_table = process.PageTable().PageTableImpl();
std::size_t remaining_size = size;
- std::size_t page_index = addr >> PAGE_BITS;
- std::size_t page_offset = addr & PAGE_MASK;
+ std::size_t page_index = addr >> YUZU_PAGEBITS;
+ std::size_t page_offset = addr & YUZU_PAGEMASK;
while (remaining_size) {
const std::size_t copy_amount =
- std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
- const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+ std::min(static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining_size);
+ const auto current_vaddr =
+ static_cast<VAddr>((page_index << YUZU_PAGEBITS) + page_offset);
const auto [pointer, type] = page_table.pointers[page_index].PointerType();
switch (type) {
@@ -192,7 +195,7 @@ struct Memory::Impl {
}
case Common::PageType::Memory: {
DEBUG_ASSERT(pointer);
- u8* mem_ptr = pointer + page_offset + (page_index << PAGE_BITS);
+ u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS);
on_memory(copy_amount, mem_ptr);
break;
}
@@ -339,10 +342,10 @@ struct Memory::Impl {
// Iterate over a contiguous CPU address space, marking/unmarking the region.
// The region is at a granularity of CPU pages.
- const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
- for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+ const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
+ for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
const Common::PageType page_type{
- current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
+ current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
if (debug) {
// Switch page type to debug if now debug
switch (page_type) {
@@ -354,7 +357,7 @@ struct Memory::Impl {
// Page is already marked.
break;
case Common::PageType::Memory:
- current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+ current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
nullptr, Common::PageType::DebugMemory);
break;
default:
@@ -371,9 +374,9 @@ struct Memory::Impl {
// Don't mess with already non-debug or rasterizer memory.
break;
case Common::PageType::DebugMemory: {
- u8* const pointer{GetPointerFromDebugMemory(vaddr & ~PAGE_MASK)};
- current_page_table->pointers[vaddr >> PAGE_BITS].Store(
- pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
+ u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)};
+ current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+ pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
break;
}
default:
@@ -398,10 +401,10 @@ struct Memory::Impl {
// granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
// is different). This assumes the specified GPU address region is contiguous as well.
- const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
- for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+ const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
+ for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
const Common::PageType page_type{
- current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
+ current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
if (cached) {
// Switch page type to cached if now cached
switch (page_type) {
@@ -411,7 +414,7 @@ struct Memory::Impl {
break;
case Common::PageType::DebugMemory:
case Common::PageType::Memory:
- current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+ current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
nullptr, Common::PageType::RasterizerCachedMemory);
break;
case Common::PageType::RasterizerCachedMemory:
@@ -434,16 +437,16 @@ struct Memory::Impl {
// that this area is already unmarked as cached.
break;
case Common::PageType::RasterizerCachedMemory: {
- u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)};
+ u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~YUZU_PAGEMASK)};
if (pointer == nullptr) {
// It's possible that this function has been called while updating the
// pagetable after unmapping a VMA. In that case the underlying VMA will no
// longer exist, and we should just leave the pagetable entry blank.
- current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+ current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
nullptr, Common::PageType::Unmapped);
} else {
- current_page_table->pointers[vaddr >> PAGE_BITS].Store(
- pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
+ current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+ pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
}
break;
}
@@ -465,8 +468,8 @@ struct Memory::Impl {
*/
void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target,
Common::PageType type) {
- LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * PAGE_SIZE,
- (base + size) * PAGE_SIZE);
+ LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * YUZU_PAGESIZE,
+ (base + size) * YUZU_PAGESIZE);
// During boot, current_page_table might not be set yet, in which case we need not flush
if (system.IsPoweredOn()) {
@@ -474,7 +477,7 @@ struct Memory::Impl {
for (u64 i = 0; i < size; i++) {
const auto page = base + i;
if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
- gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
+ gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE);
}
}
}
@@ -485,7 +488,7 @@ struct Memory::Impl {
if (!target) {
ASSERT_MSG(type != Common::PageType::Memory,
- "Mapping memory page without a pointer @ {:016x}", base * PAGE_SIZE);
+ "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE);
while (base != end) {
page_table.pointers[base].Store(nullptr, type);
@@ -496,14 +499,14 @@ struct Memory::Impl {
} else {
while (base != end) {
page_table.pointers[base].Store(
- system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS), type);
- page_table.backing_addr[base] = target - (base << PAGE_BITS);
+ system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type);
+ page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);
ASSERT_MSG(page_table.pointers[base].Pointer(),
"memory mapping base yield a nullptr within the table");
base += 1;
- target += PAGE_SIZE;
+ target += YUZU_PAGESIZE;
}
}
}
@@ -518,7 +521,7 @@ struct Memory::Impl {
}
// Avoid adding any extra logic to this fast-path block
- const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+ const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
return &pointer[vaddr];
}
@@ -657,7 +660,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
const Kernel::KProcess& process = *system.CurrentProcess();
const auto& page_table = process.PageTable().PageTableImpl();
- const size_t page = vaddr >> PAGE_BITS;
+ const size_t page = vaddr >> YUZU_PAGEBITS;
if (page >= page_table.pointers.size()) {
return false;
}
@@ -668,9 +671,9 @@ bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
bool Memory::IsValidVirtualAddressRange(VAddr base, u64 size) const {
VAddr end = base + size;
- VAddr page = Common::AlignDown(base, PAGE_SIZE);
+ VAddr page = Common::AlignDown(base, YUZU_PAGESIZE);
- for (; page < end; page += PAGE_SIZE) {
+ for (; page < end; page += YUZU_PAGESIZE) {
if (!IsValidVirtualAddress(page)) {
return false;
}
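
The memory.cpp changes above are likewise pure renames; the page-table lookups keep splitting a virtual address into an index (vaddr >> YUZU_PAGEBITS) and an offset within the page (vaddr & YUZU_PAGEMASK). A small sketch of that decomposition with illustrative values (not yuzu's actual PageTable type):

```cpp
// Virtual-address decomposition used throughout memory.cpp, shown standalone.
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t kPageBits = 12;
constexpr std::uint64_t kPageSize = 1ULL << kPageBits;
constexpr std::uint64_t kPageMask = kPageSize - 1;

int main() {
    const std::uint64_t vaddr = 0x123456789AULL;
    const std::uint64_t page_index = vaddr >> kPageBits;  // 0x1234567
    const std::uint64_t page_offset = vaddr & kPageMask;  // 0x89A

    // Recombining always reproduces the original address.
    const std::uint64_t roundtrip = (page_index << kPageBits) + page_offset;
    std::printf("index=%#llx offset=%#llx roundtrip_ok=%d\n",
                static_cast<unsigned long long>(page_index),
                static_cast<unsigned long long>(page_offset),
                roundtrip == vaddr);
    return 0;
}
```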
diff --git a/src/core/memory.h b/src/core/memory.h
index 2a21fbcfd..a11ff8766 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -27,9 +27,9 @@ namespace Core::Memory {
* Page size used by the ARM architecture. This is the smallest granularity with which memory can
* be mapped.
*/
-constexpr std::size_t PAGE_BITS = 12;
-constexpr u64 PAGE_SIZE = 1ULL << PAGE_BITS;
-constexpr u64 PAGE_MASK = PAGE_SIZE - 1;
+constexpr std::size_t YUZU_PAGEBITS = 12;
+constexpr u64 YUZU_PAGESIZE = 1ULL << YUZU_PAGEBITS;
+constexpr u64 YUZU_PAGEMASK = YUZU_PAGESIZE - 1;
/// Virtual user-space memory regions
enum : VAddr {