author:    Franco M <francomaro@gmail.com>  2023-10-21 07:25:27 +0200
committer: GitHub <noreply@github.com>      2023-10-21 07:25:27 +0200
commit:    b76a1d987ff83b831a19a0c19f9fcd96c504c077
tree:      4b08482cc3d34e341d7d8620182854c248f899b5 /src
parent:    Reverted dirty code in main.
parent:    Merge pull request #11748 from liamwhite/kern_1700
Diffstat (limited to 'src')
33 files changed, 588 insertions, 289 deletions
diff --git a/src/core/debugger/gdbstub.cpp b/src/core/debugger/gdbstub.cpp
index 82964f0a1..2076aa8a2 100644
--- a/src/core/debugger/gdbstub.cpp
+++ b/src/core/debugger/gdbstub.cpp
@@ -822,11 +822,13 @@ void GDBStub::HandleRcmd(const std::vector<u8>& command) {
             const char i = True(mem_info.attribute & MemoryAttribute::IpcLocked) ? 'I' : '-';
             const char d = True(mem_info.attribute & MemoryAttribute::DeviceShared) ? 'D' : '-';
             const char u = True(mem_info.attribute & MemoryAttribute::Uncached) ? 'U' : '-';
+            const char p =
+                True(mem_info.attribute & MemoryAttribute::PermissionLocked) ? 'P' : '-';
 
-            reply +=
-                fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{} [{}, {}]\n",
-                            mem_info.base_address, mem_info.base_address + mem_info.size - 1,
-                            perm, state, l, i, d, u, mem_info.ipc_count, mem_info.device_count);
+            reply += fmt::format(" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n",
+                                 mem_info.base_address,
+                                 mem_info.base_address + mem_info.size - 1, perm, state, l, i,
+                                 d, u, p, mem_info.ipc_count, mem_info.device_count);
        }
 
        const uintptr_t next_address = mem_info.base_address + mem_info.size;
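Context for the hunk above: the memory-map rcmd reply gains a fifth attribute column, 'P', for the PermissionLocked attribute introduced later in this merge. A standalone sketch of what one reply line looks like with made-up values (illustrative only, not code from the commit):

    #include <fmt/format.h>

    int main() {
        // Hypothetical region: r-x CodeData pages that are locked ('L') and
        // permission-locked ('P'); the other attribute columns stay '-'.
        fmt::print(" {:#012x} - {:#012x} {} {} {}{}{}{}{} [{}, {}]\n", 0x8000000ull,
                   0x8004fffull, "r-x", "CodeData", 'L', '-', '-', '-', 'P', 0, 0);
    }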
diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp
index 1f2db673c..a0e20bbbb 100644
--- a/src/core/hle/kernel/init/init_slab_setup.cpp
+++ b/src/core/hle/kernel/init/init_slab_setup.cpp
@@ -106,7 +106,7 @@ static_assert(KernelPageBufferAdditionalSize ==
 /// memory.
 static KPhysicalAddress TranslateSlabAddrToPhysical(KMemoryLayout& memory_layout,
                                                     KVirtualAddress slab_addr) {
-    slab_addr -= GetInteger(memory_layout.GetSlabRegionAddress());
+    slab_addr -= memory_layout.GetSlabRegion().GetAddress();
     return GetInteger(slab_addr) + Core::DramMemoryMap::SlabHeapBase;
 }
 
@@ -196,7 +196,12 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
     auto& kernel = system.Kernel();
 
     // Get the start of the slab region, since that's where we'll be working.
-    KVirtualAddress address = memory_layout.GetSlabRegionAddress();
+    const KMemoryRegion& slab_region = memory_layout.GetSlabRegion();
+    KVirtualAddress address = slab_region.GetAddress();
+
+    // Clear the slab region.
+    // TODO: implement access to kernel VAs.
+    // std::memset(device_ptr, 0, slab_region.GetSize());
 
     // Initialize slab type array to be in sorted order.
     std::array<KSlabType, KSlabType_Count> slab_types;
diff --git a/src/core/hle/kernel/initial_process.h b/src/core/hle/kernel/initial_process.h
index 82195f4f7..2c95269fc 100644
--- a/src/core/hle/kernel/initial_process.h
+++ b/src/core/hle/kernel/initial_process.h
@@ -19,4 +19,8 @@ static inline KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
                                        MainMemoryAddress);
 }
 
+static inline size_t GetInitialProcessBinarySize() {
+    return InitialProcessBinarySizeMax;
+}
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h
index 41a29da24..ef3f61321 100644
--- a/src/core/hle/kernel/k_memory_block.h
+++ b/src/core/hle/kernel/k_memory_block.h
@@ -36,6 +36,7 @@ enum class KMemoryState : u32 {
     FlagCanChangeAttribute = (1 << 24),
     FlagCanCodeMemory = (1 << 25),
     FlagLinearMapped = (1 << 26),
+    FlagCanPermissionLock = (1 << 27),
 
     FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc |
                 FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical |
@@ -50,12 +51,16 @@ enum class KMemoryState : u32 {
                 FlagLinearMapped,
 
     Free = static_cast<u32>(Svc::MemoryState::Free),
-    Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
-         FlagCanAlignedDeviceMap,
+
+    IoMemory = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped | FlagCanDeviceMap |
+               FlagCanAlignedDeviceMap,
+    IoRegister =
+        static_cast<u32>(Svc::MemoryState::Io) | FlagCanDeviceMap | FlagCanAlignedDeviceMap,
+
     Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical,
     Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess,
     CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess |
-               FlagCanCodeMemory,
+               FlagCanCodeMemory | FlagCanPermissionLock,
     Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory,
     Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted |
              FlagLinearMapped,
@@ -65,7 +70,8 @@ enum class KMemoryState : u32 {
     AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess |
                 FlagCanCodeAlias,
     AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData |
-                    FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory,
+                    FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory |
+                    FlagCanPermissionLock,
 
     Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap |
           FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
@@ -73,7 +79,7 @@ enum class KMemoryState : u32 {
     Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap |
             FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
 
-    ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagLinearMapped,
+    ThreadLocal = static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagLinearMapped,
 
     Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc |
                  FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc |
@@ -94,7 +100,7 @@ enum class KMemoryState : u32 {
     NonDeviceIpc =
         static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc,
 
-    Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped,
+    Kernel = static_cast<u32>(Svc::MemoryState::Kernel),
 
     GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped |
                     FlagReferenceCounted | FlagCanDebug | FlagLinearMapped,
@@ -105,34 +111,36 @@ enum class KMemoryState : u32 {
     Insecure = static_cast<u32>(Svc::MemoryState::Insecure) | FlagMapped | FlagReferenceCounted |
                FlagLinearMapped | FlagCanChangeAttribute | FlagCanDeviceMap |
-               FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc,
+               FlagCanAlignedDeviceMap | FlagCanQueryPhysical | FlagCanUseNonSecureIpc |
+               FlagCanUseNonDeviceIpc,
 };
 DECLARE_ENUM_FLAG_OPERATORS(KMemoryState);
 
 static_assert(static_cast<u32>(KMemoryState::Free) == 0x00000000);
-static_assert(static_cast<u32>(KMemoryState::Io) == 0x00182001);
+static_assert(static_cast<u32>(KMemoryState::IoMemory) == 0x00182001);
+static_assert(static_cast<u32>(KMemoryState::IoRegister) == 0x00180001);
 static_assert(static_cast<u32>(KMemoryState::Static) == 0x00042002);
 static_assert(static_cast<u32>(KMemoryState::Code) == 0x04DC7E03);
-static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x07FEBD04);
+static_assert(static_cast<u32>(KMemoryState::CodeData) == 0x0FFEBD04);
 static_assert(static_cast<u32>(KMemoryState::Normal) == 0x077EBD05);
 static_assert(static_cast<u32>(KMemoryState::Shared) == 0x04402006);
 static_assert(static_cast<u32>(KMemoryState::AliasCode) == 0x04DD7E08);
-static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x07FFBD09);
+static_assert(static_cast<u32>(KMemoryState::AliasCodeData) == 0x0FFFBD09);
 static_assert(static_cast<u32>(KMemoryState::Ipc) == 0x045C3C0A);
 static_assert(static_cast<u32>(KMemoryState::Stack) == 0x045C3C0B);
-static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400200C);
+static_assert(static_cast<u32>(KMemoryState::ThreadLocal) == 0x0400000C);
 static_assert(static_cast<u32>(KMemoryState::Transfered) == 0x055C3C0D);
 static_assert(static_cast<u32>(KMemoryState::SharedTransfered) == 0x045C380E);
 static_assert(static_cast<u32>(KMemoryState::SharedCode) == 0x0440380F);
 static_assert(static_cast<u32>(KMemoryState::Inaccessible) == 0x00000010);
 static_assert(static_cast<u32>(KMemoryState::NonSecureIpc) == 0x045C3811);
 static_assert(static_cast<u32>(KMemoryState::NonDeviceIpc) == 0x044C2812);
-static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00002013);
+static_assert(static_cast<u32>(KMemoryState::Kernel) == 0x00000013);
 static_assert(static_cast<u32>(KMemoryState::GeneratedCode) == 0x04402214);
 static_assert(static_cast<u32>(KMemoryState::CodeOut) == 0x04402015);
 static_assert(static_cast<u32>(KMemoryState::Coverage) == 0x00002016);
-static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x05583817);
+static_assert(static_cast<u32>(KMemoryState::Insecure) == 0x055C3817);
 
 enum class KMemoryPermission : u8 {
     None = 0,
@@ -182,8 +190,9 @@ enum class KMemoryAttribute : u8 {
     IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
     DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared),
     Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
+    PermissionLocked = static_cast<u8>(Svc::MemoryAttribute::PermissionLocked),
 
-    SetMask = Uncached,
+    SetMask = Uncached | PermissionLocked,
 };
 DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
 
@@ -261,6 +270,10 @@ struct KMemoryInfo {
         return m_state;
     }
 
+    constexpr Svc::MemoryState GetSvcState() const {
+        return static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask);
+    }
+
     constexpr KMemoryPermission GetPermission() const {
         return m_permission;
     }
@@ -326,6 +339,10 @@ public:
         return this->GetEndAddress() - 1;
     }
 
+    constexpr KMemoryState GetState() const {
+        return m_memory_state;
+    }
+
     constexpr u16 GetIpcLockCount() const {
         return m_ipc_lock_count;
     }
@@ -443,6 +460,13 @@ public:
         }
     }
 
+    constexpr void UpdateAttribute(KMemoryAttribute mask, KMemoryAttribute attr) {
+        ASSERT(False(mask & KMemoryAttribute::IpcLocked));
+        ASSERT(False(mask & KMemoryAttribute::DeviceShared));
+
+        m_attribute = (m_attribute & ~mask) | attr;
+    }
+
     constexpr void Split(KMemoryBlock* block, KProcessAddress addr) {
         ASSERT(this->GetAddress() < addr);
         ASSERT(this->Contains(addr));
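The masked read-modify-write in UpdateAttribute is the heart of the new attribute plumbing: only bits selected by mask may change, and IpcLocked/DeviceShared are asserted out because those are maintained through dedicated lock counts. A minimal self-contained model of the same update rule (simplified u8 flags, a sketch rather than the kernel's own types):

    #include <cassert>
    #include <cstdint>

    enum Attr : std::uint8_t {
        IpcLocked = 1 << 1,
        DeviceShared = 1 << 2,
        Uncached = 1 << 3,
        PermissionLocked = 1 << 4,
    };

    // Replace only the bits chosen by mask; everything else is preserved.
    std::uint8_t UpdateAttribute(std::uint8_t current, std::uint8_t mask, std::uint8_t attr) {
        assert((mask & (IpcLocked | DeviceShared)) == 0); // tracked via lock counts instead
        return static_cast<std::uint8_t>((current & ~mask) | (attr & mask));
    }

    int main() {
        std::uint8_t a = Uncached;
        a = UpdateAttribute(a, PermissionLocked, PermissionLocked); // set the new bit
        assert(a == (Uncached | PermissionLocked));                 // unrelated bits survive
    }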
diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp
index ab75f550e..58a1e7216 100644
--- a/src/core/hle/kernel/k_memory_block_manager.cpp
+++ b/src/core/hle/kernel/k_memory_block_manager.cpp
@@ -160,8 +160,8 @@ void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator,
         }
 
         // Update block state.
-        it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
-                   static_cast<u8>(clear_disable_attr));
+        it->Update(state, perm, attr, it->GetAddress() == address,
+                   static_cast<u8>(set_disable_attr), static_cast<u8>(clear_disable_attr));
         cur_address += cur_info.GetSize();
         remaining_pages -= cur_info.GetNumPages();
     }
@@ -175,7 +175,9 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllo
                                         KProcessAddress address, size_t num_pages,
                                         KMemoryState test_state, KMemoryPermission test_perm,
                                         KMemoryAttribute test_attr, KMemoryState state,
-                                        KMemoryPermission perm, KMemoryAttribute attr) {
+                                        KMemoryPermission perm, KMemoryAttribute attr,
+                                        KMemoryBlockDisableMergeAttribute set_disable_attr,
+                                        KMemoryBlockDisableMergeAttribute clear_disable_attr) {
     // Ensure for auditing that we never end up with an invalid tree.
     KScopedMemoryBlockManagerAuditor auditor(this);
     ASSERT(Common::IsAligned(GetInteger(address), PageSize));
@@ -214,7 +216,8 @@ void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllo
             }
 
             // Update block state.
-            it->Update(state, perm, attr, false, 0, 0);
+            it->Update(state, perm, attr, false, static_cast<u8>(set_disable_attr),
+                       static_cast<u8>(clear_disable_attr));
             cur_address += cur_info.GetSize();
             remaining_pages -= cur_info.GetNumPages();
         } else {
@@ -284,6 +287,65 @@ void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocat
     this->CoalesceForUpdate(allocator, address, num_pages);
 }
 
+void KMemoryBlockManager::UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator,
+                                          KProcessAddress address, size_t num_pages,
+                                          KMemoryAttribute mask, KMemoryAttribute attr) {
+    // Ensure for auditing that we never end up with an invalid tree.
+    KScopedMemoryBlockManagerAuditor auditor(this);
+    ASSERT(Common::IsAligned(GetInteger(address), PageSize));
+
+    KProcessAddress cur_address = address;
+    size_t remaining_pages = num_pages;
+    iterator it = this->FindIterator(address);
+
+    while (remaining_pages > 0) {
+        const size_t remaining_size = remaining_pages * PageSize;
+        KMemoryInfo cur_info = it->GetMemoryInfo();
+
+        if ((it->GetAttribute() & mask) != attr) {
+            // If we need to, create a new block before and insert it.
+            if (cur_info.GetAddress() != GetInteger(cur_address)) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address);
+                it = m_memory_block_tree.insert(*new_block);
+                it++;
+
+                cur_info = it->GetMemoryInfo();
+                cur_address = cur_info.GetAddress();
+            }
+
+            // If we need to, create a new block after and insert it.
+            if (cur_info.GetSize() > remaining_size) {
+                KMemoryBlock* new_block = allocator->Allocate();
+
+                it->Split(new_block, cur_address + remaining_size);
+                it = m_memory_block_tree.insert(*new_block);
+
+                cur_info = it->GetMemoryInfo();
+            }
+
+            // Update block state.
+            it->UpdateAttribute(mask, attr);
+            cur_address += cur_info.GetSize();
+            remaining_pages -= cur_info.GetNumPages();
+        } else {
+            // If we already have the right attributes, just advance.
+            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
+                remaining_pages = 0;
+                cur_address += remaining_size;
+            } else {
+                remaining_pages =
+                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
+                cur_address = cur_info.GetEndAddress();
+            }
+        }
+        it++;
+    }
+
+    this->CoalesceForUpdate(allocator, address, num_pages);
+}
+
 // Debug.
 bool KMemoryBlockManager::CheckState() const {
     // Loop over every block, ensuring that we are sorted and coalesced.
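UpdateAttribute above follows the block manager's usual interval discipline: split the block containing the range start, split again past the range end, rewrite only fully covered blocks, then coalesce. A toy version over a sorted interval map (a sketch that assumes the map already covers the queried range; the real red-black tree, allocator, and page accounting are elided):

    #include <cstdint>
    #include <map>
    #include <utility>

    // interval start -> {size, attribute}: a stand-in for the block tree.
    using Blocks = std::map<std::uint64_t, std::pair<std::uint64_t, std::uint8_t>>;

    void UpdateAttribute(Blocks& blocks, std::uint64_t addr, std::uint64_t size,
                         std::uint8_t mask, std::uint8_t attr) {
        auto it = std::prev(blocks.upper_bound(addr)); // block containing addr
        // Split the head so the range starts on a block boundary.
        if (it->first != addr) {
            const std::uint64_t head = addr - it->first;
            blocks[addr] = {it->second.first - head, it->second.second};
            it->second.first = head;
            it = blocks.find(addr);
        }
        // Walk blocks, splitting the tail block and rewriting the masked bits.
        for (const std::uint64_t end = addr + size; it != blocks.end() && it->first < end; ++it) {
            if (it->first + it->second.first > end) { // split the tail
                blocks[end] = {it->first + it->second.first - end, it->second.second};
                it->second.first = end - it->first;
            }
            it->second.second =
                static_cast<std::uint8_t>((it->second.second & ~mask) | (attr & mask));
        }
        // The real implementation re-coalesces equal neighbors afterwards.
    }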
diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h
index 96496e990..cb7b6f430 100644
--- a/src/core/hle/kernel/k_memory_block_manager.h
+++ b/src/core/hle/kernel/k_memory_block_manager.h
@@ -115,7 +115,11 @@ public:
     void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
                        size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
                        KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
-                       KMemoryAttribute attr);
+                       KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr,
+                       KMemoryBlockDisableMergeAttribute clear_disable_attr);
+
+    void UpdateAttribute(KMemoryBlockManagerUpdateAllocator* allocator, KProcessAddress address,
+                         size_t num_pages, KMemoryAttribute mask, KMemoryAttribute attr);
 
     iterator FindIterator(KProcessAddress address) const {
         return m_memory_block_tree.find(KMemoryBlock(
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index 54a71df56..c8122644f 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -137,11 +137,9 @@ public:
         return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack);
     }
 
-    KVirtualAddress GetSlabRegionAddress() const {
-        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab))
-            .GetAddress();
+    const KMemoryRegion& GetSlabRegion() const {
+        return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab));
     }
-
     const KMemoryRegion& GetDeviceRegion(KMemoryRegionType type) const {
         return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type));
     }
diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp
index 74d8169e0..637558e10 100644
--- a/src/core/hle/kernel/k_memory_manager.cpp
+++ b/src/core/hle/kernel/k_memory_manager.cpp
@@ -119,7 +119,8 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
     // Free each region to its corresponding heap.
     size_t reserved_sizes[MaxManagerCount] = {};
     const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
-    const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
+    const size_t ini_size = GetInitialProcessBinarySize();
+    const KPhysicalAddress ini_end = ini_start + ini_size;
     const KPhysicalAddress ini_last = ini_end - 1;
     for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
         if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
@@ -137,13 +138,13 @@ void KMemoryManager::Initialize(KVirtualAddress management_region, size_t manage
                 }
 
                 // Open/reserve the ini memory.
-                manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize);
-                reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;
+                manager.OpenFirst(ini_start, ini_size / PageSize);
+                reserved_sizes[it.GetAttributes()] += ini_size;
 
                 // Free memory after the ini to the heap.
                 if (ini_last != cur_last) {
                     ASSERT(cur_end != 0);
-                    manager.Free(ini_end, cur_end - ini_end);
+                    manager.Free(ini_end, (cur_end - ini_end) / PageSize);
                 }
             } else {
                 // Ensure there's no partial overlap with the ini image.
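Worth noting in the last hunk: Free() takes a count of pages, and the old code passed a byte count, so the region after the initial process binary was freed with a wildly wrong size. A tiny sanity check of the corrected conversion (illustrative addresses, 4 KiB pages assumed):

    #include <cassert>
    #include <cstddef>

    int main() {
        constexpr std::size_t PageSize = 0x1000;
        constexpr std::size_t ini_end = 0x80120000, cur_end = 0x80200000;
        // Free(ini_end, num_pages): bytes must be converted to pages first.
        assert((cur_end - ini_end) / PageSize == 0xE0); // 224 pages, not 0xE0000
    }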
diff --git a/src/core/hle/kernel/k_memory_region_type.h b/src/core/hle/kernel/k_memory_region_type.h
index e5630c1ac..bcbf450f0 100644
--- a/src/core/hle/kernel/k_memory_region_type.h
+++ b/src/core/hle/kernel/k_memory_region_type.h
@@ -190,9 +190,15 @@ static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() ==
 constexpr inline auto KMemoryRegionType_DramKernelSecureAppletMemory =
     KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(
         KMemoryRegionAttr_LinearMapped);
+constexpr inline const auto KMemoryRegionType_DramKernelSecureUnknown =
+    KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 1).SetAttribute(
+        KMemoryRegionAttr_LinearMapped);
 static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() ==
               (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
                KMemoryRegionAttr_LinearMapped));
+static_assert(KMemoryRegionType_DramKernelSecureUnknown.GetValue() ==
+              (0x28E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap |
+               KMemoryRegionAttr_LinearMapped));
 
 constexpr inline auto KMemoryRegionType_DramReservedEarly =
     KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
@@ -217,16 +223,18 @@ constexpr inline auto KMemoryRegionType_DramPoolPartition =
 static_assert(KMemoryRegionType_DramPoolPartition.GetValue() ==
               (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 
-constexpr inline auto KMemoryRegionType_DramPoolManagement =
-    KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(
+// UNUSED: .Derive(4, 1);
+// UNUSED: .Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_DramPoolManagement =
+    KMemoryRegionType_DramPoolPartition.Derive(4, 0).SetAttribute(
         KMemoryRegionAttr_CarveoutProtected);
-constexpr inline auto KMemoryRegionType_DramUserPool =
-    KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();
+constexpr inline const auto KMemoryRegionType_DramUserPool =
+    KMemoryRegionType_DramPoolPartition.Derive(4, 3);
 static_assert(KMemoryRegionType_DramPoolManagement.GetValue() ==
-              (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
+              (0xE6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
                KMemoryRegionAttr_CarveoutProtected));
 static_assert(KMemoryRegionType_DramUserPool.GetValue() ==
-              (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x266 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 
 constexpr inline auto KMemoryRegionType_DramApplicationPool =
     KMemoryRegionType_DramUserPool.Derive(4, 0);
@@ -237,60 +245,63 @@ constexpr inline auto KMemoryRegionType_DramSystemNonSecurePool =
 constexpr inline auto KMemoryRegionType_DramSystemPool =
     KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
 static_assert(KMemoryRegionType_DramApplicationPool.GetValue() ==
-              (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0xE66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 static_assert(KMemoryRegionType_DramAppletPool.GetValue() ==
-              (0xBA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x1666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() ==
-              (0xDA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
+              (0x1A66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
 static_assert(KMemoryRegionType_DramSystemPool.GetValue() ==
-              (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
+              (0x2666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap |
               KMemoryRegionAttr_CarveoutProtected));
 
 constexpr inline auto KMemoryRegionType_VirtualDramHeapBase =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 0);
 constexpr inline auto KMemoryRegionType_VirtualDramKernelPtHeap =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 1);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 1);
 constexpr inline auto KMemoryRegionType_VirtualDramKernelTraceBuffer =
-    KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);
+    KMemoryRegionType_Dram.DeriveSparse(1, 4, 2);
 static_assert(KMemoryRegionType_VirtualDramHeapBase.GetValue() == 0x1A);
 static_assert(KMemoryRegionType_VirtualDramKernelPtHeap.GetValue() == 0x2A);
 static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
 
-// UNUSED: .DeriveSparse(2, 2, 0);
-constexpr inline auto KMemoryRegionType_VirtualDramUnknownDebug =
-    KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
-static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
-
-constexpr inline auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
-    KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
-static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));
-
-constexpr inline auto KMemoryRegionType_VirtualDramKernelInitPt =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
-constexpr inline auto KMemoryRegionType_VirtualDramPoolManagement =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
-constexpr inline auto KMemoryRegionType_VirtualDramUserPool =
-    KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
-static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x19A);
-static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);
-static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x31A);
+// UNUSED: .Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureUnknown =
+    KMemoryRegionType_Dram.Advance(2).Derive(4, 3);
+static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x32));
+static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x52));
+static_assert(KMemoryRegionType_VirtualDramKernelSecureUnknown.GetValue() == (0x92));
+
+// UNUSED: .Derive(4, 3);
+constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramUserPool =
+    KMemoryRegionType_VirtualDramHeapBase.Derive(4, 2);
+static_assert(KMemoryRegionType_VirtualDramKernelInitPt.GetValue() == 0x31A);
+static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x51A);
+static_assert(KMemoryRegionType_VirtualDramUserPool.GetValue() == 0x61A);
 
 // NOTE: For unknown reason, the pools are derived out-of-order here.
 // It's worth eventually trying to understand why Nintendo made this choice.
 // UNUSED: .Derive(6, 0);
 // UNUSED: .Derive(6, 1);
-constexpr inline auto KMemoryRegionType_VirtualDramAppletPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 2);
-constexpr inline auto KMemoryRegionType_VirtualDramApplicationPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 3);
-constexpr inline auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 4);
-constexpr inline auto KMemoryRegionType_VirtualDramSystemPool =
-    KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);
-static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x1B1A);
-static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x271A);
-static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);
-static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x331A);
+constexpr inline const auto KMemoryRegionType_VirtualDramApplicationPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 0);
+constexpr inline const auto KMemoryRegionType_VirtualDramAppletPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 1);
+constexpr inline const auto KMemoryRegionType_VirtualDramSystemNonSecurePool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 2);
+constexpr inline const auto KMemoryRegionType_VirtualDramSystemPool =
+    KMemoryRegionType_VirtualDramUserPool.Derive(4, 3);
+static_assert(KMemoryRegionType_VirtualDramApplicationPool.GetValue() == 0x361A);
+static_assert(KMemoryRegionType_VirtualDramAppletPool.GetValue() == 0x561A);
+static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x661A);
+static_assert(KMemoryRegionType_VirtualDramSystemPool.GetValue() == 0x961A);
 
 constexpr inline auto KMemoryRegionType_ArchDeviceBase =
     KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
@@ -354,12 +365,14 @@ constexpr inline auto KMemoryRegionType_KernelTemp =
 static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);
 
 constexpr KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
-    if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
-        return KMemoryRegionType_VirtualDramKernelTraceBuffer;
-    } else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
+    if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
         return KMemoryRegionType_VirtualDramKernelPtHeap;
     } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
         return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
+    } else if (KMemoryRegionType_DramKernelSecureUnknown.IsAncestorOf(type_id)) {
+        return KMemoryRegionType_VirtualDramKernelSecureUnknown;
+    } else if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
+        return KMemoryRegionType_VirtualDramKernelTraceBuffer;
     } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
         return KMemoryRegionType_VirtualDramUnknownDebug;
     } else {
diff --git a/src/core/hle/kernel/k_page_group.h b/src/core/hle/kernel/k_page_group.h
index b32909f05..de9d63a8d 100644
--- a/src/core/hle/kernel/k_page_group.h
+++ b/src/core/hle/kernel/k_page_group.h
@@ -183,12 +183,17 @@ private:
 
 class KScopedPageGroup {
 public:
-    explicit KScopedPageGroup(const KPageGroup* gp) : m_pg(gp) {
+    explicit KScopedPageGroup(const KPageGroup* gp, bool not_first = true) : m_pg(gp) {
         if (m_pg) {
-            m_pg->Open();
+            if (not_first) {
+                m_pg->Open();
+            } else {
+                m_pg->OpenFirst();
+            }
         }
     }
-    explicit KScopedPageGroup(const KPageGroup& gp) : KScopedPageGroup(std::addressof(gp)) {}
+    explicit KScopedPageGroup(const KPageGroup& gp, bool not_first = true)
+        : KScopedPageGroup(std::addressof(gp), not_first) {}
     ~KScopedPageGroup() {
         if (m_pg) {
             m_pg->Close();
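KScopedPageGroup is a straightforward RAII guard, and the new not_first flag selects OpenFirst() when the pages have never been referenced before; this pairs with the MapFirstGroup operation used in k_page_table.cpp below. A compact standalone sketch of the same shape (toy refcount and assumptions, not the kernel's KPageGroup):

    #include <cstdio>

    struct PageGroup {
        mutable int refs = 0;
        void Open() const { ++refs; }        // add a reference to already-tracked pages
        void OpenFirst() const { refs = 1; } // take the very first reference
        void Close() const { --refs; }       // drop the reference
    };

    class ScopedPageGroup {
    public:
        explicit ScopedPageGroup(const PageGroup& pg, bool not_first = true) : m_pg(&pg) {
            not_first ? m_pg->Open() : m_pg->OpenFirst();
        }
        ~ScopedPageGroup() { m_pg->Close(); } // runs even on early error paths
    private:
        const PageGroup* m_pg;
    };

    int main() {
        PageGroup pg;
        {
            ScopedPageGroup guard(pg, /*not_first=*/false); // first mapping of these pages
            std::printf("refs while mapped: %d\n", pg.refs); // 1
        }
        std::printf("refs after scope: %d\n", pg.refs); // 0
    }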
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 0b0cef984..217ccbae3 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -505,7 +505,7 @@ Result KPageTable::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress
     R_TRY(this->CheckMemoryStateContiguous(
         std::addressof(num_dst_allocator_blocks), dst_address, size,
         KMemoryState::FlagCanCodeAlias, KMemoryState::FlagCanCodeAlias,
         KMemoryPermission::None, KMemoryPermission::None,
-        KMemoryAttribute::All, KMemoryAttribute::None));
+        KMemoryAttribute::All & ~KMemoryAttribute::PermissionLocked, KMemoryAttribute::None));
 
     // Determine whether any pages being unmapped are code.
     bool any_code_pages = false;
@@ -1724,29 +1724,43 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
                 PageSize;
 
             // While we have pages to map, map them.
-            while (map_pages > 0) {
-                // Check if we're at the end of the physical block.
-                if (pg_pages == 0) {
-                    // Ensure there are more pages to map.
-                    ASSERT(pg_it != pg.end());
-
-                    // Advance our physical block.
-                    ++pg_it;
-                    pg_phys_addr = pg_it->GetAddress();
-                    pg_pages = pg_it->GetNumPages();
+            {
+                // Create a page group for the current mapping range.
+                KPageGroup cur_pg(m_kernel, m_block_info_manager);
+                {
+                    ON_RESULT_FAILURE_2 {
+                        cur_pg.OpenFirst();
+                        cur_pg.Close();
+                    };
+
+                    size_t remain_pages = map_pages;
+                    while (remain_pages > 0) {
+                        // Check if we're at the end of the physical block.
+                        if (pg_pages == 0) {
+                            // Ensure there are more pages to map.
+                            ASSERT(pg_it != pg.end());
+
+                            // Advance our physical block.
+                            ++pg_it;
+                            pg_phys_addr = pg_it->GetAddress();
+                            pg_pages = pg_it->GetNumPages();
+                        }
+
+                        // Add whatever we can to the current block.
+                        const size_t cur_pages = std::min(pg_pages, remain_pages);
+                        R_TRY(cur_pg.AddBlock(pg_phys_addr +
+                                                  ((pg_pages - cur_pages) * PageSize),
+                                              cur_pages));
+
+                        // Advance.
+                        remain_pages -= cur_pages;
+                        pg_pages -= cur_pages;
+                    }
                 }
 
-                // Map whatever we can.
-                const size_t cur_pages = std::min(pg_pages, map_pages);
-                R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
-                              OperationType::MapFirst, pg_phys_addr));
-
-                // Advance.
-                cur_address += cur_pages * PageSize;
-                map_pages -= cur_pages;
-
-                pg_phys_addr += cur_pages * PageSize;
-                pg_pages -= cur_pages;
+                // Map the pages.
+                R_TRY(this->Operate(cur_address, map_pages, cur_pg,
+                                    OperationType::MapFirstGroup));
             }
         }
 
@@ -1770,7 +1784,11 @@ Result KPageTable::MapPhysicalMemory(KProcessAddress address, size_t size) {
     m_memory_block_manager.UpdateIfMatch(
         std::addressof(allocator), address, size / PageSize, KMemoryState::Free,
         KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
-        KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
+        KMemoryPermission::UserReadWrite, KMemoryAttribute::None,
+        address == this->GetAliasRegionStart()
+            ? KMemoryBlockDisableMergeAttribute::Normal
+            : KMemoryBlockDisableMergeAttribute::None,
+        KMemoryBlockDisableMergeAttribute::None);
 
     R_SUCCEED();
 }
@@ -1868,6 +1886,13 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
     // Iterate over the memory, unmapping as we go.
     auto it = m_memory_block_manager.FindIterator(cur_address);
+
+    const auto clear_merge_attr =
+        (it->GetState() == KMemoryState::Normal &&
+         it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address)
+            ? KMemoryBlockDisableMergeAttribute::Normal
+            : KMemoryBlockDisableMergeAttribute::None;
+
     while (true) {
         // Check that the iterator is valid.
         ASSERT(it != m_memory_block_manager.end());
@@ -1905,7 +1930,7 @@ Result KPageTable::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
     m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
                                   KMemoryState::Free, KMemoryPermission::None,
                                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None);
+                                  clear_merge_attr);
 
     // We succeeded.
     R_SUCCEED();
@@ -2379,8 +2404,7 @@ Result KPageTable::MapPageGroup(KProcessAddress* out_addr, const KPageGroup& pg,
     KScopedPageTableUpdater updater(this);
 
     // Perform mapping operation.
-    const KPageProperties properties = {perm, state == KMemoryState::Io, false,
-                                        DisableMergeAttribute::DisableHead};
+    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
     R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
 
     // Update the blocks.
@@ -2422,8 +2446,7 @@ Result KPageTable::MapPageGroup(KProcessAddress addr, const KPageGroup& pg, KMem
     KScopedPageTableUpdater updater(this);
 
     // Perform mapping operation.
-    const KPageProperties properties = {perm, state == KMemoryState::Io, false,
-                                        DisableMergeAttribute::DisableHead};
+    const KPageProperties properties = {perm, false, false, DisableMergeAttribute::DisableHead};
     R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
 
     // Update the blocks.
@@ -2652,11 +2675,18 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
     size_t num_allocator_blocks;
     constexpr auto AttributeTestMask =
         ~(KMemoryAttribute::SetMask | KMemoryAttribute::DeviceShared);
-    R_TRY(this->CheckMemoryState(
-        std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr),
-        std::addressof(num_allocator_blocks), addr, size, KMemoryState::FlagCanChangeAttribute,
-        KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None,
-        AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
+    const KMemoryState state_test_mask =
+        static_cast<KMemoryState>(((mask & static_cast<u32>(KMemoryAttribute::Uncached))
+                                       ? static_cast<u32>(KMemoryState::FlagCanChangeAttribute)
+                                       : 0) |
+                                  ((mask & static_cast<u32>(KMemoryAttribute::PermissionLocked))
+                                       ? static_cast<u32>(KMemoryState::FlagCanPermissionLock)
+                                       : 0));
+    R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm),
+                                 std::addressof(old_attr), std::addressof(num_allocator_blocks),
+                                 addr, size, state_test_mask, state_test_mask,
+                                 KMemoryPermission::None, KMemoryPermission::None,
+                                 AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask));
 
     // Create an update allocator.
     Result allocator_result{ResultSuccess};
@@ -2664,18 +2694,17 @@ Result KPageTable::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mas
                                                  m_memory_block_slab_manager,
                                                  num_allocator_blocks);
     R_TRY(allocator_result);
 
-    // Determine the new attribute.
-    const KMemoryAttribute new_attr =
-        static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) |
-                                       static_cast<KMemoryAttribute>(attr & mask)));
-
-    // Perform operation.
-    this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh);
+    // If we need to, perform a change attribute operation.
+    if (True(KMemoryAttribute::Uncached & static_cast<KMemoryAttribute>(mask))) {
+        // Perform operation.
+        R_TRY(this->Operate(addr, num_pages, old_perm,
+                            OperationType::ChangePermissionsAndRefreshAndFlush, 0));
+    }
 
     // Update the blocks.
-    m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm,
-                                  new_attr, KMemoryBlockDisableMergeAttribute::None,
-                                  KMemoryBlockDisableMergeAttribute::None);
+    m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages,
+                                           static_cast<KMemoryAttribute>(mask),
+                                           static_cast<KMemoryAttribute>(attr));
 
     R_SUCCEED();
 }
@@ -2863,7 +2892,8 @@ Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, KProcessAddress
                               &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
 
     // Set whether the locked memory was io.
-    *out_is_io = old_state == KMemoryState::Io;
+    *out_is_io =
+        static_cast<Svc::MemoryState>(old_state & KMemoryState::Mask) == Svc::MemoryState::Io;
 
     R_SUCCEED();
 }
@@ -3021,9 +3051,10 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, const KPageGr
     ASSERT(num_pages == page_group.GetNumPages());
 
     switch (operation) {
-    case OperationType::MapGroup: {
+    case OperationType::MapGroup:
+    case OperationType::MapFirstGroup: {
         // We want to maintain a new reference to every page in the group.
-        KScopedPageGroup spg(page_group);
+        KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
 
         for (const auto& node : page_group) {
             const size_t size{node.GetNumPages() * PageSize};
@@ -3065,7 +3096,6 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
             m_memory->UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize);
         break;
     }
-    case OperationType::MapFirst:
     case OperationType::Map: {
         ASSERT(map_addr);
         ASSERT(Common::IsAligned(GetInteger(map_addr), PageSize));
         // Open references to pages, if we should.
         if (IsHeapPhysicalAddress(m_kernel.MemoryLayout(), map_addr)) {
-            if (operation == OperationType::MapFirst) {
-                m_kernel.MemoryManager().OpenFirst(map_addr, num_pages);
-            } else {
-                m_kernel.MemoryManager().Open(map_addr, num_pages);
-            }
+            m_kernel.MemoryManager().Open(map_addr, num_pages);
         }
         break;
     }
@@ -3087,6 +3113,7 @@ Result KPageTable::Operate(KProcessAddress addr, size_t num_pages, KMemoryPermis
     }
     case OperationType::ChangePermissions:
     case OperationType::ChangePermissionsAndRefresh:
+    case OperationType::ChangePermissionsAndRefreshAndFlush:
         break;
     default:
         ASSERT(false);
@@ -3106,79 +3133,79 @@ void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
     }
 }
 
-KProcessAddress KPageTable::GetRegionAddress(KMemoryState state) const {
+KProcessAddress KPageTable::GetRegionAddress(Svc::MemoryState state) const {
     switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
         return m_address_space_start;
-    case KMemoryState::Normal:
+    case Svc::MemoryState::Normal:
         return m_heap_region_start;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
         return m_alias_region_start;
-    case KMemoryState::Stack:
+    case Svc::MemoryState::Stack:
         return m_stack_region_start;
-    case KMemoryState::Static:
-    case KMemoryState::ThreadLocal:
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::ThreadLocal:
         return m_kernel_map_region_start;
-    case KMemoryState::Io:
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Transfered:
-    case KMemoryState::SharedTransfered:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-    case KMemoryState::Coverage:
-    case KMemoryState::Insecure:
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
         return m_alias_code_region_start;
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
         return m_code_region_start;
     default:
         UNREACHABLE();
     }
 }
 
-size_t KPageTable::GetRegionSize(KMemoryState state) const {
+size_t KPageTable::GetRegionSize(Svc::MemoryState state) const {
     switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
         return m_address_space_end - m_address_space_start;
-    case KMemoryState::Normal:
+    case Svc::MemoryState::Normal:
         return m_heap_region_end - m_heap_region_start;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
         return m_alias_region_end - m_alias_region_start;
-    case KMemoryState::Stack:
+    case Svc::MemoryState::Stack:
         return m_stack_region_end - m_stack_region_start;
-    case KMemoryState::Static:
-    case KMemoryState::ThreadLocal:
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::ThreadLocal:
         return m_kernel_map_region_end - m_kernel_map_region_start;
-    case KMemoryState::Io:
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Transfered:
-    case KMemoryState::SharedTransfered:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-    case KMemoryState::Coverage:
-    case KMemoryState::Insecure:
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
         return m_alias_code_region_end - m_alias_code_region_start;
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
         return m_code_region_end - m_code_region_start;
     default:
         UNREACHABLE();
     }
 }
 
-bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+bool KPageTable::CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const {
     const KProcessAddress end = addr + size;
     const KProcessAddress last = end - 1;
@@ -3192,32 +3219,32 @@ bool KPageTable::CanContain(KProcessAddress addr, size_t size, KMemoryState stat
     const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr ||
                                m_alias_region_start == m_alias_region_end);
     switch (state) {
-    case KMemoryState::Free:
-    case KMemoryState::Kernel:
+    case Svc::MemoryState::Free:
+    case Svc::MemoryState::Kernel:
         return is_in_region;
-    case KMemoryState::Io:
-    case KMemoryState::Static:
-    case KMemoryState::Code:
-    case KMemoryState::CodeData:
-    case KMemoryState::Shared:
-    case KMemoryState::AliasCode:
-    case KMemoryState::AliasCodeData:
-    case KMemoryState::Stack:
-    case KMemoryState::ThreadLocal:
-    case KMemoryState::Transfered:
-    case KMemoryState::SharedTransfered:
-    case KMemoryState::SharedCode:
-    case KMemoryState::GeneratedCode:
-    case KMemoryState::CodeOut:
-    case KMemoryState::Coverage:
-    case KMemoryState::Insecure:
+    case Svc::MemoryState::Io:
+    case Svc::MemoryState::Static:
+    case Svc::MemoryState::Code:
+    case Svc::MemoryState::CodeData:
+    case Svc::MemoryState::Shared:
+    case Svc::MemoryState::AliasCode:
+    case Svc::MemoryState::AliasCodeData:
+    case Svc::MemoryState::Stack:
+    case Svc::MemoryState::ThreadLocal:
+    case Svc::MemoryState::Transfered:
+    case Svc::MemoryState::SharedTransfered:
+    case Svc::MemoryState::SharedCode:
+    case Svc::MemoryState::GeneratedCode:
+    case Svc::MemoryState::CodeOut:
+    case Svc::MemoryState::Coverage:
+    case Svc::MemoryState::Insecure:
         return is_in_region && !is_in_heap && !is_in_alias;
-    case KMemoryState::Normal:
+    case Svc::MemoryState::Normal:
         ASSERT(is_in_heap);
         return is_in_region && !is_in_alias;
-    case KMemoryState::Ipc:
-    case KMemoryState::NonSecureIpc:
-    case KMemoryState::NonDeviceIpc:
+    case Svc::MemoryState::Ipc:
+    case Svc::MemoryState::NonSecureIpc:
+    case Svc::MemoryState::NonDeviceIpc:
         ASSERT(is_in_alias);
         return is_in_region && !is_in_heap;
     default:
@@ -3281,21 +3308,16 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, KProces
 
 Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
                                     KMemoryAttribute* out_attr, size_t* out_blocks_needed,
-                                    KProcessAddress addr, size_t size, KMemoryState state_mask,
+                                    KMemoryBlockManager::const_iterator it,
+                                    KProcessAddress last_addr, KMemoryState state_mask,
                                     KMemoryState state, KMemoryPermission perm_mask,
                                     KMemoryPermission perm, KMemoryAttribute attr_mask,
                                     KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
     ASSERT(this->IsLockedByCurrentThread());
 
     // Get information about the first block.
-    const KProcessAddress last_addr = addr + size - 1;
-    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
     KMemoryInfo info = it->GetMemoryInfo();
 
-    // If the start address isn't aligned, we need a block.
-    const size_t blocks_for_start_align =
-        (Common::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
-
     // Validate all blocks in the range have correct state.
     const KMemoryState first_state = info.m_state;
     const KMemoryPermission first_perm = info.m_permission;
@@ -3321,10 +3343,6 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
         info = it->GetMemoryInfo();
     }
 
-    // If the end address isn't aligned, we need a block.
-    const size_t blocks_for_end_align =
-        (Common::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
-
     // Write output state.
     if (out_state != nullptr) {
         *out_state = first_state;
@@ -3335,9 +3353,39 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
     if (out_attr != nullptr) {
         *out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
     }
+
+    // If the end address isn't aligned, we need a block.
     if (out_blocks_needed != nullptr) {
-        *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
+        const size_t blocks_for_end_align =
+            (Common::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress())
+                ? 1
+                : 0;
+        *out_blocks_needed = blocks_for_end_align;
+    }
+
+    R_SUCCEED();
+}
+
+Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+                                    KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+                                    KProcessAddress addr, size_t size, KMemoryState state_mask,
+                                    KMemoryState state, KMemoryPermission perm_mask,
+                                    KMemoryPermission perm, KMemoryAttribute attr_mask,
+                                    KMemoryAttribute attr, KMemoryAttribute ignore_attr) const {
+    ASSERT(this->IsLockedByCurrentThread());
+
+    // Check memory state.
+    const KProcessAddress last_addr = addr + size - 1;
+    KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
+    R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr,
+                                 state_mask, state, perm_mask, perm, attr_mask, attr,
+                                 ignore_attr));
+
+    // If the start address isn't aligned, we need a block.
+    if (out_blocks_needed != nullptr &&
+        Common::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
+        ++(*out_blocks_needed);
     }
+
     R_SUCCEED();
 }
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 7da675f27..3d64b6fb0 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -126,8 +126,6 @@ public:
         return m_block_info_manager;
     }
 
-    bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
-
     Result MapPages(KProcessAddress* out_addr, size_t num_pages, size_t alignment,
                     KPhysicalAddress phys_addr, KProcessAddress region_start,
                     size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
@@ -162,6 +160,21 @@ public:
     void RemapPageGroup(PageLinkedList* page_list, KProcessAddress address, size_t size,
                         const KPageGroup& pg);
 
+    KProcessAddress GetRegionAddress(Svc::MemoryState state) const;
+    size_t GetRegionSize(Svc::MemoryState state) const;
+    bool CanContain(KProcessAddress addr, size_t size, Svc::MemoryState state) const;
+
+    KProcessAddress GetRegionAddress(KMemoryState state) const {
+        return this->GetRegionAddress(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+    size_t GetRegionSize(KMemoryState state) const {
+        return this->GetRegionSize(static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+    bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
+        return this->CanContain(addr, size,
+                                static_cast<Svc::MemoryState>(state & KMemoryState::Mask));
+    }
+
 protected:
     struct PageLinkedList {
     private:
@@ -204,12 +217,13 @@ protected:
 private:
     enum class OperationType : u32 {
         Map = 0,
-        MapFirst = 1,
-        MapGroup = 2,
+        MapGroup = 1,
+        MapFirstGroup = 2,
         Unmap = 3,
         ChangePermissions = 4,
         ChangePermissionsAndRefresh = 5,
-        Separate = 6,
+        ChangePermissionsAndRefreshAndFlush = 6,
+        Separate = 7,
     };
 
     static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
@@ -228,8 +242,6 @@ private:
     Result Operate(KProcessAddress addr, size_t num_pages, KMemoryPermission perm,
                    OperationType operation, KPhysicalAddress map_addr = 0);
     void FinalizeUpdate(PageLinkedList* page_list);
-    KProcessAddress GetRegionAddress(KMemoryState state) const;
-    size_t GetRegionSize(KMemoryState state) const;
 
     KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages,
                                  size_t num_pages, size_t alignment, size_t offset,
@@ -252,6 +264,13 @@ private:
                                      KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
     Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
                             KMemoryAttribute* out_attr, size_t* out_blocks_needed,
+                            KMemoryBlockManager::const_iterator it, KProcessAddress last_addr,
+                            KMemoryState state_mask, KMemoryState state,
+                            KMemoryPermission perm_mask, KMemoryPermission perm,
+                            KMemoryAttribute attr_mask, KMemoryAttribute attr,
+                            KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
+    Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
+                            KMemoryAttribute* out_attr, size_t* out_blocks_needed,
                             KProcessAddress addr, size_t size, KMemoryState state_mask,
                             KMemoryState state, KMemoryPermission perm_mask,
                             KMemoryPermission perm, KMemoryAttribute attr_mask,
                             KMemoryAttribute attr,
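The net effect of the SetMemoryAttribute changes above: which state flag a region must carry now depends on which attribute bits the caller touches, with Uncached still gated on FlagCanChangeAttribute and the new PermissionLocked bit gated on FlagCanPermissionLock (held only by CodeData and AliasCodeData). A compact model of that mask-to-required-state mapping, with bit positions taken from the hunks but otherwise a simplification:

    #include <cstdint>

    constexpr std::uint32_t Uncached = 1u << 3;
    constexpr std::uint32_t PermissionLocked = 1u << 4;
    constexpr std::uint32_t FlagCanChangeAttribute = 1u << 24;
    constexpr std::uint32_t FlagCanPermissionLock = 1u << 27;

    // Each attribute bit being modified contributes the state flag permitting it.
    constexpr std::uint32_t RequiredStateFlags(std::uint32_t mask) {
        return ((mask & Uncached) ? FlagCanChangeAttribute : 0u) |
               ((mask & PermissionLocked) ? FlagCanPermissionLock : 0u);
    }

    static_assert(RequiredStateFlags(Uncached) == FlagCanChangeAttribute);
    static_assert(RequiredStateFlags(PermissionLocked) == FlagCanPermissionLock);
    static_assert(RequiredStateFlags(Uncached | PermissionLocked) ==
                  (FlagCanChangeAttribute | FlagCanPermissionLock));

    int main() {}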
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 4a099286b..7fa34d693 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -149,7 +149,7 @@ u64 KProcess::GetTotalPhysicalMemoryUsed() {
 }
 
 u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
-    return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceUsage();
+    return this->GetTotalPhysicalMemoryUsed() - this->GetSystemResourceSize();
 }
 
 bool KProcess::ReleaseUserException(KThread* thread) {
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index cb025c3d6..24433d32b 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -623,14 +623,33 @@ struct KernelCore::Impl {
         ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
             GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab));
 
+        // Insert a physical region for the secure applet memory.
+        const auto secure_applet_end_phys_addr =
+            slab_end_phys_addr + KSystemControl::SecureAppletMemorySize;
+        if constexpr (KSystemControl::SecureAppletMemorySize > 0) {
+            ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
+                GetInteger(slab_end_phys_addr), KSystemControl::SecureAppletMemorySize,
+                KMemoryRegionType_DramKernelSecureAppletMemory));
+        }
+
+        // Insert a physical region for the unknown debug2 region.
+        constexpr size_t SecureUnknownRegionSize = 0;
+        const size_t secure_unknown_size = SecureUnknownRegionSize;
+        const auto secure_unknown_end_phys_addr =
+            secure_applet_end_phys_addr + secure_unknown_size;
+        if constexpr (SecureUnknownRegionSize > 0) {
+            ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
+                GetInteger(secure_applet_end_phys_addr), secure_unknown_size,
+                KMemoryRegionType_DramKernelSecureUnknown));
+        }
+
         // Determine size available for kernel page table heaps, requiring > 8 MB.
         const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size;
-        const size_t page_table_heap_size = resource_end_phys_addr - slab_end_phys_addr;
+        const size_t page_table_heap_size = resource_end_phys_addr - secure_unknown_end_phys_addr;
         ASSERT(page_table_heap_size / 4_MiB > 2);
 
         // Insert a physical region for the kernel page table heap region
         ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
-            GetInteger(slab_end_phys_addr), page_table_heap_size,
+            GetInteger(secure_unknown_end_phys_addr), page_table_heap_size,
             KMemoryRegionType_DramKernelPtHeap));
 
         // All DRAM regions that we haven't tagged by this point will be mapped under the linear
diff --git a/src/core/hle/kernel/svc/svc_memory.cpp b/src/core/hle/kernel/svc/svc_memory.cpp
index 2cab74127..97f1210de 100644
--- a/src/core/hle/kernel/svc/svc_memory.cpp
+++ b/src/core/hle/kernel/svc/svc_memory.cpp
@@ -76,7 +76,7 @@ Result MapUnmapMemorySanityChecks(const KPageTable& manager, u64 dst_addr, u64 s
 } // namespace
 
 Result SetMemoryPermission(Core::System& system, u64 address, u64 size, MemoryPermission perm) {
-    LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X", address, size,
+    LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, perm=0x{:08X}", address, size,
              perm);
 
     // Validate address / size.
@@ -108,10 +108,16 @@ Result SetMemoryAttribute(Core::System& system, u64 address, u64 size, u32 mask,
     R_UNLESS((address < address + size), ResultInvalidCurrentMemory);
 
     // Validate the attribute and mask.
-    constexpr u32 SupportedMask = static_cast<u32>(MemoryAttribute::Uncached);
+    constexpr u32 SupportedMask =
+        static_cast<u32>(MemoryAttribute::Uncached | MemoryAttribute::PermissionLocked);
     R_UNLESS((mask | attr) == mask, ResultInvalidCombination);
     R_UNLESS((mask | attr | SupportedMask) == SupportedMask, ResultInvalidCombination);
 
+    // Check that permission locked is either being set or not masked.
+    R_UNLESS((static_cast<Svc::MemoryAttribute>(mask) & Svc::MemoryAttribute::PermissionLocked) ==
+                 (static_cast<Svc::MemoryAttribute>(attr) & Svc::MemoryAttribute::PermissionLocked),
+             ResultInvalidCombination);
+
     // Validate that the region is in range for the current process.
     auto& page_table{GetCurrentProcess(system.Kernel()).GetPageTable()};
     R_UNLESS(page_table.Contains(address, size), ResultInvalidCurrentMemory);
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 7f380ca4f..251e6013c 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -46,6 +46,7 @@ enum class MemoryAttribute : u32 {
     IpcLocked = (1 << 1),
     DeviceShared = (1 << 2),
     Uncached = (1 << 3),
+    PermissionLocked = (1 << 4),
 };
 DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute);
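The three R_UNLESS checks encode the svcSetMemoryAttribute contract: attr must be a subset of mask, both must stay within the supported bits, and a masked PermissionLocked bit must also be set, which makes the lock one-way (it can be applied, never cleared, from userland). The same rules as a standalone validator (simplified to raw u32 math, an illustrative sketch):

    #include <cstdint>

    constexpr std::uint32_t Uncached = 1u << 3;
    constexpr std::uint32_t PermissionLocked = 1u << 4;
    constexpr std::uint32_t SupportedMask = Uncached | PermissionLocked;

    constexpr bool IsValidSetMemoryAttribute(std::uint32_t mask, std::uint32_t attr) {
        if ((mask | attr) != mask) return false;                          // attr within mask
        if ((mask | attr | SupportedMask) != SupportedMask) return false; // no unknown bits
        // One-way lock: if PermissionLocked is masked, it must also be set.
        return (mask & PermissionLocked) == (attr & PermissionLocked);
    }

    static_assert(IsValidSetMemoryAttribute(Uncached, Uncached));  // set uncached
    static_assert(IsValidSetMemoryAttribute(Uncached, 0));         // clear uncached
    static_assert(IsValidSetMemoryAttribute(PermissionLocked, PermissionLocked));
    static_assert(!IsValidSetMemoryAttribute(PermissionLocked, 0)); // clearing is rejected

    int main() {}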
diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp
index b7d14060c..1b1c8190e 100644
--- a/src/core/hle/service/acc/acc.cpp
+++ b/src/core/hle/service/acc/acc.cpp
@@ -407,13 +407,13 @@ protected:
         IPC::RequestParser rp{ctx};
         const auto base = rp.PopRaw<ProfileBase>();
 
-        const auto user_data = ctx.ReadBuffer();
-        const auto image_data = ctx.ReadBuffer(1);
+        const auto image_data = ctx.ReadBufferA(0);
+        const auto user_data = ctx.ReadBufferX(0);
 
-        LOG_DEBUG(Service_ACC, "called, username='{}', timestamp={:016X}, uuid=0x{}",
-                  Common::StringFromFixedZeroTerminatedBuffer(
-                      reinterpret_cast<const char*>(base.username.data()), base.username.size()),
-                  base.timestamp, base.user_uuid.RawString());
+        LOG_INFO(Service_ACC, "called, username='{}', timestamp={:016X}, uuid=0x{}",
+                 Common::StringFromFixedZeroTerminatedBuffer(
+                     reinterpret_cast<const char*>(base.username.data()), base.username.size()),
+                 base.timestamp, base.user_uuid.RawString());
 
         if (user_data.size() < sizeof(UserData)) {
             LOG_ERROR(Service_ACC, "UserData buffer too small!");
diff --git a/src/core/hle/service/hle_ipc.cpp b/src/core/hle/service/hle_ipc.cpp
index f6a1e54f2..6f3ae3cc4 100644
--- a/src/core/hle/service/hle_ipc.cpp
+++ b/src/core/hle/service/hle_ipc.cpp
@@ -23,6 +23,17 @@
 #include "core/hle/service/ipc_helpers.h"
 #include "core/memory.h"
 
+namespace {
+static thread_local std::array read_buffer_data_a{
+    Common::ScratchBuffer<u8>(),
+    Common::ScratchBuffer<u8>(),
+};
+static thread_local std::array read_buffer_data_x{
+    Common::ScratchBuffer<u8>(),
+    Common::ScratchBuffer<u8>(),
+};
+} // Anonymous namespace
+
 namespace Service {
 
 SessionRequestHandler::SessionRequestHandler(Kernel::KernelCore& kernel_, const char* service_name_)
@@ -328,26 +339,57 @@ std::vector<u8> HLERequestContext::ReadBufferCopy(std::size_t buffer_index) cons
     }
 }
 
-std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
+std::span<const u8> HLERequestContext::ReadBufferA(std::size_t buffer_index) const {
     static thread_local std::array read_buffer_a{
         Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
         Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
     };
-    static thread_local std::array read_buffer_data_a{
-        Common::ScratchBuffer<u8>(),
-        Common::ScratchBuffer<u8>(),
-    };
+
+    ASSERT_OR_EXECUTE_MSG(
+        BufferDescriptorA().size() > buffer_index, { return {}; },
+        "BufferDescriptorA invalid buffer_index {}", buffer_index);
+    auto& read_buffer = read_buffer_a[buffer_index];
+    return read_buffer.Read(BufferDescriptorA()[buffer_index].Address(),
+                            BufferDescriptorA()[buffer_index].Size(),
+                            &read_buffer_data_a[buffer_index]);
+}
+
+std::span<const u8> HLERequestContext::ReadBufferX(std::size_t buffer_index) const {
     static thread_local std::array read_buffer_x{
         Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
         Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
     };
-    static thread_local std::array read_buffer_data_x{
-        Common::ScratchBuffer<u8>(),
-        Common::ScratchBuffer<u8>(),
+
+    ASSERT_OR_EXECUTE_MSG(
+        BufferDescriptorX().size() > buffer_index, { return {}; },
+        "BufferDescriptorX invalid buffer_index {}", buffer_index);
+    auto& read_buffer = read_buffer_x[buffer_index];
+    return read_buffer.Read(BufferDescriptorX()[buffer_index].Address(),
+                            BufferDescriptorX()[buffer_index].Size(),
+                            &read_buffer_data_x[buffer_index]);
+}
+
+std::span<const u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
+    static thread_local std::array read_buffer_a{
+        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
+        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
+    };
+    static thread_local std::array read_buffer_x{
+        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
+        Core::Memory::CpuGuestMemory<u8, Core::Memory::GuestMemoryFlags::SafeRead>(memory, 0, 0),
     };
 
     const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
                            BufferDescriptorA()[buffer_index].Size()};
+    const bool is_buffer_x{BufferDescriptorX().size() > buffer_index &&
+                           BufferDescriptorX()[buffer_index].Size()};
+
+    if (is_buffer_a && is_buffer_x) {
+        LOG_WARNING(Input, "Both buffer descriptors are available a.size={}, x.size={}",
+                    BufferDescriptorA()[buffer_index].Size(),
+                    BufferDescriptorX()[buffer_index].Size());
+    }
+
     if (is_buffer_a) {
         ASSERT_OR_EXECUTE_MSG(
             BufferDescriptorA().size() > buffer_index, { return {}; },
diff --git a/src/core/hle/service/hle_ipc.h b/src/core/hle/service/hle_ipc.h
index 4bd24c899..ad5259a5c 100644
--- a/src/core/hle/service/hle_ipc.h
+++ b/src/core/hle/service/hle_ipc.h
@@ -253,6 +253,12 @@ public:
         return domain_message_header.has_value();
     }
 
+    /// Helper function to get a span of a buffer using the buffer descriptor A
+    [[nodiscard]] std::span<const u8> ReadBufferA(std::size_t buffer_index = 0) const;
+
+    /// Helper function to get a span of a buffer using the buffer descriptor X
+    [[nodiscard]] std::span<const u8> ReadBufferX(std::size_t buffer_index = 0) const;
+
    /// Helper function to get a span of a buffer using the appropriate buffer descriptor
    [[nodiscard]] std::span<const u8> ReadBuffer(std::size_t buffer_index = 0) const;
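The split above exists because a HIPC request can carry both an A (send) descriptor and an X (pointer) descriptor at the same index, and the generic ReadBuffer() has to guess which one the command meant; ReadBufferA()/ReadBufferX() let the service name the descriptor explicitly. A self-contained model of that selection (toy types, not yuzu's):

// Sketch of the descriptor-selection problem ReadBuffer() solves above.
#include <cstdio>
#include <optional>
#include <span>
#include <vector>

struct Request {
    std::optional<std::vector<unsigned char>> buffer_a; // A (send) descriptor
    std::optional<std::vector<unsigned char>> buffer_x; // X (pointer) descriptor
};

std::span<const unsigned char> ReadBuffer(const Request& req) {
    const bool is_a = req.buffer_a && !req.buffer_a->empty();
    const bool is_x = req.buffer_x && !req.buffer_x->empty();
    if (is_a && is_x) {
        // Mirrors the LOG_WARNING above: both descriptors populated, prefer A.
        std::printf("warning: both descriptors populated; preferring A\n");
    }
    if (is_a) {
        return *req.buffer_a;
    }
    return is_x ? std::span<const unsigned char>{*req.buffer_x}
                : std::span<const unsigned char>{};
}

int main() {
    const Request req{.buffer_a = std::vector<unsigned char>{1, 2, 3},
                      .buffer_x = std::vector<unsigned char>{4, 5}};
    std::printf("picked %zu bytes\n", ReadBuffer(req).size()); // picks A: 3 bytes
}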
diff --git a/src/core/hle/service/mii/types/core_data.cpp b/src/core/hle/service/mii/types/core_data.cpp
index 970c748ca..ba1da76ba 100644
--- a/src/core/hle/service/mii/types/core_data.cpp
+++ b/src/core/hle/service/mii/types/core_data.cpp
@@ -41,6 +41,7 @@ void CoreData::BuildRandom(Age age, Gender gender, Race race) {
         }
     }
 
+    SetDefault();
     SetGender(gender);
     SetFavoriteColor(MiiUtil::GetRandomValue(FavoriteColor::Max));
     SetRegionMove(0);
diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
index 469a53244..2e29bc848 100644
--- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
+++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
@@ -46,7 +46,7 @@ Result AllocateIoForProcessAddressSpace(Common::ProcessAddress* out_map_address,
     // Get bounds of where mapping is possible.
     const VAddr alias_code_begin = GetInteger(page_table.GetAliasCodeRegionStart());
     const VAddr alias_code_size = page_table.GetAliasCodeRegionSize() / YUZU_PAGESIZE;
-    const auto state = Kernel::KMemoryState::Io;
+    const auto state = Kernel::KMemoryState::IoMemory;
     const auto perm = Kernel::KMemoryPermission::UserReadWrite;
     std::mt19937_64 rng{process->GetRandomEntropy(0)};
diff --git a/src/core/hle/service/prepo/prepo.cpp b/src/core/hle/service/prepo/prepo.cpp
index ec4a84989..14e8df63a 100644
--- a/src/core/hle/service/prepo/prepo.cpp
+++ b/src/core/hle/service/prepo/prepo.cpp
@@ -58,14 +58,8 @@ private:
         IPC::RequestParser rp{ctx};
         const auto process_id = rp.PopRaw<u64>();
 
-        const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [&ctx] {
-            if (ctx.CanReadBuffer(1)) {
-                return ctx.ReadBuffer(1);
-            }
-
-            return std::span<const u8>{};
-        }();
+        const auto data1 = ctx.ReadBufferA(0);
+        const auto data2 = ctx.ReadBufferX(0);
 
         LOG_DEBUG(Service_PREPO,
                   "called, type={:02X}, process_id={:016X}, data1_size={:016X}, data2_size={:016X}",
@@ -85,14 +79,8 @@ private:
         const auto user_id = rp.PopRaw<u128>();
         const auto process_id = rp.PopRaw<u64>();
 
-        const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [&ctx] {
-            if (ctx.CanReadBuffer(1)) {
-                return ctx.ReadBuffer(1);
-            }
-
-            return std::span<const u8>{};
-        }();
+        const auto data1 = ctx.ReadBufferA(0);
+        const auto data2 = ctx.ReadBufferX(0);
 
         LOG_DEBUG(Service_PREPO,
                   "called, type={:02X}, user_id={:016X}{:016X}, process_id={:016X}, "
@@ -137,14 +125,8 @@ private:
         IPC::RequestParser rp{ctx};
         const auto title_id = rp.PopRaw<u64>();
 
-        const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [&ctx] {
-            if (ctx.CanReadBuffer(1)) {
-                return ctx.ReadBuffer(1);
-            }
-
-            return std::span<const u8>{};
-        }();
+        const auto data1 = ctx.ReadBufferA(0);
+        const auto data2 = ctx.ReadBufferX(0);
 
         LOG_DEBUG(Service_PREPO, "called, title_id={:016X}, data1_size={:016X}, data2_size={:016X}",
                   title_id, data1.size(), data2.size());
@@ -161,14 +143,8 @@ private:
         const auto user_id = rp.PopRaw<u128>();
         const auto title_id = rp.PopRaw<u64>();
 
-        const auto data1 = ctx.ReadBuffer(0);
-        const auto data2 = [&ctx] {
-            if (ctx.CanReadBuffer(1)) {
-                return ctx.ReadBuffer(1);
-            }
-
-            return std::span<const u8>{};
-        }();
+        const auto data1 = ctx.ReadBufferA(0);
+        const auto data2 = ctx.ReadBufferX(0);
 
         LOG_DEBUG(Service_PREPO,
                   "called, user_id={:016X}{:016X}, title_id={:016X}, data1_size={:016X}, "
diff --git a/src/tests/common/unique_function.cpp b/src/tests/common/unique_function.cpp
index f7a23e876..42e6ade09 100644
--- a/src/tests/common/unique_function.cpp
+++ b/src/tests/common/unique_function.cpp
@@ -46,8 +46,8 @@ TEST_CASE("UniqueFunction", "[common]") {
     Noisy noisy;
     REQUIRE(noisy.state == "Default constructed");
 
-    Common::UniqueFunction<void> func = [noisy = std::move(noisy)] {
-        REQUIRE(noisy.state == "Move constructed");
+    Common::UniqueFunction<void> func = [noisy_inner = std::move(noisy)] {
+        REQUIRE(noisy_inner.state == "Move constructed");
     };
     REQUIRE(noisy.state == "Moved away");
     func();
@@ -101,7 +101,7 @@ TEST_CASE("UniqueFunction", "[common]") {
     };
     Foo object{&num_destroyed};
     {
-        Common::UniqueFunction<void> func = [object = std::move(object)] {};
+        Common::UniqueFunction<void> func = [object_inner = std::move(object)] {};
         REQUIRE(num_destroyed == 0);
     }
     REQUIRE(num_destroyed == 1);
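The unique_function.cpp renames above change behavior not at all: an init-capture like [noisy = std::move(noisy)] moves the outer variable into the closure either way, but naming the capture after the enclosing variable shadows it, which -Wshadow-style diagnostics flag. A standalone sketch of the idiom (not yuzu code):

// Sketch of the init-capture rename above.
#include <cassert>
#include <string>
#include <utility>

int main() {
    std::string s = "payload";
    // 's' is moved into the closure member 's_inner'; no shadowing of 's'.
    const auto f = [s_inner = std::move(s)] { assert(s_inner == "payload"); };
    // The outer 's' is now in a valid but unspecified moved-from state.
    f();
}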
"[common]") { }; Foo object{&num_destroyed}; { - Common::UniqueFunction<void> func = [object = std::move(object)] {}; + Common::UniqueFunction<void> func = [object_inner = std::move(object)] {}; REQUIRE(num_destroyed == 0); } REQUIRE(num_destroyed == 1); diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h index 805a89900..c0e6471fe 100644 --- a/src/video_core/fence_manager.h +++ b/src/video_core/fence_manager.h @@ -86,7 +86,10 @@ public: uncommitted_operations.emplace_back(std::move(func)); } pending_operations.emplace_back(std::move(uncommitted_operations)); - QueueFence(new_fence); + { + std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; + QueueFence(new_fence); + } if (!delay_fence) { func(); } diff --git a/src/video_core/host_shaders/CMakeLists.txt b/src/video_core/host_shaders/CMakeLists.txt index 8bb429578..cd2549232 100644 --- a/src/video_core/host_shaders/CMakeLists.txt +++ b/src/video_core/host_shaders/CMakeLists.txt @@ -19,6 +19,7 @@ set(SHADER_FILES block_linear_unswizzle_2d.comp block_linear_unswizzle_3d.comp convert_abgr8_to_d24s8.frag + convert_abgr8_to_d32f.frag convert_d32f_to_abgr8.frag convert_d24s8_to_abgr8.frag convert_depth_to_float.frag diff --git a/src/video_core/host_shaders/convert_abgr8_to_d32f.frag b/src/video_core/host_shaders/convert_abgr8_to_d32f.frag new file mode 100644 index 000000000..095b910c2 --- /dev/null +++ b/src/video_core/host_shaders/convert_abgr8_to_d32f.frag @@ -0,0 +1,15 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#version 450 + +layout(binding = 0) uniform sampler2D color_texture; + +void main() { + ivec2 coord = ivec2(gl_FragCoord.xy); + vec4 color = texelFetch(color_texture, coord, 0).abgr; + + float value = color.a * (color.r + color.g + color.b) / 3.0f; + + gl_FragDepth = value; +} diff --git a/src/video_core/host_shaders/convert_d32f_to_abgr8.frag b/src/video_core/host_shaders/convert_d32f_to_abgr8.frag index 04cfef8b5..4e5a9f955 100644 --- a/src/video_core/host_shaders/convert_d32f_to_abgr8.frag +++ b/src/video_core/host_shaders/convert_d32f_to_abgr8.frag @@ -9,6 +9,6 @@ layout(location = 0) out vec4 color; void main() { ivec2 coord = ivec2(gl_FragCoord.xy); - float depth = textureLod(depth_tex, coord, 0).r; + float depth = texelFetch(depth_tex, coord, 0).r; color = vec4(depth, depth, depth, 1.0); } diff --git a/src/video_core/renderer_vulkan/blit_image.cpp b/src/video_core/renderer_vulkan/blit_image.cpp index f01d2394e..c3db09424 100644 --- a/src/video_core/renderer_vulkan/blit_image.cpp +++ b/src/video_core/renderer_vulkan/blit_image.cpp @@ -8,6 +8,7 @@ #include "common/settings.h" #include "video_core/host_shaders/blit_color_float_frag_spv.h" #include "video_core/host_shaders/convert_abgr8_to_d24s8_frag_spv.h" +#include "video_core/host_shaders/convert_abgr8_to_d32f_frag_spv.h" #include "video_core/host_shaders/convert_d24s8_to_abgr8_frag_spv.h" #include "video_core/host_shaders/convert_d32f_to_abgr8_frag_spv.h" #include "video_core/host_shaders/convert_depth_to_float_frag_spv.h" @@ -434,6 +435,7 @@ BlitImageHelper::BlitImageHelper(const Device& device_, Scheduler& scheduler_, convert_depth_to_float_frag(BuildShader(device, CONVERT_DEPTH_TO_FLOAT_FRAG_SPV)), convert_float_to_depth_frag(BuildShader(device, CONVERT_FLOAT_TO_DEPTH_FRAG_SPV)), convert_abgr8_to_d24s8_frag(BuildShader(device, CONVERT_ABGR8_TO_D24S8_FRAG_SPV)), + convert_abgr8_to_d32f_frag(BuildShader(device, CONVERT_ABGR8_TO_D32F_FRAG_SPV)), 
diff --git a/src/video_core/host_shaders/CMakeLists.txt b/src/video_core/host_shaders/CMakeLists.txt
index 8bb429578..cd2549232 100644
--- a/src/video_core/host_shaders/CMakeLists.txt
+++ b/src/video_core/host_shaders/CMakeLists.txt
@@ -19,6 +19,7 @@ set(SHADER_FILES
     block_linear_unswizzle_2d.comp
     block_linear_unswizzle_3d.comp
     convert_abgr8_to_d24s8.frag
+    convert_abgr8_to_d32f.frag
     convert_d32f_to_abgr8.frag
     convert_d24s8_to_abgr8.frag
     convert_depth_to_float.frag
diff --git a/src/video_core/host_shaders/convert_abgr8_to_d32f.frag b/src/video_core/host_shaders/convert_abgr8_to_d32f.frag
new file mode 100644
index 000000000..095b910c2
--- /dev/null
+++ b/src/video_core/host_shaders/convert_abgr8_to_d32f.frag
@@ -0,0 +1,15 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#version 450
+
+layout(binding = 0) uniform sampler2D color_texture;
+
+void main() {
+    ivec2 coord = ivec2(gl_FragCoord.xy);
+    vec4 color = texelFetch(color_texture, coord, 0).abgr;
+
+    float value = color.a * (color.r + color.g + color.b) / 3.0f;
+
+    gl_FragDepth = value;
+}
diff --git a/src/video_core/host_shaders/convert_d32f_to_abgr8.frag b/src/video_core/host_shaders/convert_d32f_to_abgr8.frag
index 04cfef8b5..4e5a9f955 100644
--- a/src/video_core/host_shaders/convert_d32f_to_abgr8.frag
+++ b/src/video_core/host_shaders/convert_d32f_to_abgr8.frag
@@ -9,6 +9,6 @@ layout(location = 0) out vec4 color;
 
 void main() {
     ivec2 coord = ivec2(gl_FragCoord.xy);
-    float depth = textureLod(depth_tex, coord, 0).r;
+    float depth = texelFetch(depth_tex, coord, 0).r;
     color = vec4(depth, depth, depth, 1.0);
 }
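The new shader synthesizes a depth value from a color texel by averaging the three color channels and scaling by alpha; the convert_d32f_to_abgr8.frag change in the same hunk swaps textureLod for texelFetch, which takes the integer texel coordinate the shader already computes. A CPU-side sketch of the same per-pixel math (the channel unpacking order below is illustrative, not a claim about yuzu's texel layout):

// Sketch: the per-pixel math of convert_abgr8_to_d32f.frag, on the CPU.
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Convert one packed ABGR8 texel to a normalized D32F depth value.
float Abgr8ToD32F(uint32_t abgr) {
    const float a = ((abgr >> 24) & 0xff) / 255.0f;
    const float b = ((abgr >> 16) & 0xff) / 255.0f;
    const float g = ((abgr >> 8) & 0xff) / 255.0f;
    const float r = ((abgr >> 0) & 0xff) / 255.0f;
    // Average the color channels, scale by alpha, clamp to the depth range.
    return std::clamp(a * (r + g + b) / 3.0f, 0.0f, 1.0f);
}

int main() {
    std::printf("%f\n", Abgr8ToD32F(0xffffffffu)); // opaque white  -> 1.0
    std::printf("%f\n", Abgr8ToD32F(0x7f0000ffu)); // half-alpha red -> ~0.166
}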
diff --git a/src/video_core/renderer_vulkan/blit_image.cpp b/src/video_core/renderer_vulkan/blit_image.cpp
index f01d2394e..c3db09424 100644
--- a/src/video_core/renderer_vulkan/blit_image.cpp
+++ b/src/video_core/renderer_vulkan/blit_image.cpp
@@ -8,6 +8,7 @@
 #include "common/settings.h"
 #include "video_core/host_shaders/blit_color_float_frag_spv.h"
 #include "video_core/host_shaders/convert_abgr8_to_d24s8_frag_spv.h"
+#include "video_core/host_shaders/convert_abgr8_to_d32f_frag_spv.h"
 #include "video_core/host_shaders/convert_d24s8_to_abgr8_frag_spv.h"
 #include "video_core/host_shaders/convert_d32f_to_abgr8_frag_spv.h"
 #include "video_core/host_shaders/convert_depth_to_float_frag_spv.h"
@@ -434,6 +435,7 @@ BlitImageHelper::BlitImageHelper(const Device& device_, Scheduler& scheduler_,
       convert_depth_to_float_frag(BuildShader(device, CONVERT_DEPTH_TO_FLOAT_FRAG_SPV)),
       convert_float_to_depth_frag(BuildShader(device, CONVERT_FLOAT_TO_DEPTH_FRAG_SPV)),
       convert_abgr8_to_d24s8_frag(BuildShader(device, CONVERT_ABGR8_TO_D24S8_FRAG_SPV)),
+      convert_abgr8_to_d32f_frag(BuildShader(device, CONVERT_ABGR8_TO_D32F_FRAG_SPV)),
       convert_d32f_to_abgr8_frag(BuildShader(device, CONVERT_D32F_TO_ABGR8_FRAG_SPV)),
       convert_d24s8_to_abgr8_frag(BuildShader(device, CONVERT_D24S8_TO_ABGR8_FRAG_SPV)),
       convert_s8d24_to_abgr8_frag(BuildShader(device, CONVERT_S8D24_TO_ABGR8_FRAG_SPV)),
@@ -559,6 +561,13 @@ void BlitImageHelper::ConvertABGR8ToD24S8(const Framebuffer* dst_framebuffer,
     Convert(*convert_abgr8_to_d24s8_pipeline, dst_framebuffer, src_image_view);
 }
 
+void BlitImageHelper::ConvertABGR8ToD32F(const Framebuffer* dst_framebuffer,
+                                         const ImageView& src_image_view) {
+    ConvertPipelineDepthTargetEx(convert_abgr8_to_d32f_pipeline, dst_framebuffer->RenderPass(),
+                                 convert_abgr8_to_d32f_frag);
+    Convert(*convert_abgr8_to_d32f_pipeline, dst_framebuffer, src_image_view);
+}
+
 void BlitImageHelper::ConvertD32FToABGR8(const Framebuffer* dst_framebuffer,
                                          ImageView& src_image_view) {
     ConvertPipelineColorTargetEx(convert_d32f_to_abgr8_pipeline, dst_framebuffer->RenderPass(),
diff --git a/src/video_core/renderer_vulkan/blit_image.h b/src/video_core/renderer_vulkan/blit_image.h
index a032c71fb..b2104a59e 100644
--- a/src/video_core/renderer_vulkan/blit_image.h
+++ b/src/video_core/renderer_vulkan/blit_image.h
@@ -67,6 +67,8 @@ public:
     void ConvertABGR8ToD24S8(const Framebuffer* dst_framebuffer, const ImageView& src_image_view);
 
+    void ConvertABGR8ToD32F(const Framebuffer* dst_framebuffer, const ImageView& src_image_view);
+
     void ConvertD32FToABGR8(const Framebuffer* dst_framebuffer, ImageView& src_image_view);
 
     void ConvertD24S8ToABGR8(const Framebuffer* dst_framebuffer, ImageView& src_image_view);
@@ -130,6 +132,7 @@ private:
     vk::ShaderModule convert_depth_to_float_frag;
     vk::ShaderModule convert_float_to_depth_frag;
     vk::ShaderModule convert_abgr8_to_d24s8_frag;
+    vk::ShaderModule convert_abgr8_to_d32f_frag;
     vk::ShaderModule convert_d32f_to_abgr8_frag;
     vk::ShaderModule convert_d24s8_to_abgr8_frag;
     vk::ShaderModule convert_s8d24_to_abgr8_frag;
@@ -149,6 +152,7 @@ private:
     vk::Pipeline convert_d16_to_r16_pipeline;
     vk::Pipeline convert_r16_to_d16_pipeline;
     vk::Pipeline convert_abgr8_to_d24s8_pipeline;
+    vk::Pipeline convert_abgr8_to_d32f_pipeline;
     vk::Pipeline convert_d32f_to_abgr8_pipeline;
     vk::Pipeline convert_d24s8_to_abgr8_pipeline;
     vk::Pipeline convert_s8d24_to_abgr8_pipeline;
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index c4c30d807..7e7a80740 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -132,12 +132,16 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
         const bool use_accelerated =
             rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride);
         const bool is_srgb = use_accelerated && screen_info.is_srgb;
-        RenderScreenshot(*framebuffer, use_accelerated);
-
-        Frame* frame = present_manager.GetRenderFrame();
-        blit_screen.DrawToSwapchain(frame, *framebuffer, use_accelerated, is_srgb);
-        scheduler.Flush(*frame->render_ready);
-        present_manager.Present(frame);
+
+        {
+            std::scoped_lock lock{rasterizer.LockCaches()};
+            RenderScreenshot(*framebuffer, use_accelerated);
+
+            Frame* frame = present_manager.GetRenderFrame();
+            blit_screen.DrawToSwapchain(frame, *framebuffer, use_accelerated, is_srgb);
+            scheduler.Flush(*frame->render_ready);
+            present_manager.Present(frame);
+        }
 
         gpu.RendererFrameEndNotify();
         rasterizer.TickFrame();
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 61d03daae..465eac37e 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -198,7 +198,7 @@ void RasterizerVulkan::PrepareDraw(bool is_indexed, Func&& draw_func) {
     if (!pipeline) {
         return;
     }
-    std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+    std::scoped_lock lock{LockCaches()};
     // update engine as channel may be different.
     pipeline->SetEngine(maxwell3d, gpu_memory);
     pipeline->Configure(is_indexed);
@@ -708,6 +708,7 @@ void RasterizerVulkan::TiledCacheBarrier() {
 }
 
 void RasterizerVulkan::FlushCommands() {
+    std::scoped_lock lock{LockCaches()};
     if (draw_counter == 0) {
         return;
     }
@@ -805,6 +806,7 @@ void RasterizerVulkan::FlushWork() {
     if ((++draw_counter & 7) != 7) {
         return;
     }
+    std::scoped_lock lock{LockCaches()};
     if (draw_counter < DRAWS_TO_DISPATCH) {
         // Send recorded tasks to the worker thread
         scheduler.DispatchWork();
@@ -1499,7 +1501,7 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs)
 void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) {
     CreateChannel(channel);
     {
-        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+        std::scoped_lock lock{LockCaches()};
         texture_cache.CreateChannel(channel);
         buffer_cache.CreateChannel(channel);
     }
@@ -1512,7 +1514,7 @@ void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) {
     const s32 channel_id = channel.bind_id;
     BindToChannel(channel_id);
     {
-        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+        std::scoped_lock lock{LockCaches()};
         texture_cache.BindToChannel(channel_id);
         buffer_cache.BindToChannel(channel_id);
     }
@@ -1525,7 +1527,7 @@ void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) {
 void RasterizerVulkan::ReleaseChannel(s32 channel_id) {
     EraseChannel(channel_id);
     {
-        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+        std::scoped_lock lock{LockCaches()};
         texture_cache.EraseChannel(channel_id);
         buffer_cache.EraseChannel(channel_id);
     }
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index ad069556c..ce3dfbaab 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -133,6 +133,10 @@ public:
 
     void ReleaseChannel(s32 channel_id) override;
 
+    std::scoped_lock<std::recursive_mutex, std::recursive_mutex> LockCaches() {
+        return std::scoped_lock{buffer_cache.mutex, texture_cache.mutex};
+    }
+
 private:
     static constexpr size_t MAX_TEXTURES = 192;
     static constexpr size_t MAX_IMAGES = 48;
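A subtlety in LockCaches() above: std::scoped_lock is neither copyable nor movable, yet it can be returned by value, because C++17 guarantees the prvalue is materialized directly in the caller's variable with no copy or move ever occurring. The recursive mutexes also allow nested lock sites (e.g. FlushWork running under PrepareDraw's lock). A standalone sketch of the pattern:

// Sketch: returning a std::scoped_lock by value via guaranteed elision.
#include <mutex>

std::recursive_mutex buffer_mutex;
std::recursive_mutex texture_mutex;

std::scoped_lock<std::recursive_mutex, std::recursive_mutex> LockBoth() {
    return std::scoped_lock{buffer_mutex, texture_mutex};
}

int main() {
    // 'lock' is the very object constructed inside LockBoth(); the deleted
    // copy/move constructors are never needed.
    std::scoped_lock lock{LockBoth()};
    // ... both mutexes held here; released at end of scope ...
}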
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 93773a69f..de34f6d49 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -1194,6 +1194,11 @@ void TextureCacheRuntime::ConvertImage(Framebuffer* dst, ImageView& dst_view, Im
             return blit_image_helper.ConvertD16ToR16(dst, src_view);
         }
         break;
+    case PixelFormat::A8B8G8R8_SRGB:
+        if (src_view.format == PixelFormat::D32_FLOAT) {
+            return blit_image_helper.ConvertD32FToABGR8(dst, src_view);
+        }
+        break;
     case PixelFormat::A8B8G8R8_UNORM:
         if (src_view.format == PixelFormat::S8_UINT_D24_UNORM) {
             return blit_image_helper.ConvertD24S8ToABGR8(dst, src_view);
@@ -1205,6 +1210,16 @@ void TextureCacheRuntime::ConvertImage(Framebuffer* dst, ImageView& dst_view, Im
             return blit_image_helper.ConvertD32FToABGR8(dst, src_view);
         }
         break;
+    case PixelFormat::B8G8R8A8_SRGB:
+        if (src_view.format == PixelFormat::D32_FLOAT) {
+            return blit_image_helper.ConvertD32FToABGR8(dst, src_view);
+        }
+        break;
+    case PixelFormat::B8G8R8A8_UNORM:
+        if (src_view.format == PixelFormat::D32_FLOAT) {
+            return blit_image_helper.ConvertD32FToABGR8(dst, src_view);
+        }
+        break;
     case PixelFormat::R32_FLOAT:
         if (src_view.format == PixelFormat::D32_FLOAT) {
             return blit_image_helper.ConvertD32ToR32(dst, src_view);
@@ -1222,6 +1237,12 @@ void TextureCacheRuntime::ConvertImage(Framebuffer* dst, ImageView& dst_view, Im
         }
         break;
     case PixelFormat::D32_FLOAT:
+        if (src_view.format == PixelFormat::A8B8G8R8_UNORM ||
+            src_view.format == PixelFormat::B8G8R8A8_UNORM ||
+            src_view.format == PixelFormat::A8B8G8R8_SRGB ||
+            src_view.format == PixelFormat::B8G8R8A8_SRGB) {
+            return blit_image_helper.ConvertABGR8ToD32F(dst, src_view);
+        }
         if (src_view.format == PixelFormat::R32_FLOAT) {
             return blit_image_helper.ConvertR32ToD32(dst, src_view);
         }
@@ -2034,7 +2055,7 @@ void TextureCacheRuntime::TransitionImageLayout(Image& image) {
         },
     };
     scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([barrier = barrier](vk::CommandBuffer cmdbuf) {
+    scheduler.Record([barrier](vk::CommandBuffer cmdbuf) {
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                                VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, barrier);
     });
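The ConvertImage changes above extend a two-level dispatch: switch on the destination format, then test the source format, falling back to a generic path when no dedicated conversion pipeline exists. An illustrative reduction of that shape (toy enum and strings, not yuzu's types):

// Sketch: the dispatch shape of TextureCacheRuntime::ConvertImage above.
#include <cstdio>

enum class PixelFormat {
    A8B8G8R8_UNORM, B8G8R8A8_UNORM, A8B8G8R8_SRGB, B8G8R8A8_SRGB,
    D32_FLOAT, R32_FLOAT,
};

const char* PickConversion(PixelFormat dst, PixelFormat src) {
    switch (dst) {
    case PixelFormat::D32_FLOAT:
        // Any of the four 8-bit color layouts can feed the new ABGR8->D32F path.
        if (src == PixelFormat::A8B8G8R8_UNORM || src == PixelFormat::B8G8R8A8_UNORM ||
            src == PixelFormat::A8B8G8R8_SRGB || src == PixelFormat::B8G8R8A8_SRGB) {
            return "ConvertABGR8ToD32F";
        }
        if (src == PixelFormat::R32_FLOAT) {
            return "ConvertR32ToD32";
        }
        break;
    case PixelFormat::A8B8G8R8_SRGB:
    case PixelFormat::B8G8R8A8_SRGB:
    case PixelFormat::B8G8R8A8_UNORM:
        if (src == PixelFormat::D32_FLOAT) {
            return "ConvertD32FToABGR8";
        }
        break;
    default:
        break;
    }
    return "generic copy";
}

int main() {
    std::printf("%s\n", PickConversion(PixelFormat::D32_FLOAT, PixelFormat::B8G8R8A8_SRGB));
}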