author     bunnei <bunneidev@gmail.com>  2022-02-03 01:58:55 +0100
committer  GitHub <noreply@github.com>   2022-02-03 01:58:55 +0100
commit     03186af6a156a7d46aa1fa9d7b4f9ebbb221d4aa (patch)
tree       40b52d9a20f917144653ce926bbdb827f63e9180 /src/core
parent     Merge pull request #7838 from lioncash/noncopy (diff)
parent     hle: kernel: KPageTable: Migrate locks to KScopedLightLock. (diff)
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp  65
-rw-r--r--  src/core/hle/kernel/k_page_table.h    15
2 files changed, 46 insertions, 34 deletions
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 393214082..912853e5c 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -61,7 +61,8 @@ constexpr std::size_t GetSizeInRange(const KMemoryInfo& info, VAddr start, VAddr
} // namespace
-KPageTable::KPageTable(Core::System& system_) : system{system_} {}
+KPageTable::KPageTable(Core::System& system_)
+ : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {}
KPageTable::~KPageTable() = default;
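The constructor change above initializes the two new KLightLock members with the kernel instance, and every std::lock_guard in this file becomes a KScopedLightLock in the hunks that follow. KScopedLightLock's implementation is not part of this diff; the following is a minimal sketch of the RAII pattern its usage here implies (lock on construction, unlock on destruction), written against a stand-in lock type rather than the real KLightLock.

    #include <mutex>

    // Stand-in for KLightLock: any type exposing Lock()/Unlock() fits this sketch.
    class StandInLightLock {
    public:
        void Lock() { m_mutex.lock(); }
        void Unlock() { m_mutex.unlock(); }

    private:
        std::mutex m_mutex;
    };

    // Minimal RAII wrapper mirroring how KScopedLightLock is used in this diff:
    // the lock is taken in the constructor and released in the destructor, so
    // every early return (R_UNLESS/R_TRY) in the page table methods releases
    // general_lock automatically, just as std::lock_guard did before.
    class ScopedLightLockSketch {
    public:
        explicit ScopedLightLockSketch(StandInLightLock& lock) : m_lock{lock} {
            m_lock.Lock();
        }
        ~ScopedLightLockSketch() {
            m_lock.Unlock();
        }
        ScopedLightLockSketch(const ScopedLightLockSketch&) = delete;
        ScopedLightLockSketch& operator=(const ScopedLightLockSketch&) = delete;

    private:
        StandInLightLock& m_lock;
    };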
@@ -284,7 +285,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
// Lock the table.
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
// Verify that the destination memory is unmapped.
R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
@@ -302,7 +303,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
}
ResultCode KPageTable::MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
const std::size_t num_pages{size / PageSize};
@@ -339,7 +340,7 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t
}
ResultCode KPageTable::UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
if (!size) {
return ResultSuccess;
@@ -373,7 +374,7 @@ ResultCode KPageTable::UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size
ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
KPageTable& src_page_table, VAddr src_addr) {
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
const std::size_t num_pages{size / PageSize};
@@ -401,10 +402,10 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
// Lock the physical memory lock.
- std::lock_guard phys_lk(map_physical_memory_lock);
+ KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
// Lock the table.
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
std::size_t mapped_size{};
const VAddr end_addr{addr + size};
@@ -480,7 +481,11 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
}
ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
- std::lock_guard lock{page_table_lock};
+ // Lock the physical memory lock.
+ KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
const VAddr end_addr{addr + size};
ResultCode result{ResultSuccess};
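With this hunk, MapPhysicalMemory and UnmapPhysicalMemory both acquire map_physical_memory_lock before general_lock, so the two paths share a single acquisition order. A minimal sketch of that ordering, using std::mutex as a stand-in for KLightLock:

    #include <mutex>

    // Stand-ins for the two KLightLock members.
    std::mutex map_physical_memory_lock;
    std::mutex general_lock;

    // Locking shape shared by MapPhysicalMemory and UnmapPhysicalMemory after
    // this change: physical-memory lock first, table lock second, on every
    // path. A single fixed acquisition order is what rules out lock-order
    // deadlocks between the two operations.
    void MapOrUnmapPhysicalMemorySketch() {
        std::scoped_lock phys_lk{map_physical_memory_lock};
        std::scoped_lock table_lk{general_lock};
        // ... validate the range and update the page table here ...
    }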
@@ -542,7 +547,7 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
}
ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
KMemoryState src_state{};
CASCADE_CODE(CheckMemoryState(
@@ -581,7 +586,7 @@ ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t siz
}
ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
KMemoryState src_state{};
CASCADE_CODE(CheckMemoryState(
@@ -624,6 +629,8 @@ ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t s
ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
KMemoryPermission perm) {
+ ASSERT(this->IsLockedByCurrentThread());
+
VAddr cur_addr{addr};
for (const auto& node : page_linked_list.Nodes()) {
@@ -652,7 +659,7 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
// Lock the table.
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
// Check the memory state.
R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
@@ -669,6 +676,8 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
}
ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
+ ASSERT(this->IsLockedByCurrentThread());
+
VAddr cur_addr{addr};
for (const auto& node : page_linked_list.Nodes()) {
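The internal MapPages/UnmapPages overloads above no longer take the lock themselves; they assert that the caller already holds it, while the public overloads in the next hunks take general_lock before delegating. A sketch of that caller-holds-the-lock contract, with a stand-in lock that tracks its owner (the real code relies on KLightLock's own ownership check):

    #include <cassert>
    #include <mutex>
    #include <thread>

    // Stand-in lock that remembers which thread holds it, so the sketch can
    // mimic KLightLock::IsLockedByCurrentThread().
    class OwnerTrackingLock {
    public:
        void Lock() {
            m_mutex.lock();
            m_owner = std::this_thread::get_id();
        }
        void Unlock() {
            m_owner = {};
            m_mutex.unlock();
        }
        bool IsLockedByCurrentThread() const {
            return m_owner == std::this_thread::get_id();
        }

    private:
        std::mutex m_mutex;
        std::thread::id m_owner{};
    };

    OwnerTrackingLock general_lock;

    // Internal helper: never locks, only asserts that the caller already did.
    void MapPagesImplSketch() {
        assert(general_lock.IsLockedByCurrentThread());
        // ... walk the page linked list and map each node ...
    }

    // Public entry point: takes the lock (the real code uses KScopedLightLock),
    // then delegates to the helper.
    void MapPagesSketch() {
        general_lock.Lock();
        MapPagesImplSketch();
        general_lock.Unlock();
    }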
@@ -693,7 +702,7 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
// Lock the table.
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
// Check the memory state.
R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None,
@@ -714,7 +723,7 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
const size_t num_pages = size / PageSize;
// Lock the table.
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
// Verify we can change the memory permission.
KMemoryState old_state;
@@ -768,7 +777,7 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
}
KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
return block_manager->FindBlock(addr).GetMemoryInfo();
}
@@ -783,7 +792,7 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
}
ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
KMemoryState state{};
KMemoryAttribute attribute{};
@@ -801,7 +810,7 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo
}
ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
KMemoryState state{};
@@ -820,7 +829,7 @@ ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
const size_t num_pages = size / PageSize;
// Lock the table.
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
// Verify we can change the memory permission.
KMemoryState old_state;
@@ -849,7 +858,7 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask
KMemoryAttribute::SetMask);
// Lock the table.
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
// Verify we can change the memory attribute.
KMemoryState old_state;
@@ -880,7 +889,7 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask
ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
// Lock the table.
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
// Only process page tables are allowed to set heap size.
ASSERT(!this->IsKernel());
@@ -891,15 +900,15 @@ ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
}
ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
- // Lock the physical memory lock.
- std::lock_guard phys_lk(map_physical_memory_lock);
+ // Lock the physical memory mutex.
+ KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
// Try to perform a reduction in heap, instead of an extension.
VAddr cur_address{};
std::size_t allocation_size{};
{
// Lock the table.
- std::lock_guard lk(page_table_lock);
+ KScopedLightLock lk(general_lock);
// Validate that setting heap size is possible at all.
R_UNLESS(!is_kernel, ResultOutOfMemory);
@@ -964,7 +973,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
// Map the pages.
{
// Lock the table.
- std::lock_guard lk(page_table_lock);
+ KScopedLightLock lk(general_lock);
// Ensure that the heap hasn't changed since we began executing.
ASSERT(cur_address == current_heap_end);
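SetHeapSize keeps the physical-memory lock for the whole call but takes the table lock in two separate scopes, dropping it while memory is allocated and re-checking the heap end after reacquiring it, as the assertion above shows. A sketch of that shape, again with std::mutex stand-ins for the KLightLock members:

    #include <cassert>
    #include <cstddef>
    #include <mutex>

    // Stand-ins for the two KLightLock members and the tracked heap end.
    std::mutex map_physical_memory_lock;
    std::mutex general_lock;
    std::size_t current_heap_end = 0;

    // SetHeapSize locking shape after this change: the physical-memory lock is
    // held across the whole operation, while the table lock is taken in two
    // separate scopes. Because the table lock is released in between (while
    // physical memory is being allocated), the second scope re-validates that
    // the heap end has not moved in the meantime.
    void SetHeapSizeSketch(std::size_t new_heap_end) {
        std::scoped_lock phys_lk{map_physical_memory_lock};

        std::size_t observed_heap_end{};
        {
            std::scoped_lock table_lk{general_lock};
            observed_heap_end = current_heap_end;
            // ... decide whether to shrink or grow, compute the allocation size ...
        }

        // ... allocate physical memory without holding the table lock ...

        {
            std::scoped_lock table_lk{general_lock};
            // Ensure that the heap hasn't changed since we began executing.
            assert(observed_heap_end == current_heap_end);
            current_heap_end = new_heap_end;
            // ... map the newly allocated pages ...
        }
    }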
@@ -1006,7 +1015,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
bool is_map_only, VAddr region_start,
std::size_t region_num_pages, KMemoryState state,
KMemoryPermission perm, PAddr map_addr) {
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
if (!CanContain(region_start, region_num_pages * PageSize, state)) {
return ResultInvalidCurrentMemory;
@@ -1037,7 +1046,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
}
ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
KMemoryPermission perm{};
if (const ResultCode result{CheckMemoryState(
@@ -1060,7 +1069,7 @@ ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
}
ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
KMemoryPermission perm{};
if (const ResultCode result{CheckMemoryState(
@@ -1083,7 +1092,7 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
}
ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
KMemoryPermission new_perm = KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite;
@@ -1110,7 +1119,7 @@ ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
}
ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
- std::lock_guard lock{page_table_lock};
+ KScopedLightLock lk(general_lock);
KMemoryPermission new_perm = KMemoryPermission::UserReadWrite;
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index ecae939a0..c98887d34 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -5,12 +5,12 @@
#pragma once
#include <memory>
-#include <mutex>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/page_table.h"
#include "core/file_sys/program_metadata.h"
+#include "core/hle/kernel/k_light_lock.h"
#include "core/hle/kernel/k_memory_block.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/result.h"
@@ -147,11 +147,12 @@ private:
}
bool IsLockedByCurrentThread() const {
- return true;
+ return general_lock.IsLockedByCurrentThread();
}
- std::recursive_mutex page_table_lock;
- std::mutex map_physical_memory_lock;
+ mutable KLightLock general_lock;
+ mutable KLightLock map_physical_memory_lock;
+
std::unique_ptr<KMemoryBlockManager> block_manager;
public:
@@ -210,7 +211,7 @@ public:
return alias_code_region_end - alias_code_region_start;
}
size_t GetNormalMemorySize() {
- std::lock_guard lk(page_table_lock);
+ KScopedLightLock lk(general_lock);
return GetHeapSize() + mapped_physical_memory_size;
}
constexpr std::size_t GetAddressSpaceWidth() const {
@@ -252,7 +253,9 @@ public:
constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
return !IsOutsideASLRRegion(address, size);
}
- constexpr PAddr GetPhysicalAddr(VAddr addr) {
+
+ PAddr GetPhysicalAddr(VAddr addr) {
+ ASSERT(IsLockedByCurrentThread());
const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
ASSERT(backing_addr);
return backing_addr + addr;
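The final hunk also drops constexpr from GetPhysicalAddr, presumably because the function now performs a runtime check that the calling thread holds general_lock before reading the backing-address array. A minimal sketch of that assert-then-translate shape, with stand-in types in place of Common::PageTable and KLightLock (VAddr, PAddr, PageBits, and the stubbed ownership check below are assumptions for illustration only):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Stand-ins; the real code uses Common::PageTable and KLightLock.
    using VAddr = std::uint64_t;
    using PAddr = std::uint64_t;
    constexpr std::size_t PageBits = 12; // assumed 4 KiB pages for the sketch

    struct PageTableImplSketch {
        std::vector<PAddr> backing_addr;
    };

    // For this sketch the ownership check is stubbed out so the example links;
    // in the header hunk above it forwards to general_lock.
    bool IsLockedByCurrentThreadSketch() {
        return true;
    }

    // Checks at runtime that the caller holds the table lock, then translates
    // the virtual address exactly as the original code did.
    PAddr GetPhysicalAddrSketch(const PageTableImplSketch& impl, VAddr addr) {
        assert(IsLockedByCurrentThreadSketch());
        const PAddr backing_addr = impl.backing_addr[addr >> PageBits];
        assert(backing_addr != 0);
        return backing_addr + addr;
    }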