Diffstat (limited to 'src/core/hle/kernel')
48 files changed, 3380 insertions, 1485 deletions
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index 5b3feec66..e4f43a053 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -19,6 +19,7 @@ #include "core/hle/kernel/k_server_session.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/service_thread.h" #include "core/memory.h" namespace Kernel { @@ -56,16 +57,103 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co } } +Result SessionRequestManager::CompleteSyncRequest(KServerSession* server_session, + HLERequestContext& context) { + Result result = ResultSuccess; + + // If the session has been converted to a domain, handle the domain request + if (this->HasSessionRequestHandler(context)) { + if (IsDomain() && context.HasDomainMessageHeader()) { + result = HandleDomainSyncRequest(server_session, context); + // If there is no domain header, the regular session handler is used + } else if (this->HasSessionHandler()) { + // If this manager has an associated HLE handler, forward the request to it. + result = this->SessionHandler().HandleSyncRequest(*server_session, context); + } + } else { + ASSERT_MSG(false, "Session handler is invalid, stubbing response!"); + IPC::ResponseBuilder rb(context, 2); + rb.Push(ResultSuccess); + } + + if (convert_to_domain) { + ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance."); + this->ConvertToDomain(); + convert_to_domain = false; + } + + return result; +} + +Result SessionRequestManager::HandleDomainSyncRequest(KServerSession* server_session, + HLERequestContext& context) { + if (!context.HasDomainMessageHeader()) { + return ResultSuccess; + } + + // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs + context.SetSessionRequestManager(server_session->GetSessionRequestManager()); + + // If there is a DomainMessageHeader, then this is CommandType "Request" + const auto& domain_message_header = context.GetDomainMessageHeader(); + const u32 object_id{domain_message_header.object_id}; + switch (domain_message_header.command) { + case IPC::DomainMessageHeader::CommandType::SendMessage: + if (object_id > this->DomainHandlerCount()) { + LOG_CRITICAL(IPC, + "object_id {} is too big! 
This probably means a recent service call " + "needed to return a new interface!", + object_id); + ASSERT(false); + return ResultSuccess; // Ignore error if asserts are off + } + if (auto strong_ptr = this->DomainHandler(object_id - 1).lock()) { + return strong_ptr->HandleSyncRequest(*server_session, context); + } else { + ASSERT(false); + return ResultSuccess; + } + + case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: { + LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id); + + this->CloseDomainHandler(object_id - 1); + + IPC::ResponseBuilder rb{context, 2}; + rb.Push(ResultSuccess); + return ResultSuccess; + } + } + + LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value()); + ASSERT(false); + return ResultSuccess; +} + +Result SessionRequestManager::QueueSyncRequest(KSession* parent, + std::shared_ptr<HLERequestContext>&& context) { + // Ensure we have a session request handler + if (this->HasSessionRequestHandler(*context)) { + if (auto strong_ptr = this->GetServiceThread().lock()) { + strong_ptr->QueueSyncRequest(*parent, std::move(context)); + } else { + ASSERT_MSG(false, "strong_ptr is nullptr!"); + } + } else { + ASSERT_MSG(false, "handler is invalid!"); + } + + return ResultSuccess; +} + void SessionRequestHandler::ClientConnected(KServerSession* session) { - session->ClientConnected(shared_from_this()); + session->GetSessionRequestManager()->SetSessionHandler(shared_from_this()); // Ensure our server session is tracked globally. kernel.RegisterServerObject(session); } -void SessionRequestHandler::ClientDisconnected(KServerSession* session) { - session->ClientDisconnected(); -} +void SessionRequestHandler::ClientDisconnected(KServerSession* session) {} HLERequestContext::HLERequestContext(KernelCore& kernel_, Core::Memory::Memory& memory_, KServerSession* server_session_, KThread* thread_) @@ -126,7 +214,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32 // Padding to align to 16 bytes rp.AlignWithPadding(); - if (Session()->IsDomain() && + if (Session()->GetSessionRequestManager()->IsDomain() && ((command_header->type == IPC::CommandType::Request || command_header->type == IPC::CommandType::RequestWithContext) || !incoming)) { @@ -135,7 +223,7 @@ void HLERequestContext::ParseCommandBuffer(const KHandleTable& handle_table, u32 if (incoming || domain_message_header) { domain_message_header = rp.PopRaw<IPC::DomainMessageHeader>(); } else { - if (Session()->IsDomain()) { + if (Session()->GetSessionRequestManager()->IsDomain()) { LOG_WARNING(IPC, "Domain request has no DomainMessageHeader!"); } } @@ -228,12 +316,12 @@ Result HLERequestContext::WriteToOutgoingCommandBuffer(KThread& requesting_threa // Write the domain objects to the command buffer, these go after the raw untranslated data. // TODO(Subv): This completely ignores C buffers. 
- if (Session()->IsDomain()) { + if (server_session->GetSessionRequestManager()->IsDomain()) { current_offset = domain_offset - static_cast<u32>(outgoing_domain_objects.size()); - for (const auto& object : outgoing_domain_objects) { - server_session->AppendDomainHandler(object); - cmd_buf[current_offset++] = - static_cast<u32_le>(server_session->NumDomainRequestHandlers()); + for (auto& object : outgoing_domain_objects) { + server_session->GetSessionRequestManager()->AppendDomainHandler(std::move(object)); + cmd_buf[current_offset++] = static_cast<u32_le>( + server_session->GetSessionRequestManager()->DomainHandlerCount()); } } diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h index 99265ce90..a0522bca0 100644 --- a/src/core/hle/kernel/hle_ipc.h +++ b/src/core/hle/kernel/hle_ipc.h @@ -43,13 +43,13 @@ class Domain; class HLERequestContext; class KAutoObject; class KernelCore; +class KEvent; class KHandleTable; class KProcess; class KServerSession; class KThread; class KReadableEvent; class KSession; -class KWritableEvent; class ServiceThread; enum class ThreadWakeupReason; @@ -121,6 +121,10 @@ public: is_domain = true; } + void ConvertToDomainOnRequestEnd() { + convert_to_domain = true; + } + std::size_t DomainHandlerCount() const { return domain_handlers.size(); } @@ -164,7 +168,12 @@ public: bool HasSessionRequestHandler(const HLERequestContext& context) const; + Result HandleDomainSyncRequest(KServerSession* server_session, HLERequestContext& context); + Result CompleteSyncRequest(KServerSession* server_session, HLERequestContext& context); + Result QueueSyncRequest(KSession* parent, std::shared_ptr<HLERequestContext>&& context); + private: + bool convert_to_domain{}; bool is_domain{}; SessionRequestHandlerPtr session_handler; std::vector<SessionRequestHandlerPtr> domain_handlers; diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp index 9b6b284d0..477e4e407 100644 --- a/src/core/hle/kernel/init/init_slab_setup.cpp +++ b/src/core/hle/kernel/init/init_slab_setup.cpp @@ -18,6 +18,7 @@ #include "core/hle/kernel/k_process.h" #include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_session.h" +#include "core/hle/kernel/k_session_request.h" #include "core/hle/kernel/k_shared_memory.h" #include "core/hle/kernel/k_shared_memory_info.h" #include "core/hle/kernel/k_system_control.h" @@ -34,6 +35,7 @@ namespace Kernel::Init { HANDLER(KThread, (SLAB_COUNT(KThread)), ##__VA_ARGS__) \ HANDLER(KEvent, (SLAB_COUNT(KEvent)), ##__VA_ARGS__) \ HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \ + HANDLER(KSessionRequest, (SLAB_COUNT(KSession) * 2), ##__VA_ARGS__) \ HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \ HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ##__VA_ARGS__) \ HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \ @@ -94,8 +96,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd // TODO(bunnei): Fix this once we support the kernel virtual memory layout. 
if (size > 0) { - void* backing_kernel_memory{ - system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))}; + void* backing_kernel_memory{system.DeviceMemory().GetPointer<void>( + TranslateSlabAddrToPhysical(memory_layout, start))}; const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1); ASSERT(region != nullptr); @@ -181,7 +183,7 @@ void InitializeKPageBufferSlabHeap(Core::System& system) { ASSERT(slab_address != 0); // Initialize the slabheap. - KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address), + KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address), slab_size); } diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp index cc2a0f7ca..10265c23c 100644 --- a/src/core/hle/kernel/k_class_token.cpp +++ b/src/core/hle/kernel/k_class_token.cpp @@ -18,7 +18,6 @@ #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_transfer_memory.h" -#include "core/hle/kernel/k_writable_event.h" namespace Kernel { @@ -42,13 +41,12 @@ static_assert(ClassToken<KPort> == 0b10000101'00000000); static_assert(ClassToken<KSession> == 0b00011001'00000000); static_assert(ClassToken<KSharedMemory> == 0b00101001'00000000); static_assert(ClassToken<KEvent> == 0b01001001'00000000); -static_assert(ClassToken<KWritableEvent> == 0b10001001'00000000); // static_assert(ClassToken<KLightClientSession> == 0b00110001'00000000); // static_assert(ClassToken<KLightServerSession> == 0b01010001'00000000); -static_assert(ClassToken<KTransferMemory> == 0b10010001'00000000); +static_assert(ClassToken<KTransferMemory> == 0b01010001'00000000); // static_assert(ClassToken<KDeviceAddressSpace> == 0b01100001'00000000); // static_assert(ClassToken<KSessionRequest> == 0b10100001'00000000); -static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000); +static_assert(ClassToken<KCodeMemory> == 0b10100001'00000000); // Ensure that the token hierarchy is correct. @@ -73,13 +71,12 @@ static_assert(ClassToken<KPort> == ((0b10000101 << 8) | ClassToken<KAutoObject>) static_assert(ClassToken<KSession> == ((0b00011001 << 8) | ClassToken<KAutoObject>)); static_assert(ClassToken<KSharedMemory> == ((0b00101001 << 8) | ClassToken<KAutoObject>)); static_assert(ClassToken<KEvent> == ((0b01001001 << 8) | ClassToken<KAutoObject>)); -static_assert(ClassToken<KWritableEvent> == ((0b10001001 << 8) | ClassToken<KAutoObject>)); // static_assert(ClassToken<KLightClientSession> == ((0b00110001 << 8) | ClassToken<KAutoObject>)); // static_assert(ClassToken<KLightServerSession> == ((0b01010001 << 8) | ClassToken<KAutoObject>)); -static_assert(ClassToken<KTransferMemory> == ((0b10010001 << 8) | ClassToken<KAutoObject>)); +static_assert(ClassToken<KTransferMemory> == ((0b01010001 << 8) | ClassToken<KAutoObject>)); // static_assert(ClassToken<KDeviceAddressSpace> == ((0b01100001 << 8) | ClassToken<KAutoObject>)); // static_assert(ClassToken<KSessionRequest> == ((0b10100001 << 8) | ClassToken<KAutoObject>)); -static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>)); +static_assert(ClassToken<KCodeMemory> == ((0b10100001 << 8) | ClassToken<KAutoObject>)); // Ensure that the token hierarchy reflects the class hierarchy. 
@@ -110,7 +107,6 @@ static_assert(std::is_final_v<KPort> && std::is_base_of_v<KAutoObject, KPort>); static_assert(std::is_final_v<KSession> && std::is_base_of_v<KAutoObject, KSession>); static_assert(std::is_final_v<KSharedMemory> && std::is_base_of_v<KAutoObject, KSharedMemory>); static_assert(std::is_final_v<KEvent> && std::is_base_of_v<KAutoObject, KEvent>); -static_assert(std::is_final_v<KWritableEvent> && std::is_base_of_v<KAutoObject, KWritableEvent>); // static_assert(std::is_final_v<KLightClientSession> && // std::is_base_of_v<KAutoObject, KLightClientSession>); // static_assert(std::is_final_v<KLightServerSession> && diff --git a/src/core/hle/kernel/k_class_token.h b/src/core/hle/kernel/k_class_token.h index c9001ae3d..ab20e00ff 100644 --- a/src/core/hle/kernel/k_class_token.h +++ b/src/core/hle/kernel/k_class_token.h @@ -101,7 +101,6 @@ public: KSession, KSharedMemory, KEvent, - KWritableEvent, KLightClientSession, KLightServerSession, KTransferMemory, diff --git a/src/core/hle/kernel/k_client_session.cpp b/src/core/hle/kernel/k_client_session.cpp index b2a887b14..b4197a8d5 100644 --- a/src/core/hle/kernel/k_client_session.cpp +++ b/src/core/hle/kernel/k_client_session.cpp @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project // SPDX-License-Identifier: GPL-2.0-or-later +#include "common/scope_exit.h" #include "core/hle/kernel/hle_ipc.h" #include "core/hle/kernel/k_client_session.h" #include "core/hle/kernel/k_server_session.h" @@ -10,6 +11,8 @@ namespace Kernel { +static constexpr u32 MessageBufferSize = 0x100; + KClientSession::KClientSession(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {} KClientSession::~KClientSession() = default; @@ -21,10 +24,17 @@ void KClientSession::Destroy() { void KClientSession::OnServerClosed() {} -Result KClientSession::SendSyncRequest(KThread* thread, Core::Memory::Memory& memory, - Core::Timing::CoreTiming& core_timing) { - // Signal the server session that new data is available - return parent->GetServerSession().HandleSyncRequest(thread, memory, core_timing); +Result KClientSession::SendSyncRequest() { + // Create a session request. + KSessionRequest* request = KSessionRequest::Create(kernel); + R_UNLESS(request != nullptr, ResultOutOfResource); + SCOPE_EXIT({ request->Close(); }); + + // Initialize the request. + request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize); + + // Send the request. + return parent->GetServerSession().OnRequest(request); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_client_session.h b/src/core/hle/kernel/k_client_session.h index 0c750d756..b4a19c546 100644 --- a/src/core/hle/kernel/k_client_session.h +++ b/src/core/hle/kernel/k_client_session.h @@ -46,8 +46,7 @@ public: return parent; } - Result SendSyncRequest(KThread* thread, Core::Memory::Memory& memory, - Core::Timing::CoreTiming& core_timing); + Result SendSyncRequest(); void OnServerClosed(); diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp index da57ceb21..4b1c134d4 100644 --- a/src/core/hle/kernel/k_code_memory.cpp +++ b/src/core/hle/kernel/k_code_memory.cpp @@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si // Clear the memory. 
for (const auto& block : m_page_group.Nodes()) { - std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize()); + std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize()); } // Set remaining tracking members. diff --git a/src/core/hle/kernel/k_dynamic_page_manager.h b/src/core/hle/kernel/k_dynamic_page_manager.h new file mode 100644 index 000000000..9076c8fa3 --- /dev/null +++ b/src/core/hle/kernel/k_dynamic_page_manager.h @@ -0,0 +1,136 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "common/alignment.h" +#include "common/common_types.h" +#include "core/hle/kernel/k_page_bitmap.h" +#include "core/hle/kernel/k_spin_lock.h" +#include "core/hle/kernel/memory_types.h" +#include "core/hle/kernel/svc_results.h" + +namespace Kernel { + +class KDynamicPageManager { +public: + class PageBuffer { + private: + u8 m_buffer[PageSize]; + }; + static_assert(sizeof(PageBuffer) == PageSize); + +public: + KDynamicPageManager() = default; + + template <typename T> + T* GetPointer(VAddr addr) { + return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address)); + } + + template <typename T> + const T* GetPointer(VAddr addr) const { + return reinterpret_cast<T*>(m_backing_memory.data() + (addr - m_address)); + } + + Result Initialize(VAddr addr, size_t sz) { + // We need to have positive size. + R_UNLESS(sz > 0, ResultOutOfMemory); + m_backing_memory.resize(sz); + + // Calculate management overhead. + const size_t management_size = + KPageBitmap::CalculateManagementOverheadSize(sz / sizeof(PageBuffer)); + const size_t allocatable_size = sz - management_size; + + // Set tracking fields. + m_address = addr; + m_size = Common::AlignDown(allocatable_size, sizeof(PageBuffer)); + m_count = allocatable_size / sizeof(PageBuffer); + R_UNLESS(m_count > 0, ResultOutOfMemory); + + // Clear the management region. + u64* management_ptr = GetPointer<u64>(m_address + allocatable_size); + std::memset(management_ptr, 0, management_size); + + // Initialize the bitmap. + m_page_bitmap.Initialize(management_ptr, m_count); + + // Free the pages to the bitmap. + for (size_t i = 0; i < m_count; i++) { + // Ensure the freed page is all-zero. + std::memset(GetPointer<PageBuffer>(m_address) + i, 0, PageSize); + + // Set the bit for the free page. + m_page_bitmap.SetBit(i); + } + + R_SUCCEED(); + } + + VAddr GetAddress() const { + return m_address; + } + size_t GetSize() const { + return m_size; + } + size_t GetUsed() const { + return m_used; + } + size_t GetPeak() const { + return m_peak; + } + size_t GetCount() const { + return m_count; + } + + PageBuffer* Allocate() { + // Take the lock. + // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. + KScopedSpinLock lk(m_lock); + + // Find a random free block. + s64 soffset = m_page_bitmap.FindFreeBlock(true); + if (soffset < 0) [[unlikely]] { + return nullptr; + } + + const size_t offset = static_cast<size_t>(soffset); + + // Update our tracking. + m_page_bitmap.ClearBit(offset); + m_peak = std::max(m_peak, (++m_used)); + + return GetPointer<PageBuffer>(m_address) + offset; + } + + void Free(PageBuffer* pb) { + // Ensure all pages in the heap are zero. + std::memset(pb, 0, PageSize); + + // Take the lock. + // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. + KScopedSpinLock lk(m_lock); + + // Set the bit for the free page. 
+ size_t offset = (reinterpret_cast<uintptr_t>(pb) - m_address) / sizeof(PageBuffer); + m_page_bitmap.SetBit(offset); + + // Decrement our used count. + --m_used; + } + +private: + KSpinLock m_lock; + KPageBitmap m_page_bitmap; + size_t m_used{}; + size_t m_peak{}; + size_t m_count{}; + VAddr m_address{}; + size_t m_size{}; + + // TODO(bunnei): Back by host memory until we emulate kernel virtual address space. + std::vector<u8> m_backing_memory; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_dynamic_resource_manager.h b/src/core/hle/kernel/k_dynamic_resource_manager.h new file mode 100644 index 000000000..1ce517e8e --- /dev/null +++ b/src/core/hle/kernel/k_dynamic_resource_manager.h @@ -0,0 +1,58 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "common/common_funcs.h" +#include "core/hle/kernel/k_dynamic_slab_heap.h" +#include "core/hle/kernel/k_memory_block.h" + +namespace Kernel { + +template <typename T, bool ClearNode = false> +class KDynamicResourceManager { + YUZU_NON_COPYABLE(KDynamicResourceManager); + YUZU_NON_MOVEABLE(KDynamicResourceManager); + +public: + using DynamicSlabType = KDynamicSlabHeap<T, ClearNode>; + +public: + constexpr KDynamicResourceManager() = default; + + constexpr size_t GetSize() const { + return m_slab_heap->GetSize(); + } + constexpr size_t GetUsed() const { + return m_slab_heap->GetUsed(); + } + constexpr size_t GetPeak() const { + return m_slab_heap->GetPeak(); + } + constexpr size_t GetCount() const { + return m_slab_heap->GetCount(); + } + + void Initialize(KDynamicPageManager* page_allocator, DynamicSlabType* slab_heap) { + m_page_allocator = page_allocator; + m_slab_heap = slab_heap; + } + + T* Allocate() const { + return m_slab_heap->Allocate(m_page_allocator); + } + + void Free(T* t) const { + m_slab_heap->Free(t); + } + +private: + KDynamicPageManager* m_page_allocator{}; + DynamicSlabType* m_slab_heap{}; +}; + +class KMemoryBlockSlabManager : public KDynamicResourceManager<KMemoryBlock> {}; + +using KMemoryBlockSlabHeap = typename KMemoryBlockSlabManager::DynamicSlabType; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_dynamic_slab_heap.h b/src/core/hle/kernel/k_dynamic_slab_heap.h new file mode 100644 index 000000000..3a0ddd050 --- /dev/null +++ b/src/core/hle/kernel/k_dynamic_slab_heap.h @@ -0,0 +1,122 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <atomic> + +#include "common/common_funcs.h" +#include "core/hle/kernel/k_dynamic_page_manager.h" +#include "core/hle/kernel/k_slab_heap.h" + +namespace Kernel { + +template <typename T, bool ClearNode = false> +class KDynamicSlabHeap : protected impl::KSlabHeapImpl { + YUZU_NON_COPYABLE(KDynamicSlabHeap); + YUZU_NON_MOVEABLE(KDynamicSlabHeap); + +public: + constexpr KDynamicSlabHeap() = default; + + constexpr VAddr GetAddress() const { + return m_address; + } + constexpr size_t GetSize() const { + return m_size; + } + constexpr size_t GetUsed() const { + return m_used.load(); + } + constexpr size_t GetPeak() const { + return m_peak.load(); + } + constexpr size_t GetCount() const { + return m_count.load(); + } + + constexpr bool IsInRange(VAddr addr) const { + return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1; + } + + void Initialize(KDynamicPageManager* page_allocator, size_t num_objects) { + ASSERT(page_allocator != nullptr); + + // Initialize 
members. + m_address = page_allocator->GetAddress(); + m_size = page_allocator->GetSize(); + + // Initialize the base allocator. + KSlabHeapImpl::Initialize(); + + // Allocate until we have the correct number of objects. + while (m_count.load() < num_objects) { + auto* allocated = reinterpret_cast<T*>(page_allocator->Allocate()); + ASSERT(allocated != nullptr); + + for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) { + KSlabHeapImpl::Free(allocated + i); + } + + m_count += sizeof(PageBuffer) / sizeof(T); + } + } + + T* Allocate(KDynamicPageManager* page_allocator) { + T* allocated = static_cast<T*>(KSlabHeapImpl::Allocate()); + + // If we successfully allocated and we should clear the node, do so. + if constexpr (ClearNode) { + if (allocated != nullptr) [[likely]] { + reinterpret_cast<KSlabHeapImpl::Node*>(allocated)->next = nullptr; + } + } + + // If we fail to allocate, try to get a new page from our next allocator. + if (allocated == nullptr) [[unlikely]] { + if (page_allocator != nullptr) { + allocated = reinterpret_cast<T*>(page_allocator->Allocate()); + if (allocated != nullptr) { + // If we succeeded in getting a page, free the rest to our slab. + for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) { + KSlabHeapImpl::Free(allocated + i); + } + m_count += sizeof(PageBuffer) / sizeof(T); + } + } + } + + if (allocated != nullptr) [[likely]] { + // Construct the object. + std::construct_at(allocated); + + // Update our tracking. + const size_t used = ++m_used; + size_t peak = m_peak.load(); + while (peak < used) { + if (m_peak.compare_exchange_weak(peak, used, std::memory_order_relaxed)) { + break; + } + } + } + + return allocated; + } + + void Free(T* t) { + KSlabHeapImpl::Free(t); + --m_used; + } + +private: + using PageBuffer = KDynamicPageManager::PageBuffer; + +private: + std::atomic<size_t> m_used{}; + std::atomic<size_t> m_peak{}; + std::atomic<size_t> m_count{}; + VAddr m_address{}; + size_t m_size{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_event.cpp b/src/core/hle/kernel/k_event.cpp index e52fafbc7..78ca59463 100644 --- a/src/core/hle/kernel/k_event.cpp +++ b/src/core/hle/kernel/k_event.cpp @@ -8,39 +8,45 @@ namespace Kernel { KEvent::KEvent(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer{kernel_}, readable_event{kernel_}, writable_event{ - kernel_} {} + : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_readable_event{kernel_} {} KEvent::~KEvent() = default; -void KEvent::Initialize(std::string&& name_, KProcess* owner_) { - // Increment reference count. - // Because reference count is one on creation, this will result - // in a reference count of two. Thus, when both readable and - // writable events are closed this object will be destroyed. - Open(); +void KEvent::Initialize(KProcess* owner) { + // Create our readable event. + KAutoObject::Create(std::addressof(m_readable_event)); - // Create our sub events. - KAutoObject::Create(std::addressof(readable_event)); - KAutoObject::Create(std::addressof(writable_event)); - - // Initialize our sub sessions. - readable_event.Initialize(this, name_ + ":Readable"); - writable_event.Initialize(this, name_ + ":Writable"); + // Initialize our readable event. + m_readable_event.Initialize(this); // Set our owner process. - owner = owner_; - owner->Open(); + m_owner = owner; + m_owner->Open(); // Mark initialized. 
- name = std::move(name_); - initialized = true; + m_initialized = true; } void KEvent::Finalize() { KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList>::Finalize(); } +Result KEvent::Signal() { + KScopedSchedulerLock sl{kernel}; + + R_SUCCEED_IF(m_readable_event_destroyed); + + return m_readable_event.Signal(); +} + +Result KEvent::Clear() { + KScopedSchedulerLock sl{kernel}; + + R_SUCCEED_IF(m_readable_event_destroyed); + + return m_readable_event.Clear(); +} + void KEvent::PostDestroy(uintptr_t arg) { // Release the event count resource the owner process holds. KProcess* owner = reinterpret_cast<KProcess*>(arg); diff --git a/src/core/hle/kernel/k_event.h b/src/core/hle/kernel/k_event.h index 2ff828feb..48ce7d9a0 100644 --- a/src/core/hle/kernel/k_event.h +++ b/src/core/hle/kernel/k_event.h @@ -4,14 +4,12 @@ #pragma once #include "core/hle/kernel/k_readable_event.h" -#include "core/hle/kernel/k_writable_event.h" #include "core/hle/kernel/slab_helpers.h" namespace Kernel { class KernelCore; class KReadableEvent; -class KWritableEvent; class KProcess; class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObjectWithList> { @@ -21,37 +19,40 @@ public: explicit KEvent(KernelCore& kernel_); ~KEvent() override; - void Initialize(std::string&& name, KProcess* owner_); + void Initialize(KProcess* owner); void Finalize() override; bool IsInitialized() const override { - return initialized; + return m_initialized; } uintptr_t GetPostDestroyArgument() const override { - return reinterpret_cast<uintptr_t>(owner); + return reinterpret_cast<uintptr_t>(m_owner); } KProcess* GetOwner() const override { - return owner; + return m_owner; } KReadableEvent& GetReadableEvent() { - return readable_event; - } - - KWritableEvent& GetWritableEvent() { - return writable_event; + return m_readable_event; } static void PostDestroy(uintptr_t arg); + Result Signal(); + Result Clear(); + + void OnReadableEventDestroyed() { + m_readable_event_destroyed = true; + } + private: - KReadableEvent readable_event; - KWritableEvent writable_event; - KProcess* owner{}; - bool initialized{}; + KReadableEvent m_readable_event; + KProcess* m_owner{}; + bool m_initialized{}; + bool m_readable_event_destroyed{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp index 1b577a5b3..4a6b60d26 100644 --- a/src/core/hle/kernel/k_interrupt_manager.cpp +++ b/src/core/hle/kernel/k_interrupt_manager.cpp @@ -11,29 +11,34 @@ namespace Kernel::KInterruptManager { void HandleInterrupt(KernelCore& kernel, s32 core_id) { - auto* process = kernel.CurrentProcess(); - if (!process) { - return; - } - // Acknowledge the interrupt. kernel.PhysicalCore(core_id).ClearInterrupt(); auto& current_thread = GetCurrentThread(kernel); - // If the user disable count is set, we may need to pin the current thread. - if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) { - KScopedSchedulerLock sl{kernel}; + if (auto* process = kernel.CurrentProcess(); process) { + // If the user disable count is set, we may need to pin the current thread. + if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) { + KScopedSchedulerLock sl{kernel}; - // Pin the current thread. - process->PinCurrentThread(core_id); + // Pin the current thread. + process->PinCurrentThread(core_id); - // Set the interrupt flag for the thread. - GetCurrentThread(kernel).SetInterruptFlag(); + // Set the interrupt flag for the thread. 
+ GetCurrentThread(kernel).SetInterruptFlag(); + } } // Request interrupt scheduling. kernel.CurrentScheduler()->RequestScheduleOnInterrupt(); } +void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) { + for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) { + if (core_mask & (1ULL << core_id)) { + kernel.PhysicalCore(core_id).Interrupt(); + } + } +} + } // namespace Kernel::KInterruptManager diff --git a/src/core/hle/kernel/k_interrupt_manager.h b/src/core/hle/kernel/k_interrupt_manager.h index f103dfe3f..803dc9211 100644 --- a/src/core/hle/kernel/k_interrupt_manager.h +++ b/src/core/hle/kernel/k_interrupt_manager.h @@ -11,6 +11,8 @@ class KernelCore; namespace KInterruptManager { void HandleInterrupt(KernelCore& kernel, s32 core_id); -} +void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask); + +} // namespace KInterruptManager } // namespace Kernel diff --git a/src/core/hle/kernel/k_linked_list.h b/src/core/hle/kernel/k_linked_list.h index 78859ced3..29ebd16b7 100644 --- a/src/core/hle/kernel/k_linked_list.h +++ b/src/core/hle/kernel/k_linked_list.h @@ -16,6 +16,7 @@ class KLinkedListNode : public boost::intrusive::list_base_hook<>, public KSlabAllocated<KLinkedListNode> { public: + explicit KLinkedListNode(KernelCore&) {} KLinkedListNode() = default; void Initialize(void* it) { diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h index 18df1f836..9444f6bd2 100644 --- a/src/core/hle/kernel/k_memory_block.h +++ b/src/core/hle/kernel/k_memory_block.h @@ -6,6 +6,7 @@ #include "common/alignment.h" #include "common/assert.h" #include "common/common_types.h" +#include "common/intrusive_red_black_tree.h" #include "core/hle/kernel/memory_types.h" #include "core/hle/kernel/svc_types.h" @@ -168,9 +169,8 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per enum class KMemoryAttribute : u8 { None = 0x00, - Mask = 0x7F, - All = Mask, - DontCareMask = 0x80, + All = 0xFF, + UserMask = All, Locked = static_cast<u8>(Svc::MemoryAttribute::Locked), IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked), @@ -178,76 +178,112 @@ enum class KMemoryAttribute : u8 { Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached), SetMask = Uncached, - - IpcAndDeviceMapped = IpcLocked | DeviceShared, - LockedAndIpcLocked = Locked | IpcLocked, - DeviceSharedAndUncached = DeviceShared | Uncached }; DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute); -static_assert((static_cast<u8>(KMemoryAttribute::Mask) & - static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0); +enum class KMemoryBlockDisableMergeAttribute : u8 { + None = 0, + Normal = (1u << 0), + DeviceLeft = (1u << 1), + IpcLeft = (1u << 2), + Locked = (1u << 3), + DeviceRight = (1u << 4), + + AllLeft = Normal | DeviceLeft | IpcLeft | Locked, + AllRight = DeviceRight, +}; +DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute); struct KMemoryInfo { - VAddr addr{}; - std::size_t size{}; - KMemoryState state{}; - KMemoryPermission perm{}; - KMemoryAttribute attribute{}; - KMemoryPermission original_perm{}; - u16 ipc_lock_count{}; - u16 device_use_count{}; + uintptr_t m_address; + size_t m_size; + KMemoryState m_state; + u16 m_device_disable_merge_left_count; + u16 m_device_disable_merge_right_count; + u16 m_ipc_lock_count; + u16 m_device_use_count; + u16 m_ipc_disable_merge_count; + KMemoryPermission m_permission; + KMemoryAttribute m_attribute; + KMemoryPermission m_original_permission; + KMemoryBlockDisableMergeAttribute 
m_disable_merge_attribute; constexpr Svc::MemoryInfo GetSvcMemoryInfo() const { return { - addr, - size, - static_cast<Svc::MemoryState>(state & KMemoryState::Mask), - static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask), - static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask), - ipc_lock_count, - device_use_count, + .addr = m_address, + .size = m_size, + .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask), + .attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask), + .perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask), + .ipc_refcount = m_ipc_lock_count, + .device_refcount = m_device_use_count, + .padding = {}, }; } - constexpr VAddr GetAddress() const { - return addr; + constexpr uintptr_t GetAddress() const { + return m_address; + } + + constexpr size_t GetSize() const { + return m_size; } - constexpr std::size_t GetSize() const { - return size; + + constexpr size_t GetNumPages() const { + return this->GetSize() / PageSize; } - constexpr std::size_t GetNumPages() const { - return GetSize() / PageSize; + + constexpr uintptr_t GetEndAddress() const { + return this->GetAddress() + this->GetSize(); } - constexpr VAddr GetEndAddress() const { - return GetAddress() + GetSize(); + + constexpr uintptr_t GetLastAddress() const { + return this->GetEndAddress() - 1; } - constexpr VAddr GetLastAddress() const { - return GetEndAddress() - 1; + + constexpr u16 GetIpcLockCount() const { + return m_ipc_lock_count; } + + constexpr u16 GetIpcDisableMergeCount() const { + return m_ipc_disable_merge_count; + } + constexpr KMemoryState GetState() const { - return state; + return m_state; + } + + constexpr KMemoryPermission GetPermission() const { + return m_permission; } + + constexpr KMemoryPermission GetOriginalPermission() const { + return m_original_permission; + } + constexpr KMemoryAttribute GetAttribute() const { - return attribute; + return m_attribute; } - constexpr KMemoryPermission GetPermission() const { - return perm; + + constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const { + return m_disable_merge_attribute; } }; -class KMemoryBlock final { - friend class KMemoryBlockManager; - +class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> { private: - VAddr addr{}; - std::size_t num_pages{}; - KMemoryState state{KMemoryState::None}; - u16 ipc_lock_count{}; - u16 device_use_count{}; - KMemoryPermission perm{KMemoryPermission::None}; - KMemoryPermission original_perm{KMemoryPermission::None}; - KMemoryAttribute attribute{KMemoryAttribute::None}; + u16 m_device_disable_merge_left_count; + u16 m_device_disable_merge_right_count; + VAddr m_address; + size_t m_num_pages; + KMemoryState m_memory_state; + u16 m_ipc_lock_count; + u16 m_device_use_count; + u16 m_ipc_disable_merge_count; + KMemoryPermission m_permission; + KMemoryPermission m_original_permission; + KMemoryAttribute m_attribute; + KMemoryBlockDisableMergeAttribute m_disable_merge_attribute; public: static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) { @@ -261,113 +297,349 @@ public: } public: - constexpr KMemoryBlock() = default; - constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_, - KMemoryPermission perm_, KMemoryAttribute attribute_) - : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {} - constexpr VAddr GetAddress() const { - return addr; + return m_address; } - constexpr std::size_t 
GetNumPages() const { - return num_pages; + constexpr size_t GetNumPages() const { + return m_num_pages; } - constexpr std::size_t GetSize() const { - return GetNumPages() * PageSize; + constexpr size_t GetSize() const { + return this->GetNumPages() * PageSize; } constexpr VAddr GetEndAddress() const { - return GetAddress() + GetSize(); + return this->GetAddress() + this->GetSize(); } constexpr VAddr GetLastAddress() const { - return GetEndAddress() - 1; + return this->GetEndAddress() - 1; + } + + constexpr u16 GetIpcLockCount() const { + return m_ipc_lock_count; + } + + constexpr u16 GetIpcDisableMergeCount() const { + return m_ipc_disable_merge_count; + } + + constexpr KMemoryPermission GetPermission() const { + return m_permission; + } + + constexpr KMemoryPermission GetOriginalPermission() const { + return m_original_permission; + } + + constexpr KMemoryAttribute GetAttribute() const { + return m_attribute; } constexpr KMemoryInfo GetMemoryInfo() const { return { - GetAddress(), GetSize(), state, perm, - attribute, original_perm, ipc_lock_count, device_use_count, + .m_address = this->GetAddress(), + .m_size = this->GetSize(), + .m_state = m_memory_state, + .m_device_disable_merge_left_count = m_device_disable_merge_left_count, + .m_device_disable_merge_right_count = m_device_disable_merge_right_count, + .m_ipc_lock_count = m_ipc_lock_count, + .m_device_use_count = m_device_use_count, + .m_ipc_disable_merge_count = m_ipc_disable_merge_count, + .m_permission = m_permission, + .m_attribute = m_attribute, + .m_original_permission = m_original_permission, + .m_disable_merge_attribute = m_disable_merge_attribute, }; } - void ShareToDevice(KMemoryPermission /*new_perm*/) { - ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || - device_use_count == 0); - attribute |= KMemoryAttribute::DeviceShared; - const u16 new_use_count{++device_use_count}; - ASSERT(new_use_count > 0); +public: + explicit KMemoryBlock() = default; + + constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p, + KMemoryAttribute attr) + : Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(), + m_device_disable_merge_left_count(), m_device_disable_merge_right_count(), + m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0), + m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p), + m_original_permission(KMemoryPermission::None), m_attribute(attr), + m_disable_merge_attribute() {} + + constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p, + KMemoryAttribute attr) { + m_device_disable_merge_left_count = 0; + m_device_disable_merge_right_count = 0; + m_address = addr; + m_num_pages = np; + m_memory_state = ms; + m_ipc_lock_count = 0; + m_device_use_count = 0; + m_permission = p; + m_original_permission = KMemoryPermission::None; + m_attribute = attr; + m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None; + } + + constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const { + constexpr auto AttributeIgnoreMask = + KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; + return m_memory_state == s && m_permission == p && + (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); + } + + constexpr bool HasSameProperties(const KMemoryBlock& rhs) const { + return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission && + m_original_permission == rhs.m_original_permission && + m_attribute == rhs.m_attribute && 
m_ipc_lock_count == rhs.m_ipc_lock_count && + m_device_use_count == rhs.m_device_use_count; + } + + constexpr bool CanMergeWith(const KMemoryBlock& rhs) const { + return this->HasSameProperties(rhs) && + (m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) == + KMemoryBlockDisableMergeAttribute::None && + (rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) == + KMemoryBlockDisableMergeAttribute::None; } - void UnshareToDevice(KMemoryPermission /*new_perm*/) { - ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); - const u16 prev_use_count{device_use_count--}; - ASSERT(prev_use_count > 0); - if (prev_use_count == 1) { - attribute &= ~KMemoryAttribute::DeviceShared; + constexpr bool Contains(VAddr addr) const { + return this->GetAddress() <= addr && addr <= this->GetEndAddress(); + } + + constexpr void Add(const KMemoryBlock& added_block) { + ASSERT(added_block.GetNumPages() > 0); + ASSERT(this->GetAddress() + added_block.GetSize() - 1 < + this->GetEndAddress() + added_block.GetSize() - 1); + + m_num_pages += added_block.GetNumPages(); + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute | added_block.m_disable_merge_attribute); + m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count; + } + + constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a, + bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) { + ASSERT(m_original_permission == KMemoryPermission::None); + ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None); + + m_memory_state = s; + m_permission = p; + m_attribute = static_cast<KMemoryAttribute>( + a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared))); + + if (set_disable_merge_attr && set_mask != 0) { + m_disable_merge_attribute = m_disable_merge_attribute | + static_cast<KMemoryBlockDisableMergeAttribute>(set_mask); + } + if (clear_mask != 0) { + m_disable_merge_attribute = m_disable_merge_attribute & + static_cast<KMemoryBlockDisableMergeAttribute>(~clear_mask); } } -private: - constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const { - constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask | - KMemoryAttribute::IpcLocked | - KMemoryAttribute::DeviceShared}; - return state == s && perm == p && - (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); + constexpr void Split(KMemoryBlock* block, VAddr addr) { + ASSERT(this->GetAddress() < addr); + ASSERT(this->Contains(addr)); + ASSERT(Common::IsAligned(addr, PageSize)); + + block->m_address = m_address; + block->m_num_pages = (addr - this->GetAddress()) / PageSize; + block->m_memory_state = m_memory_state; + block->m_ipc_lock_count = m_ipc_lock_count; + block->m_device_use_count = m_device_use_count; + block->m_permission = m_permission; + block->m_original_permission = m_original_permission; + block->m_attribute = m_attribute; + block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft); + block->m_ipc_disable_merge_count = m_ipc_disable_merge_count; + block->m_device_disable_merge_left_count = m_device_disable_merge_left_count; + block->m_device_disable_merge_right_count = 0; + + m_address = addr; + m_num_pages -= block->m_num_pages; + + m_ipc_disable_merge_count = 0; + m_device_disable_merge_left_count = 0; + 
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight); } - constexpr bool HasSameProperties(const KMemoryBlock& rhs) const { - return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm && - attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count && - device_use_count == rhs.device_use_count; + constexpr void UpdateDeviceDisableMergeStateForShareLeft( + [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { + if (left) { + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft); + const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count; + ASSERT(new_device_disable_merge_left_count > 0); + } } - constexpr bool Contains(VAddr start) const { - return GetAddress() <= start && start <= GetEndAddress(); + constexpr void UpdateDeviceDisableMergeStateForShareRight( + [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { + if (right) { + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight); + const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count; + ASSERT(new_device_disable_merge_right_count > 0); + } + } + + constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left, + bool right) { + this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right); + this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right); } - constexpr void Add(std::size_t count) { - ASSERT(count > 0); - ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1); + constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, + bool right) { + // We must either be shared or have a zero lock count. + ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared || + m_device_use_count == 0); - num_pages += count; + // Share. 
+ const u16 new_count = ++m_device_use_count; + ASSERT(new_count > 0); + + m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::DeviceShared); + + this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right); } - constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm, - KMemoryAttribute new_attribute) { - ASSERT(original_perm == KMemoryPermission::None); - ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None); + constexpr void UpdateDeviceDisableMergeStateForUnshareLeft( + [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { - state = new_state; - perm = new_perm; + if (left) { + if (!m_device_disable_merge_left_count) { + return; + } + --m_device_disable_merge_left_count; + } - attribute = static_cast<KMemoryAttribute>( - new_attribute | - (attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared))); + m_device_disable_merge_left_count = + std::min(m_device_disable_merge_left_count, m_device_use_count); + + if (m_device_disable_merge_left_count == 0) { + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft); + } } - constexpr KMemoryBlock Split(VAddr split_addr) { - ASSERT(GetAddress() < split_addr); - ASSERT(Contains(split_addr)); - ASSERT(Common::IsAligned(split_addr, PageSize)); + constexpr void UpdateDeviceDisableMergeStateForUnshareRight( + [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) { + if (right) { + const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--; + ASSERT(old_device_disable_merge_right_count > 0); + if (old_device_disable_merge_right_count == 1) { + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight); + } + } + } - KMemoryBlock block; - block.addr = addr; - block.num_pages = (split_addr - GetAddress()) / PageSize; - block.state = state; - block.ipc_lock_count = ipc_lock_count; - block.device_use_count = device_use_count; - block.perm = perm; - block.original_perm = original_perm; - block.attribute = attribute; + constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left, + bool right) { + this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right); + this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right); + } - addr = split_addr; - num_pages -= block.num_pages; + constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left, + bool right) { + // We must be shared. + ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); + + // Unhare. + const u16 old_count = m_device_use_count--; + ASSERT(old_count > 0); + + if (old_count == 1) { + m_attribute = + static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared); + } + + this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right); + } + + constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left, + bool right) { + + // We must be shared. + ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared); + + // Unhare. 
+ const u16 old_count = m_device_use_count--; + ASSERT(old_count > 0); + + if (old_count == 1) { + m_attribute = + static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared); + } + + this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right); + } + + constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) { + // We must either be locked or have a zero lock count. + ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked || + m_ipc_lock_count == 0); + + // Lock. + const u16 new_lock_count = ++m_ipc_lock_count; + ASSERT(new_lock_count > 0); + + // If this is our first lock, update our permissions. + if (new_lock_count == 1) { + ASSERT(m_original_permission == KMemoryPermission::None); + ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) == + (m_permission | KMemoryPermission::NotMapped)); + ASSERT((m_permission & KMemoryPermission::UserExecute) != + KMemoryPermission::UserExecute || + (new_perm == KMemoryPermission::UserRead)); + m_original_permission = m_permission; + m_permission = static_cast<KMemoryPermission>( + (new_perm & KMemoryPermission::IpcLockChangeMask) | + (m_original_permission & ~KMemoryPermission::IpcLockChangeMask)); + } + m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::IpcLocked); + + if (left) { + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft); + const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count; + ASSERT(new_ipc_disable_merge_count > 0); + } + } + + constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left, + [[maybe_unused]] bool right) { + // We must be locked. + ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked); + + // Unlock. + const u16 old_lock_count = m_ipc_lock_count--; + ASSERT(old_lock_count > 0); + + // If this is our last unlock, update our permissions. 
+ if (old_lock_count == 1) { + ASSERT(m_original_permission != KMemoryPermission::None); + m_permission = m_original_permission; + m_original_permission = KMemoryPermission::None; + m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::IpcLocked); + } + + if (left) { + const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--; + ASSERT(old_ipc_disable_merge_count > 0); + if (old_ipc_disable_merge_count == 1) { + m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>( + m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft); + } + } + } - return block; + constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const { + return m_disable_merge_attribute; } }; static_assert(std::is_trivially_destructible<KMemoryBlock>::value); diff --git a/src/core/hle/kernel/k_memory_block_manager.cpp b/src/core/hle/kernel/k_memory_block_manager.cpp index 3ddb9984f..cf4c1e371 100644 --- a/src/core/hle/kernel/k_memory_block_manager.cpp +++ b/src/core/hle/kernel/k_memory_block_manager.cpp @@ -2,221 +2,336 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include "core/hle/kernel/k_memory_block_manager.h" -#include "core/hle/kernel/memory_types.h" namespace Kernel { -KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_) - : start_addr{start_addr_}, end_addr{end_addr_} { - const u64 num_pages{(end_addr - start_addr) / PageSize}; - memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None); -} +KMemoryBlockManager::KMemoryBlockManager() = default; -KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) { - auto node{memory_block_tree.begin()}; - while (node != end()) { - const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()}; - if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) { - return node; - } - node = std::next(node); - } - return end(); +Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) { + // Allocate a block to encapsulate the address space, insert it into the tree. + KMemoryBlock* start_block = slab_manager->Allocate(); + R_UNLESS(start_block != nullptr, ResultOutOfResource); + + // Set our start and end. + m_start_address = st; + m_end_address = nd; + ASSERT(Common::IsAligned(m_start_address, PageSize)); + ASSERT(Common::IsAligned(m_end_address, PageSize)); + + // Initialize and insert the block. + start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize, + KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None); + m_memory_block_tree.insert(*start_block); + + R_SUCCEED(); } -VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages, - std::size_t num_pages, std::size_t align, - std::size_t offset, std::size_t guard_pages) { - if (num_pages == 0) { - return {}; +void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager, + HostUnmapCallback&& host_unmap_callback) { + // Erase every block until we have none left. 
+ auto it = m_memory_block_tree.begin(); + while (it != m_memory_block_tree.end()) { + KMemoryBlock* block = std::addressof(*it); + it = m_memory_block_tree.erase(it); + slab_manager->Free(block); + host_unmap_callback(block->GetAddress(), block->GetSize()); } - const VAddr region_end{region_start + region_num_pages * PageSize}; - const VAddr region_last{region_end - 1}; - for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) { - const auto info{it->GetMemoryInfo()}; - if (region_last < info.GetAddress()) { - break; - } + ASSERT(m_memory_block_tree.empty()); +} - if (info.state != KMemoryState::Free) { - continue; - } +VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages, + size_t num_pages, size_t alignment, size_t offset, + size_t guard_pages) const { + if (num_pages > 0) { + const VAddr region_end = region_start + region_num_pages * PageSize; + const VAddr region_last = region_end - 1; + for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend(); + it++) { + const KMemoryInfo info = it->GetMemoryInfo(); + if (region_last < info.GetAddress()) { + break; + } + if (info.m_state != KMemoryState::Free) { + continue; + } - VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()}; - area += guard_pages * PageSize; + VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress(); + area += guard_pages * PageSize; - const VAddr offset_area{Common::AlignDown(area, align) + offset}; - area = (area <= offset_area) ? offset_area : offset_area + align; + const VAddr offset_area = Common::AlignDown(area, alignment) + offset; + area = (area <= offset_area) ? offset_area : offset_area + alignment; - const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize}; - const VAddr area_last{area_end - 1}; + const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize; + const VAddr area_last = area_end - 1; - if (info.GetAddress() <= area && area < area_last && area_last <= region_last && - area_last <= info.GetLastAddress()) { - return area; + if (info.GetAddress() <= area && area < area_last && area_last <= region_last && + area_last <= info.GetLastAddress()) { + return area; + } } } return {}; } -void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state, - KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, - KMemoryState state, KMemoryPermission perm, - KMemoryAttribute attribute) { - const VAddr update_end_addr{addr + num_pages * PageSize}; - iterator node{memory_block_tree.begin()}; +void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, + VAddr address, size_t num_pages) { + // Find the iterator now that we've updated. + iterator it = this->FindIterator(address); + if (address != m_start_address) { + it--; + } - prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped; + // Coalesce blocks that we can. 
+ while (true) { + iterator prev = it++; + if (it == m_memory_block_tree.end()) { + break; + } - while (node != memory_block_tree.end()) { - KMemoryBlock* block{&(*node)}; - iterator next_node{std::next(node)}; - const VAddr cur_addr{block->GetAddress()}; - const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; + if (prev->CanMergeWith(*it)) { + KMemoryBlock* block = std::addressof(*it); + m_memory_block_tree.erase(it); + prev->Add(*block); + allocator->Free(block); + it = prev; + } - if (addr < cur_end_addr && cur_addr < update_end_addr) { - if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) { - node = next_node; - continue; - } + if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) { + break; + } + } +} - iterator new_node{node}; - if (addr > cur_addr) { - memory_block_tree.insert(node, block->Split(addr)); +void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, + size_t num_pages, KMemoryState state, KMemoryPermission perm, + KMemoryAttribute attr, + KMemoryBlockDisableMergeAttribute set_disable_attr, + KMemoryBlockDisableMergeAttribute clear_disable_attr) { + // Ensure for auditing that we never end up with an invalid tree. + KScopedMemoryBlockManagerAuditor auditor(this); + ASSERT(Common::IsAligned(address, PageSize)); + ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == + KMemoryAttribute::None); + + VAddr cur_address = address; + size_t remaining_pages = num_pages; + iterator it = this->FindIterator(address); + + while (remaining_pages > 0) { + const size_t remaining_size = remaining_pages * PageSize; + KMemoryInfo cur_info = it->GetMemoryInfo(); + if (it->HasProperties(state, perm, attr)) { + // If we already have the right properties, just advance. + if (cur_address + remaining_size < cur_info.GetEndAddress()) { + remaining_pages = 0; + cur_address += remaining_size; + } else { + remaining_pages = + (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; + cur_address = cur_info.GetEndAddress(); } + } else { + // If we need to, create a new block before and insert it. + if (cur_info.GetAddress() != cur_address) { + KMemoryBlock* new_block = allocator->Allocate(); + + it->Split(new_block, cur_address); + it = m_memory_block_tree.insert(*new_block); + it++; - if (update_end_addr < cur_end_addr) { - new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); + cur_info = it->GetMemoryInfo(); + cur_address = cur_info.GetAddress(); } - new_node->Update(state, perm, attribute); + // If we need to, create a new block after and insert it. + if (cur_info.GetSize() > remaining_size) { + KMemoryBlock* new_block = allocator->Allocate(); - MergeAdjacent(new_node, next_node); - } + it->Split(new_block, cur_address + remaining_size); + it = m_memory_block_tree.insert(*new_block); - if (cur_end_addr - 1 >= update_end_addr - 1) { - break; - } + cur_info = it->GetMemoryInfo(); + } - node = next_node; + // Update block state. 
+ it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr), + static_cast<u8>(clear_disable_attr)); + cur_address += cur_info.GetSize(); + remaining_pages -= cur_info.GetNumPages(); + } + it++; } + + this->CoalesceForUpdate(allocator, address, num_pages); } -void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state, - KMemoryPermission perm, KMemoryAttribute attribute) { - const VAddr update_end_addr{addr + num_pages * PageSize}; - iterator node{memory_block_tree.begin()}; +void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, + VAddr address, size_t num_pages, KMemoryState test_state, + KMemoryPermission test_perm, KMemoryAttribute test_attr, + KMemoryState state, KMemoryPermission perm, + KMemoryAttribute attr) { + // Ensure for auditing that we never end up with an invalid tree. + KScopedMemoryBlockManagerAuditor auditor(this); + ASSERT(Common::IsAligned(address, PageSize)); + ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) == + KMemoryAttribute::None); + + VAddr cur_address = address; + size_t remaining_pages = num_pages; + iterator it = this->FindIterator(address); + + while (remaining_pages > 0) { + const size_t remaining_size = remaining_pages * PageSize; + KMemoryInfo cur_info = it->GetMemoryInfo(); + if (it->HasProperties(test_state, test_perm, test_attr) && + !it->HasProperties(state, perm, attr)) { + // If we need to, create a new block before and insert it. + if (cur_info.GetAddress() != cur_address) { + KMemoryBlock* new_block = allocator->Allocate(); + + it->Split(new_block, cur_address); + it = m_memory_block_tree.insert(*new_block); + it++; + + cur_info = it->GetMemoryInfo(); + cur_address = cur_info.GetAddress(); + } - while (node != memory_block_tree.end()) { - KMemoryBlock* block{&(*node)}; - iterator next_node{std::next(node)}; - const VAddr cur_addr{block->GetAddress()}; - const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; + // If we need to, create a new block after and insert it. + if (cur_info.GetSize() > remaining_size) { + KMemoryBlock* new_block = allocator->Allocate(); - if (addr < cur_end_addr && cur_addr < update_end_addr) { - iterator new_node{node}; + it->Split(new_block, cur_address + remaining_size); + it = m_memory_block_tree.insert(*new_block); - if (addr > cur_addr) { - memory_block_tree.insert(node, block->Split(addr)); + cur_info = it->GetMemoryInfo(); } - if (update_end_addr < cur_end_addr) { - new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); + // Update block state. + it->Update(state, perm, attr, false, 0, 0); + cur_address += cur_info.GetSize(); + remaining_pages -= cur_info.GetNumPages(); + } else { + // If we already have the right properties, just advance. 
+ if (cur_address + remaining_size < cur_info.GetEndAddress()) { + remaining_pages = 0; + cur_address += remaining_size; + } else { + remaining_pages = + (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; + cur_address = cur_info.GetEndAddress(); } - - new_node->Update(state, perm, attribute); - - MergeAdjacent(new_node, next_node); - } - - if (cur_end_addr - 1 >= update_end_addr - 1) { - break; } - - node = next_node; + it++; } + + this->CoalesceForUpdate(allocator, address, num_pages); } -void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, +void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, + size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm) { - const VAddr update_end_addr{addr + num_pages * PageSize}; - iterator node{memory_block_tree.begin()}; + // Ensure for auditing that we never end up with an invalid tree. + KScopedMemoryBlockManagerAuditor auditor(this); + ASSERT(Common::IsAligned(address, PageSize)); - while (node != memory_block_tree.end()) { - KMemoryBlock* block{&(*node)}; - iterator next_node{std::next(node)}; - const VAddr cur_addr{block->GetAddress()}; - const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr}; + VAddr cur_address = address; + size_t remaining_pages = num_pages; + iterator it = this->FindIterator(address); - if (addr < cur_end_addr && cur_addr < update_end_addr) { - iterator new_node{node}; + const VAddr end_address = address + (num_pages * PageSize); - if (addr > cur_addr) { - memory_block_tree.insert(node, block->Split(addr)); - } + while (remaining_pages > 0) { + const size_t remaining_size = remaining_pages * PageSize; + KMemoryInfo cur_info = it->GetMemoryInfo(); - if (update_end_addr < cur_end_addr) { - new_node = memory_block_tree.insert(node, block->Split(update_end_addr)); - } + // If we need to, create a new block before and insert it. + if (cur_info.m_address != cur_address) { + KMemoryBlock* new_block = allocator->Allocate(); - lock_func(new_node, perm); + it->Split(new_block, cur_address); + it = m_memory_block_tree.insert(*new_block); + it++; - MergeAdjacent(new_node, next_node); + cur_info = it->GetMemoryInfo(); + cur_address = cur_info.GetAddress(); } - if (cur_end_addr - 1 >= update_end_addr - 1) { - break; + if (cur_info.GetSize() > remaining_size) { + // If we need to, create a new block after and insert it. + KMemoryBlock* new_block = allocator->Allocate(); + + it->Split(new_block, cur_address + remaining_size); + it = m_memory_block_tree.insert(*new_block); + + cur_info = it->GetMemoryInfo(); } - node = next_node; + // Call the locked update function. + (std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address, + cur_info.GetEndAddress() == end_address); + cur_address += cur_info.GetSize(); + remaining_pages -= cur_info.GetNumPages(); + it++; } -} -void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) { - const_iterator it{FindIterator(start)}; - KMemoryInfo info{}; - do { - info = it->GetMemoryInfo(); - func(info); - it = std::next(it); - } while (info.addr + info.size - 1 < end - 1 && it != cend()); + this->CoalesceForUpdate(allocator, address, num_pages); } -void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) { - KMemoryBlock* block{&(*it)}; - - auto EraseIt = [&](const iterator it_to_erase) { - if (next_it == it_to_erase) { - next_it = std::next(next_it); +// Debug. 
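The coalescing pass above and the CheckState() audit that follows both revolve around one invariant: two touching neighbours with identical properties must never coexist. A minimal standalone model of that merge step, using a plain std::map as a stand-in for the intrusive tree (Block/BlockMap/Coalesce are illustrative, not kernel types):

#include <cstdint>
#include <iterator>
#include <map>

struct Block {
    std::uint64_t size{};
    int state{}; // stand-in for state/permission/attribute
};
using BlockMap = std::map<std::uint64_t, Block>; // key = block start address

// Merge every pair of touching neighbours that share the same properties, so the
// container never keeps two blocks that could be one.
void Coalesce(BlockMap& blocks) {
    for (auto it = blocks.begin(); it != blocks.end();) {
        auto next = std::next(it);
        if (next != blocks.end() && it->first + it->second.size == next->first &&
            it->second.state == next->second.state) {
            it->second.size += next->second.size; // absorb the neighbour...
            blocks.erase(next);                   // ...and release its node
        } else {
            it = next; // gap or different properties: keep both blocks
        }
    }
}

int main() {
    BlockMap blocks{{0x0000, {0x1000, 1}}, {0x1000, {0x2000, 1}}, {0x3000, {0x1000, 2}}};
    Coalesce(blocks); // first two blocks merge into [0x0000, 0x3000) with state 1
    return blocks.size() == 2 ? 0 : 1;
}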
+bool KMemoryBlockManager::CheckState() const { + // Loop over every block, ensuring that we are sorted and coalesced. + auto it = m_memory_block_tree.cbegin(); + auto prev = it++; + while (it != m_memory_block_tree.cend()) { + const KMemoryInfo prev_info = prev->GetMemoryInfo(); + const KMemoryInfo cur_info = it->GetMemoryInfo(); + + // Sequential blocks which can be merged should be merged. + if (prev->CanMergeWith(*it)) { + return false; } - memory_block_tree.erase(it_to_erase); - }; - if (it != memory_block_tree.begin()) { - KMemoryBlock* prev{&(*std::prev(it))}; - - if (block->HasSameProperties(*prev)) { - const iterator prev_it{std::prev(it)}; + // Sequential blocks should be sequential. + if (prev_info.GetEndAddress() != cur_info.GetAddress()) { + return false; + } - prev->Add(block->GetNumPages()); - EraseIt(it); + // If the block is ipc locked, it must have a count. + if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None && + cur_info.m_ipc_lock_count == 0) { + return false; + } - it = prev_it; - block = prev; + // If the block is device shared, it must have a count. + if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None && + cur_info.m_device_use_count == 0) { + return false; } + + // Advance the iterator. + prev = it++; } - if (it != cend()) { - const KMemoryBlock* const next{&(*std::next(it))}; + // Our loop will miss checking the last block, potentially, so check it. + if (prev != m_memory_block_tree.cend()) { + const KMemoryInfo prev_info = prev->GetMemoryInfo(); + // If the block is ipc locked, it must have a count. + if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None && + prev_info.m_ipc_lock_count == 0) { + return false; + } - if (block->HasSameProperties(*next)) { - block->Add(next->GetNumPages()); - EraseIt(std::next(it)); + // If the block is device shared, it must have a count. + if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None && + prev_info.m_device_use_count == 0) { + return false; } } + + return true; } } // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_block_manager.h b/src/core/hle/kernel/k_memory_block_manager.h index e14741b89..9b5873883 100644 --- a/src/core/hle/kernel/k_memory_block_manager.h +++ b/src/core/hle/kernel/k_memory_block_manager.h @@ -4,63 +4,154 @@ #pragma once #include <functional> -#include <list> +#include "common/common_funcs.h" #include "common/common_types.h" +#include "core/hle/kernel/k_dynamic_resource_manager.h" #include "core/hle/kernel/k_memory_block.h" namespace Kernel { +class KMemoryBlockManagerUpdateAllocator { +public: + static constexpr size_t MaxBlocks = 2; + +private: + KMemoryBlock* m_blocks[MaxBlocks]; + size_t m_index; + KMemoryBlockSlabManager* m_slab_manager; + +private: + Result Initialize(size_t num_blocks) { + // Check num blocks. + ASSERT(num_blocks <= MaxBlocks); + + // Set index. + m_index = MaxBlocks - num_blocks; + + // Allocate the blocks. 
+ for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) { + m_blocks[m_index + i] = m_slab_manager->Allocate(); + R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource); + } + + R_SUCCEED(); + } + +public: + KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm, + size_t num_blocks = MaxBlocks) + : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) { + *out_result = this->Initialize(num_blocks); + } + + ~KMemoryBlockManagerUpdateAllocator() { + for (const auto& block : m_blocks) { + if (block != nullptr) { + m_slab_manager->Free(block); + } + } + } + + KMemoryBlock* Allocate() { + ASSERT(m_index < MaxBlocks); + ASSERT(m_blocks[m_index] != nullptr); + KMemoryBlock* block = nullptr; + std::swap(block, m_blocks[m_index++]); + return block; + } + + void Free(KMemoryBlock* block) { + ASSERT(m_index <= MaxBlocks); + ASSERT(block != nullptr); + if (m_index == 0) { + m_slab_manager->Free(block); + } else { + m_blocks[--m_index] = block; + } + } +}; + class KMemoryBlockManager final { public: - using MemoryBlockTree = std::list<KMemoryBlock>; + using MemoryBlockTree = + Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>; + using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left, + bool right); using iterator = MemoryBlockTree::iterator; using const_iterator = MemoryBlockTree::const_iterator; public: - KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_); + KMemoryBlockManager(); + + using HostUnmapCallback = std::function<void(VAddr, u64)>; + + Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager); + void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback); iterator end() { - return memory_block_tree.end(); + return m_memory_block_tree.end(); } const_iterator end() const { - return memory_block_tree.end(); + return m_memory_block_tree.end(); } const_iterator cend() const { - return memory_block_tree.cend(); + return m_memory_block_tree.cend(); } - iterator FindIterator(VAddr addr); + VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, + size_t alignment, size_t offset, size_t guard_pages) const; - VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, - std::size_t align, std::size_t offset, std::size_t guard_pages); + void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages, + KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, + KMemoryBlockDisableMergeAttribute set_disable_attr, + KMemoryBlockDisableMergeAttribute clear_disable_attr); + void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages, + MemoryBlockLockFunction lock_func, KMemoryPermission perm); - void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state, - KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state, - KMemoryPermission perm, KMemoryAttribute attribute); + void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, + size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, + KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, + KMemoryAttribute attr); - void Update(VAddr addr, std::size_t num_pages, KMemoryState state, - KMemoryPermission perm = KMemoryPermission::None, - KMemoryAttribute attribute = KMemoryAttribute::None); - - using LockFunc = std::function<void(iterator, KMemoryPermission)>; - void 
UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, - KMemoryPermission perm); + iterator FindIterator(VAddr address) const { + return m_memory_block_tree.find(KMemoryBlock( + address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None)); + } - using IterateFunc = std::function<void(const KMemoryInfo&)>; - void IterateForRange(VAddr start, VAddr end, IterateFunc&& func); + const KMemoryBlock* FindBlock(VAddr address) const { + if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) { + return std::addressof(*it); + } - KMemoryBlock& FindBlock(VAddr addr) { - return *FindIterator(addr); + return nullptr; } + // Debug. + bool CheckState() const; + private: - void MergeAdjacent(iterator it, iterator& next_it); + void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, + size_t num_pages); - [[maybe_unused]] const VAddr start_addr; - [[maybe_unused]] const VAddr end_addr; + MemoryBlockTree m_memory_block_tree; + VAddr m_start_address{}; + VAddr m_end_address{}; +}; - MemoryBlockTree memory_block_tree; +class KScopedMemoryBlockManagerAuditor { +public: + explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) { + ASSERT(m_manager->CheckState()); + } + explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m) + : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {} + ~KScopedMemoryBlockManagerAuditor() { + ASSERT(m_manager->CheckState()); + } + +private: + KMemoryBlockManager* m_manager; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_memory_manager.cpp b/src/core/hle/kernel/k_memory_manager.cpp index 5b0a9963a..646711505 100644 --- a/src/core/hle/kernel/k_memory_manager.cpp +++ b/src/core/hle/kernel/k_memory_manager.cpp @@ -331,7 +331,7 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag // Set all the allocated memory. 
for (const auto& block : out->Nodes()) { - std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern, + std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern, block.GetSize()); } diff --git a/src/core/hle/kernel/k_page_buffer.cpp b/src/core/hle/kernel/k_page_buffer.cpp index 1a0bf4439..0c16dded4 100644 --- a/src/core/hle/kernel/k_page_buffer.cpp +++ b/src/core/hle/kernel/k_page_buffer.cpp @@ -12,7 +12,7 @@ namespace Kernel { KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) { ASSERT(Common::IsAligned(phys_addr, PageSize)); - return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr)); + return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_page_buffer.h b/src/core/hle/kernel/k_page_buffer.h index 7e50dc1d1..aef06e213 100644 --- a/src/core/hle/kernel/k_page_buffer.h +++ b/src/core/hle/kernel/k_page_buffer.h @@ -13,6 +13,7 @@ namespace Kernel { class KPageBuffer final : public KSlabAllocated<KPageBuffer> { public: + explicit KPageBuffer(KernelCore&) {} KPageBuffer() = default; static KPageBuffer* FromPhysicalAddress(Core::System& system, PAddr phys_addr); diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index d975de844..307e491cb 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp @@ -25,7 +25,7 @@ namespace { using namespace Common::Literals; -constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { +constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { switch (as_type) { case FileSys::ProgramAddressSpaceType::Is32Bit: case FileSys::ProgramAddressSpaceType::Is32BitNoMap: @@ -43,27 +43,29 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT } // namespace KPageTable::KPageTable(Core::System& system_) - : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {} + : m_general_lock{system_.Kernel()}, + m_map_physical_memory_lock{system_.Kernel()}, m_system{system_} {} KPageTable::~KPageTable() = default; Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, - VAddr code_addr, std::size_t code_size, + VAddr code_addr, size_t code_size, + KMemoryBlockSlabManager* mem_block_slab_manager, KMemoryManager::Pool pool) { const auto GetSpaceStart = [this](KAddressSpaceInfo::Type type) { - return KAddressSpaceInfo::GetAddressSpaceStart(address_space_width, type); + return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type); }; const auto GetSpaceSize = [this](KAddressSpaceInfo::Type type) { - return KAddressSpaceInfo::GetAddressSpaceSize(address_space_width, type); + return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type); }; // Set our width and heap/alias sizes - address_space_width = GetAddressSpaceWidthFromType(as_type); + m_address_space_width = GetAddressSpaceWidthFromType(as_type); const VAddr start = 0; - const VAddr end{1ULL << address_space_width}; - std::size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; - std::size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; + const VAddr end{1ULL << m_address_space_width}; + size_t alias_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Alias)}; + size_t heap_region_size{GetSpaceSize(KAddressSpaceInfo::Type::Heap)}; ASSERT(code_addr < 
code_addr + code_size); ASSERT(code_addr + code_size - 1 <= end - 1); @@ -75,66 +77,65 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type } // Set code regions and determine remaining - constexpr std::size_t RegionAlignment{2_MiB}; + constexpr size_t RegionAlignment{2_MiB}; VAddr process_code_start{}; VAddr process_code_end{}; - std::size_t stack_region_size{}; - std::size_t kernel_map_region_size{}; + size_t stack_region_size{}; + size_t kernel_map_region_size{}; - if (address_space_width == 39) { + if (m_address_space_width == 39) { alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Alias); heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Heap); stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type::Stack); kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); - code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); - code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); - alias_code_region_start = code_region_start; - alias_code_region_end = code_region_end; + m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::Map39Bit); + m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::Map39Bit); + m_alias_code_region_start = m_code_region_start; + m_alias_code_region_end = m_code_region_end; process_code_start = Common::AlignDown(code_addr, RegionAlignment); process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment); } else { stack_region_size = 0; kernel_map_region_size = 0; - code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); - code_region_end = code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); - stack_region_start = code_region_start; - alias_code_region_start = code_region_start; - alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + - GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); - stack_region_end = code_region_end; - kernel_map_region_start = code_region_start; - kernel_map_region_end = code_region_end; - process_code_start = code_region_start; - process_code_end = code_region_end; + m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type::MapSmall); + m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type::MapSmall); + m_stack_region_start = m_code_region_start; + m_alias_code_region_start = m_code_region_start; + m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type::MapLarge) + + GetSpaceSize(KAddressSpaceInfo::Type::MapLarge); + m_stack_region_end = m_code_region_end; + m_kernel_map_region_start = m_code_region_start; + m_kernel_map_region_end = m_code_region_end; + process_code_start = m_code_region_start; + process_code_end = m_code_region_end; } // Set other basic fields - is_aslr_enabled = enable_aslr; - address_space_start = start; - address_space_end = end; - is_kernel = false; + m_enable_aslr = enable_aslr; + m_enable_device_address_space_merge = false; + m_address_space_start = start; + m_address_space_end = end; + m_is_kernel = false; + m_memory_block_slab_manager = mem_block_slab_manager; // Determine the region we can place our undetermineds in VAddr alloc_start{}; - std::size_t alloc_size{}; - if ((process_code_start - code_region_start) >= (end - process_code_end)) { - alloc_start = code_region_start; - alloc_size = process_code_start - code_region_start; + size_t alloc_size{}; + if ((process_code_start - m_code_region_start) >= (end - process_code_end)) { + alloc_start = m_code_region_start; + alloc_size 
= process_code_start - m_code_region_start; } else { alloc_start = process_code_end; alloc_size = end - process_code_end; } - const std::size_t needed_size{ - (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)}; - if (alloc_size < needed_size) { - ASSERT(false); - return ResultOutOfMemory; - } + const size_t needed_size = + (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size); + R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory); - const std::size_t remaining_size{alloc_size - needed_size}; + const size_t remaining_size{alloc_size - needed_size}; // Determine random placements for each region - std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; + size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{}; if (enable_aslr) { alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment; @@ -147,117 +148,130 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type } // Setup heap and alias regions - alias_region_start = alloc_start + alias_rnd; - alias_region_end = alias_region_start + alias_region_size; - heap_region_start = alloc_start + heap_rnd; - heap_region_end = heap_region_start + heap_region_size; + m_alias_region_start = alloc_start + alias_rnd; + m_alias_region_end = m_alias_region_start + alias_region_size; + m_heap_region_start = alloc_start + heap_rnd; + m_heap_region_end = m_heap_region_start + heap_region_size; if (alias_rnd <= heap_rnd) { - heap_region_start += alias_region_size; - heap_region_end += alias_region_size; + m_heap_region_start += alias_region_size; + m_heap_region_end += alias_region_size; } else { - alias_region_start += heap_region_size; - alias_region_end += heap_region_size; + m_alias_region_start += heap_region_size; + m_alias_region_end += heap_region_size; } // Setup stack region if (stack_region_size) { - stack_region_start = alloc_start + stack_rnd; - stack_region_end = stack_region_start + stack_region_size; + m_stack_region_start = alloc_start + stack_rnd; + m_stack_region_end = m_stack_region_start + stack_region_size; if (alias_rnd < stack_rnd) { - stack_region_start += alias_region_size; - stack_region_end += alias_region_size; + m_stack_region_start += alias_region_size; + m_stack_region_end += alias_region_size; } else { - alias_region_start += stack_region_size; - alias_region_end += stack_region_size; + m_alias_region_start += stack_region_size; + m_alias_region_end += stack_region_size; } if (heap_rnd < stack_rnd) { - stack_region_start += heap_region_size; - stack_region_end += heap_region_size; + m_stack_region_start += heap_region_size; + m_stack_region_end += heap_region_size; } else { - heap_region_start += stack_region_size; - heap_region_end += stack_region_size; + m_heap_region_start += stack_region_size; + m_heap_region_end += stack_region_size; } } // Setup kernel map region if (kernel_map_region_size) { - kernel_map_region_start = alloc_start + kmap_rnd; - kernel_map_region_end = kernel_map_region_start + kernel_map_region_size; + m_kernel_map_region_start = alloc_start + kmap_rnd; + m_kernel_map_region_end = m_kernel_map_region_start + kernel_map_region_size; if (alias_rnd < kmap_rnd) { - kernel_map_region_start += alias_region_size; - kernel_map_region_end += alias_region_size; + m_kernel_map_region_start += alias_region_size; + m_kernel_map_region_end += alias_region_size; } else { - alias_region_start += kernel_map_region_size; - alias_region_end += kernel_map_region_size; + 
m_alias_region_start += kernel_map_region_size; + m_alias_region_end += kernel_map_region_size; } if (heap_rnd < kmap_rnd) { - kernel_map_region_start += heap_region_size; - kernel_map_region_end += heap_region_size; + m_kernel_map_region_start += heap_region_size; + m_kernel_map_region_end += heap_region_size; } else { - heap_region_start += kernel_map_region_size; - heap_region_end += kernel_map_region_size; + m_heap_region_start += kernel_map_region_size; + m_heap_region_end += kernel_map_region_size; } if (stack_region_size) { if (stack_rnd < kmap_rnd) { - kernel_map_region_start += stack_region_size; - kernel_map_region_end += stack_region_size; + m_kernel_map_region_start += stack_region_size; + m_kernel_map_region_end += stack_region_size; } else { - stack_region_start += kernel_map_region_size; - stack_region_end += kernel_map_region_size; + m_stack_region_start += kernel_map_region_size; + m_stack_region_end += kernel_map_region_size; } } } // Set heap members - current_heap_end = heap_region_start; - max_heap_size = 0; - max_physical_memory_size = 0; + m_current_heap_end = m_heap_region_start; + m_max_heap_size = 0; + m_max_physical_memory_size = 0; // Ensure that we regions inside our address space auto IsInAddressSpace = [&](VAddr addr) { - return address_space_start <= addr && addr <= address_space_end; + return m_address_space_start <= addr && addr <= m_address_space_end; }; - ASSERT(IsInAddressSpace(alias_region_start)); - ASSERT(IsInAddressSpace(alias_region_end)); - ASSERT(IsInAddressSpace(heap_region_start)); - ASSERT(IsInAddressSpace(heap_region_end)); - ASSERT(IsInAddressSpace(stack_region_start)); - ASSERT(IsInAddressSpace(stack_region_end)); - ASSERT(IsInAddressSpace(kernel_map_region_start)); - ASSERT(IsInAddressSpace(kernel_map_region_end)); + ASSERT(IsInAddressSpace(m_alias_region_start)); + ASSERT(IsInAddressSpace(m_alias_region_end)); + ASSERT(IsInAddressSpace(m_heap_region_start)); + ASSERT(IsInAddressSpace(m_heap_region_end)); + ASSERT(IsInAddressSpace(m_stack_region_start)); + ASSERT(IsInAddressSpace(m_stack_region_end)); + ASSERT(IsInAddressSpace(m_kernel_map_region_start)); + ASSERT(IsInAddressSpace(m_kernel_map_region_end)); // Ensure that we selected regions that don't overlap - const VAddr alias_start{alias_region_start}; - const VAddr alias_last{alias_region_end - 1}; - const VAddr heap_start{heap_region_start}; - const VAddr heap_last{heap_region_end - 1}; - const VAddr stack_start{stack_region_start}; - const VAddr stack_last{stack_region_end - 1}; - const VAddr kmap_start{kernel_map_region_start}; - const VAddr kmap_last{kernel_map_region_end - 1}; + const VAddr alias_start{m_alias_region_start}; + const VAddr alias_last{m_alias_region_end - 1}; + const VAddr heap_start{m_heap_region_start}; + const VAddr heap_last{m_heap_region_end - 1}; + const VAddr stack_start{m_stack_region_start}; + const VAddr stack_last{m_stack_region_end - 1}; + const VAddr kmap_start{m_kernel_map_region_start}; + const VAddr kmap_last{m_kernel_map_region_end - 1}; ASSERT(alias_last < heap_start || heap_last < alias_start); ASSERT(alias_last < stack_start || stack_last < alias_start); ASSERT(alias_last < kmap_start || kmap_last < alias_start); ASSERT(heap_last < stack_start || stack_last < heap_start); ASSERT(heap_last < kmap_start || kmap_last < heap_start); - current_heap_end = heap_region_start; - max_heap_size = 0; - mapped_physical_memory_size = 0; - memory_pool = pool; + m_current_heap_end = m_heap_region_start; + m_max_heap_size = 0; + m_mapped_physical_memory_size = 
0; + m_memory_pool = pool; + + m_page_table_impl = std::make_unique<Common::PageTable>(); + m_page_table_impl->Resize(m_address_space_width, PageBits); + + // Initialize our memory block manager. + R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, + m_memory_block_slab_manager)); +} - page_table_impl.Resize(address_space_width, PageBits); +void KPageTable::Finalize() { + // Finalize memory blocks. + m_memory_block_manager.Finalize(m_memory_block_slab_manager, [&](VAddr addr, u64 size) { + m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size); + }); - return InitializeMemoryLayout(start, end); + // Close the backing page table, as the destructor is not called for guest objects. + m_page_table_impl.reset(); } -Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryState state, +Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) { const u64 size{num_pages * PageSize}; @@ -265,52 +279,76 @@ Result KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemoryStat R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify that the destination memory is unmapped. R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); + + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager); + + // Allocate and open. KPageGroup pg; - R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( + R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( &pg, num_pages, - KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, allocation_option))); + KMemoryManager::EncodeOption(KMemoryManager::Pool::Application, m_allocation_option))); R_TRY(Operate(addr, num_pages, pg, OperationType::MapGroup)); - block_manager->Update(addr, num_pages, state, perm); + // Update the blocks. + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size) { +Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) { // Validate the mapping request. R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), ResultInvalidMemoryRegion); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify that the source memory is normal heap. KMemoryState src_state{}; KMemoryPermission src_perm{}; - std::size_t num_src_allocator_blocks{}; + size_t num_src_allocator_blocks{}; R_TRY(this->CheckMemoryState(&src_state, &src_perm, nullptr, &num_src_allocator_blocks, src_address, size, KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)); // Verify that the destination memory is unmapped. 
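The allocator created in MapProcessCode above follows a pattern repeated throughout this change: reserve the worst-case number of blocks first, fail early, then perform the update with allocations that can no longer fail. A minimal standalone sketch of that shape; UpdateAllocator and Node are illustrative stand-ins with simplified ownership, not the kernel classes.

#include <array>
#include <cassert>
#include <cstddef>
#include <new>

struct Node {}; // stand-in for a KMemoryBlock

// Preallocate up to N nodes before any state is touched; unused nodes are
// released again on destruction.
template <std::size_t N>
class UpdateAllocator {
public:
    explicit UpdateAllocator(bool& ok, std::size_t needed) {
        ok = needed <= N;
        for (std::size_t i = 0; ok && i < needed; ++i) {
            m_blocks[i] = new (std::nothrow) Node();
            ok = m_blocks[i] != nullptr; // failure is reported before any block is modified
        }
    }

    ~UpdateAllocator() {
        for (Node* block : m_blocks) {
            delete block; // return whatever the update did not consume
        }
    }

    Node* Allocate() {
        assert(m_index < N && m_blocks[m_index] != nullptr);
        Node* block = m_blocks[m_index];
        m_blocks[m_index++] = nullptr;
        return block;
    }

private:
    std::array<Node*, N> m_blocks{};
    std::size_t m_index{};
};

int main() {
    bool ok{};
    UpdateAllocator<2> allocator(ok, 2); // a ranged update needs at most two fresh blocks
    assert(ok);
    Node* front_split = allocator.Allocate();
    delete front_split; // in the real manager, ownership moves into the block tree instead
    // The second preallocated node is still owned by `allocator` and freed on scope exit.
}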
- std::size_t num_dst_allocator_blocks{}; + size_t num_dst_allocator_blocks{}; R_TRY(this->CheckMemoryState(&num_dst_allocator_blocks, dst_address, size, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); + // Create an update allocator for the source. + Result src_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + m_memory_block_slab_manager, + num_src_allocator_blocks); + R_TRY(src_allocator_result); + + // Create an update allocator for the destination. + Result dst_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + m_memory_block_slab_manager, + num_dst_allocator_blocks); + R_TRY(dst_allocator_result); + // Map the code memory. { // Determine the number of pages being operated on. - const std::size_t num_pages = size / PageSize; + const size_t num_pages = size / PageSize; // Create page groups for the memory being mapped. KPageGroup pg; @@ -335,33 +373,37 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, std::size unprot_guard.Cancel(); // Apply the memory block updates. - block_manager->Update(src_address, num_pages, src_state, new_perm, - KMemoryAttribute::Locked); - block_manager->Update(dst_address, num_pages, KMemoryState::AliasCode, new_perm, - KMemoryAttribute::None); + m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, + src_state, new_perm, KMemoryAttribute::Locked, + KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, + KMemoryState::AliasCode, new_perm, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); } - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size, +Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size, ICacheInvalidationStrategy icache_invalidation_strategy) { // Validate the mapping request. R_UNLESS(this->CanContain(dst_address, size, KMemoryState::AliasCode), ResultInvalidMemoryRegion); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify that the source memory is locked normal heap. - std::size_t num_src_allocator_blocks{}; + size_t num_src_allocator_blocks{}; R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, KMemoryState::All, KMemoryState::Normal, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::Locked)); // Verify that the destination memory is aliasable code. - std::size_t num_dst_allocator_blocks{}; + size_t num_dst_allocator_blocks{}; R_TRY(this->CheckMemoryStateContiguous( std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState::FlagCanCodeAlias, KMemoryState::FlagCanCodeAlias, KMemoryPermission::None, KMemoryPermission::None, @@ -370,7 +412,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si // Determine whether any pages being unmapped are code. bool any_code_pages = false; { - KMemoryBlockManager::const_iterator it = block_manager->FindIterator(dst_address); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address); while (true) { // Get the memory info. 
const KMemoryInfo info = it->GetMemoryInfo(); @@ -396,9 +438,9 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si SCOPE_EXIT({ if (reprotected_pages && any_code_pages) { if (icache_invalidation_strategy == ICacheInvalidationStrategy::InvalidateRange) { - system.InvalidateCpuInstructionCacheRange(dst_address, size); + m_system.InvalidateCpuInstructionCacheRange(dst_address, size); } else { - system.InvalidateCpuInstructionCaches(); + m_system.InvalidateCpuInstructionCaches(); } } }); @@ -406,7 +448,21 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si // Unmap. { // Determine the number of pages being operated on. - const std::size_t num_pages = size / PageSize; + const size_t num_pages = size / PageSize; + + // Create an update allocator for the source. + Result src_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + m_memory_block_slab_manager, + num_src_allocator_blocks); + R_TRY(src_allocator_result); + + // Create an update allocator for the destination. + Result dst_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + m_memory_block_slab_manager, + num_dst_allocator_blocks); + R_TRY(dst_allocator_result); // Unmap the aliased copy of the pages. R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap)); @@ -416,73 +472,34 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::si OperationType::ChangePermissions)); // Apply the memory block updates. - block_manager->Update(dst_address, num_pages, KMemoryState::None); - block_manager->Update(src_address, num_pages, KMemoryState::Normal, - KMemoryPermission::UserReadWrite); + m_memory_block_manager.Update( + std::addressof(dst_allocator), dst_address, num_pages, KMemoryState::None, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Normal); + m_memory_block_manager.Update( + std::addressof(src_allocator), src_address, num_pages, KMemoryState::Normal, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, KMemoryBlockDisableMergeAttribute::Locked); // Note that we reprotected pages. reprotected_pages = true; } - return ResultSuccess; + R_SUCCEED(); } -VAddr KPageTable::FindFreeArea(VAddr region_start, std::size_t region_num_pages, - std::size_t num_pages, std::size_t alignment, std::size_t offset, - std::size_t guard_pages) { +VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, + size_t alignment, size_t offset, size_t guard_pages) { VAddr address = 0; if (num_pages <= region_num_pages) { if (this->IsAslrEnabled()) { - // Try to directly find a free area up to 8 times. 
- for (std::size_t i = 0; i < 8; i++) { - const std::size_t random_offset = - KSystemControl::GenerateRandomRange( - 0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * - alignment; - const VAddr candidate = - Common::AlignDown((region_start + random_offset), alignment) + offset; - - KMemoryInfo info = this->QueryInfoImpl(candidate); - - if (info.state != KMemoryState::Free) { - continue; - } - if (region_start > candidate) { - continue; - } - if (info.GetAddress() + guard_pages * PageSize > candidate) { - continue; - } - - const VAddr candidate_end = candidate + (num_pages + guard_pages) * PageSize - 1; - if (candidate_end > info.GetLastAddress()) { - continue; - } - if (candidate_end > region_start + region_num_pages * PageSize - 1) { - continue; - } - - address = candidate; - break; - } - // Fall back to finding the first free area with a random offset. - if (address == 0) { - // NOTE: Nintendo does not account for guard pages here. - // This may theoretically cause an offset to be chosen that cannot be mapped. We - // will account for guard pages. - const std::size_t offset_pages = KSystemControl::GenerateRandomRange( - 0, region_num_pages - num_pages - guard_pages); - address = block_manager->FindFreeArea(region_start + offset_pages * PageSize, - region_num_pages - offset_pages, num_pages, - alignment, offset, guard_pages); - } + UNIMPLEMENTED(); } - // Find the first free area. if (address == 0) { - address = block_manager->FindFreeArea(region_start, region_num_pages, num_pages, - alignment, offset, guard_pages); + address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, + alignment, offset, guard_pages); } } @@ -500,7 +517,8 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { // Begin traversal. Common::PageTable::TraversalContext context; Common::PageTable::TraversalEntry next_entry; - R_UNLESS(page_table_impl.BeginTraversal(next_entry, context, addr), ResultInvalidCurrentMemory); + R_UNLESS(m_page_table_impl->BeginTraversal(next_entry, context, addr), + ResultInvalidCurrentMemory); // Prepare tracking variables. PAddr cur_addr = next_entry.phys_addr; @@ -508,9 +526,9 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { size_t tot_size = cur_size; // Iterate, adding to group as we go. - const auto& memory_layout = system.Kernel().MemoryLayout(); + const auto& memory_layout = m_system.Kernel().MemoryLayout(); while (tot_size < size) { - R_UNLESS(page_table_impl.ContinueTraversal(next_entry, context), + R_UNLESS(m_page_table_impl->ContinueTraversal(next_entry, context), ResultInvalidCurrentMemory); if (next_entry.phys_addr != (cur_addr + cur_size)) { @@ -538,7 +556,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) { R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory); R_TRY(pg.AddBlock(cur_addr, cur_pages)); - return ResultSuccess; + R_SUCCEED(); } bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) { @@ -546,7 +564,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu const size_t size = num_pages * PageSize; const auto& pg = pg_ll.Nodes(); - const auto& memory_layout = system.Kernel().MemoryLayout(); + const auto& memory_layout = m_system.Kernel().MemoryLayout(); // Empty groups are necessarily invalid. 
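MakePageGroup above turns a page-by-page physical walk into a short list of contiguous runs. A minimal standalone sketch of that accumulation; GroupPages and the flat address vector are illustrative, whereas the real code walks Common::PageTable traversal entries.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

constexpr std::uint64_t PageSize = 0x1000;

// Collapse per-page physical addresses into (base, pages) runs: extend the current
// run while the next page is physically contiguous, otherwise start a new run.
std::vector<std::pair<std::uint64_t, std::size_t>> GroupPages(
    const std::vector<std::uint64_t>& phys_addrs) {
    std::vector<std::pair<std::uint64_t, std::size_t>> runs;
    for (const std::uint64_t addr : phys_addrs) {
        if (!runs.empty() && runs.back().first + runs.back().second * PageSize == addr) {
            ++runs.back().second; // contiguous: extend the current run
        } else {
            runs.emplace_back(addr, 1); // discontiguity: begin a new run
        }
    }
    return runs;
}

int main() {
    const auto runs = GroupPages({0x1000, 0x2000, 0x3000, 0x8000, 0x9000});
    for (const auto& [base, pages] : runs) {
        std::cout << std::hex << base << " x" << std::dec << pages << '\n'; // 1000 x3, 8000 x2
    }
}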
if (pg.empty()) { @@ -573,7 +591,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu // Begin traversal. Common::PageTable::TraversalContext context; Common::PageTable::TraversalEntry next_entry; - if (!page_table_impl.BeginTraversal(next_entry, context, addr)) { + if (!m_page_table_impl->BeginTraversal(next_entry, context, addr)) { return false; } @@ -584,7 +602,7 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu // Iterate, comparing expected to actual. while (tot_size < size) { - if (!page_table_impl.ContinueTraversal(next_entry, context)) { + if (!m_page_table_impl->ContinueTraversal(next_entry, context)) { return false; } @@ -630,11 +648,11 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize); } -Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, +Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, VAddr src_addr) { - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); - const std::size_t num_pages{size / PageSize}; + const size_t num_pages{size / PageSize}; // Check that the memory is mapped in the destination process. size_t num_allocator_blocks; @@ -649,43 +667,51 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTab KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); // Apply the memory block update. - block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None, - KMemoryAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), dst_addr, num_pages, + KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); - system.InvalidateCpuInstructionCaches(); + m_system.InvalidateCpuInstructionCaches(); - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { +Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) { // Lock the physical memory lock. - KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); + KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); // Calculate the last address for convenience. const VAddr last_address = address + size - 1; // Define iteration variables. VAddr cur_address; - std::size_t mapped_size; + size_t mapped_size; // The entire mapping process can be retried. while (true) { // Check if the memory is already mapped. { // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Iterate over the memory. cur_address = address; mapped_size = 0; - auto it = block_manager->FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != block_manager->end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. 
const KMemoryInfo info = it->GetMemoryInfo(); @@ -716,20 +742,20 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { { // Reserve the memory from the process resource limit. KScopedResourceReservation memory_reservation( - system.Kernel().CurrentProcess()->GetResourceLimit(), + m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, size - mapped_size); R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); // Allocate pages for the new memory. KPageGroup pg; - R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( + R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( &pg, (size - mapped_size) / PageSize, - KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); + KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); // Map the memory. { // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); size_t num_allocator_blocks = 0; @@ -739,10 +765,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { size_t checked_mapped_size = 0; cur_address = address; - auto it = block_manager->FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != block_manager->end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -782,6 +808,14 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { } } + // Create an update allocator. + ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, + num_allocator_blocks); + R_TRY(allocator_result); + // Reset the current tracking address, and make sure we clean up on failure. cur_address = address; auto unmap_guard = detail::ScopeExit([&] { @@ -791,10 +825,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { // Iterate, unmapping the pages. cur_address = address; - auto it = block_manager->FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != block_manager->end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -830,10 +864,10 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { PAddr pg_phys_addr = pg_it->GetAddress(); size_t pg_pages = pg_it->GetNumPages(); - auto it = block_manager->FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != block_manager->end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -886,37 +920,37 @@ Result KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) { memory_reservation.Commit(); // Increase our tracked mapped size. - mapped_physical_memory_size += (size - mapped_size); + m_mapped_physical_memory_size += (size - mapped_size); // Update the relevant memory blocks. 
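The unmap_guard created above is the commit-or-rollback shape used throughout these mapping paths: the cleanup runs on any early exit unless it is cancelled once every step has succeeded. A minimal standalone sketch of such a guard; ScopeExit and MapBothHalves here are illustrative stand-ins, not the detail::ScopeExit implementation.

#include <iostream>
#include <utility>

template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& func) : m_func(std::move(func)) {}
    ~ScopeExit() {
        if (m_active) {
            m_func(); // error path: undo the partial work
        }
    }
    void Cancel() {
        m_active = false; // success path: keep the new state
    }

private:
    F m_func;
    bool m_active{true};
};

bool MapBothHalves(bool second_half_succeeds) {
    std::cout << "mapped first half\n";
    ScopeExit rollback([] { std::cout << "rolling back first half\n"; });

    if (!second_half_succeeds) {
        return false; // rollback fires here and undoes the partial mapping
    }

    rollback.Cancel(); // both halves mapped: commit
    return true;
}

int main() {
    MapBothHalves(false); // prints the rollback message
    MapBothHalves(true);  // commits silently
}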
- block_manager->Update(address, size / PageSize, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None, - KMemoryState::Normal, KMemoryPermission::UserReadWrite, - KMemoryAttribute::None); + m_memory_block_manager.UpdateIfMatch( + std::addressof(allocator), address, size / PageSize, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None); // Cancel our guard. unmap_guard.Cancel(); - return ResultSuccess; + R_SUCCEED(); } } } } -Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { +Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) { // Lock the physical memory lock. - KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); + KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Calculate the last address for convenience. const VAddr last_address = address + size - 1; // Define iteration variables. VAddr cur_address = 0; - std::size_t mapped_size = 0; - std::size_t num_allocator_blocks = 0; + size_t mapped_size = 0; + size_t num_allocator_blocks = 0; // Check if the memory is mapped. { @@ -924,10 +958,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { cur_address = address; mapped_size = 0; - auto it = block_manager->FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != block_manager->end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -1022,6 +1056,13 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { } ASSERT(pg.GetNumPages() == mapped_size / PageSize); + // Create an update allocator. + ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks); + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Reset the current tracking address, and make sure we clean up on failure. cur_address = address; auto remap_guard = detail::ScopeExit([&] { @@ -1030,7 +1071,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { cur_address = address; // Iterate over the memory we unmapped. - auto it = block_manager->FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); auto pg_it = pg.Nodes().begin(); PAddr pg_phys_addr = pg_it->GetAddress(); size_t pg_pages = pg_it->GetNumPages(); @@ -1085,10 +1126,10 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { }); // Iterate over the memory, unmapping as we go. - auto it = block_manager->FindIterator(cur_address); + auto it = m_memory_block_manager.FindIterator(cur_address); while (true) { // Check that the iterator is valid. - ASSERT(it != block_manager->end()); + ASSERT(it != m_memory_block_manager.end()); // Get the memory info. const KMemoryInfo info = it->GetMemoryInfo(); @@ -1115,104 +1156,159 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) { } // Release the memory resource. 
- mapped_physical_memory_size -= mapped_size; - auto process{system.Kernel().CurrentProcess()}; + m_mapped_physical_memory_size -= mapped_size; + auto process{m_system.Kernel().CurrentProcess()}; process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size); // Update memory blocks. - block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None, - KMemoryAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, + KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::None); // TODO(bunnei): This is a workaround until the next set of changes, where we add reference // counting for mapped pages. Until then, we must manually close the reference to the page // group. - system.Kernel().MemoryManager().Close(pg); + m_system.Kernel().MemoryManager().Close(pg); // We succeeded. remap_guard.Cancel(); - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { - KScopedLightLock lk(general_lock); - - KMemoryState src_state{}; - CASCADE_CODE(CheckMemoryState( - &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias, - KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::UserReadWrite, - KMemoryAttribute::Mask, KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); +Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) { + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Validate that the source address's state is valid. + KMemoryState src_state; + size_t num_src_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, + std::addressof(num_src_allocator_blocks), src_address, size, + KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, + KMemoryPermission::All, KMemoryPermission::UserReadWrite, + KMemoryAttribute::All, KMemoryAttribute::None)); - if (IsRegionMapped(dst_addr, size)) { - return ResultInvalidCurrentMemory; - } + // Validate that the dst address's state is valid. + size_t num_dst_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, + KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryAttribute::None)); + // Create an update allocator for the source. + Result src_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + m_memory_block_slab_manager, + num_src_allocator_blocks); + R_TRY(src_allocator_result); + + // Create an update allocator for the destination. + Result dst_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + m_memory_block_slab_manager, + num_dst_allocator_blocks); + R_TRY(dst_allocator_result); + + // Map the memory. KPageGroup page_linked_list; - const std::size_t num_pages{size / PageSize}; - - AddRegionToPages(src_addr, num_pages, page_linked_list); + const size_t num_pages{size / PageSize}; + const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>( + KMemoryPermission::KernelRead | KMemoryPermission::NotMapped); + const KMemoryAttribute new_src_attr = KMemoryAttribute::Locked; + AddRegionToPages(src_address, num_pages, page_linked_list); { + // Reprotect the source as kernel-read/not mapped. 
auto block_guard = detail::ScopeExit([&] { - Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite, + Operate(src_address, num_pages, KMemoryPermission::UserReadWrite, OperationType::ChangePermissions); }); - - CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::None, - OperationType::ChangePermissions)); - CASCADE_CODE(MapPages(dst_addr, page_linked_list, KMemoryPermission::UserReadWrite)); + R_TRY(Operate(src_address, num_pages, new_src_perm, OperationType::ChangePermissions)); + R_TRY(MapPages(dst_address, page_linked_list, KMemoryPermission::UserReadWrite)); block_guard.Cancel(); } - block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::None, - KMemoryAttribute::Locked); - block_manager->Update(dst_addr, num_pages, KMemoryState::Stack, - KMemoryPermission::UserReadWrite); - - return ResultSuccess; + // Apply the memory block updates. + m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, + new_src_perm, new_src_attr, + KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); + m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, + KMemoryState::Stack, KMemoryPermission::UserReadWrite, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); + + R_SUCCEED(); } -Result KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { - KScopedLightLock lk(general_lock); +Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) { + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Validate that the source address's state is valid. + KMemoryState src_state; + size_t num_src_allocator_blocks; + R_TRY(this->CheckMemoryState( + std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks), + src_address, size, KMemoryState::FlagCanAlias, KMemoryState::FlagCanAlias, + KMemoryPermission::All, KMemoryPermission::NotMapped | KMemoryPermission::KernelRead, + KMemoryAttribute::All, KMemoryAttribute::Locked)); + + // Validate that the dst address's state is valid. + KMemoryPermission dst_perm; + size_t num_dst_allocator_blocks; + R_TRY(this->CheckMemoryState( + nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks), + dst_address, size, KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); - KMemoryState src_state{}; - CASCADE_CODE(CheckMemoryState( - &src_state, nullptr, nullptr, nullptr, src_addr, size, KMemoryState::FlagCanAlias, - KMemoryState::FlagCanAlias, KMemoryPermission::All, KMemoryPermission::None, - KMemoryAttribute::Mask, KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped)); + // Create an update allocator for the source. + Result src_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), + m_memory_block_slab_manager, + num_src_allocator_blocks); + R_TRY(src_allocator_result); - KMemoryPermission dst_perm{}; - CASCADE_CODE(CheckMemoryState(nullptr, &dst_perm, nullptr, nullptr, dst_addr, size, - KMemoryState::All, KMemoryState::Stack, KMemoryPermission::None, - KMemoryPermission::None, KMemoryAttribute::Mask, - KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped)); + // Create an update allocator for the destination. 
+ Result dst_allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), + m_memory_block_slab_manager, + num_dst_allocator_blocks); + R_TRY(dst_allocator_result); KPageGroup src_pages; KPageGroup dst_pages; - const std::size_t num_pages{size / PageSize}; + const size_t num_pages{size / PageSize}; - AddRegionToPages(src_addr, num_pages, src_pages); - AddRegionToPages(dst_addr, num_pages, dst_pages); + AddRegionToPages(src_address, num_pages, src_pages); + AddRegionToPages(dst_address, num_pages, dst_pages); - if (!dst_pages.IsEqual(src_pages)) { - return ResultInvalidMemoryRegion; - } + R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion); { - auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); }); + auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); }); - CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); - CASCADE_CODE(Operate(src_addr, num_pages, KMemoryPermission::UserReadWrite, - OperationType::ChangePermissions)); + R_TRY(Operate(dst_address, num_pages, KMemoryPermission::None, OperationType::Unmap)); + R_TRY(Operate(src_address, num_pages, KMemoryPermission::UserReadWrite, + OperationType::ChangePermissions)); block_guard.Cancel(); } - block_manager->Update(src_addr, num_pages, src_state, KMemoryPermission::UserReadWrite); - block_manager->Update(dst_addr, num_pages, KMemoryState::Free); - - return ResultSuccess; + // Apply the memory block updates. + m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Locked); + m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, + KMemoryState::None, KMemoryPermission::None, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); + + R_SUCCEED(); } Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, @@ -1225,48 +1321,54 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list, if (const auto result{ Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; result.IsError()) { - const std::size_t num_pages{(addr - cur_addr) / PageSize}; + const size_t num_pages{(addr - cur_addr) / PageSize}; ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap) .IsSuccess()); - return result; + R_RETURN(result); } cur_addr += node.GetNumPages() * PageSize; } - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state, KMemoryPermission perm) { // Check that the map is in range. - const std::size_t num_pages{page_linked_list.GetNumPages()}; - const std::size_t size{num_pages * PageSize}; + const size_t num_pages{page_linked_list.GetNumPages()}; + const size_t size{num_pages * PageSize}; R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check the memory state. R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); + // Create an update allocator. 
+ Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager); + // Map the pages. R_TRY(MapPages(address, page_linked_list, perm)); // Update the blocks. - block_manager->Update(address, num_pages, state, perm); + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, - PAddr phys_addr, bool is_pa_valid, VAddr region_start, - std::size_t region_num_pages, KMemoryState state, - KMemoryPermission perm) { +Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, + bool is_pa_valid, VAddr region_start, size_t region_num_pages, + KMemoryState state, KMemoryPermission perm) { ASSERT(Common::IsAligned(alignment, PageSize) && alignment >= PageSize); // Ensure this is a valid map request. @@ -1275,7 +1377,7 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t R_UNLESS(num_pages < region_num_pages, ResultOutOfMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Find a random address to map at. VAddr addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, @@ -1288,6 +1390,11 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t KMemoryAttribute::None, KMemoryAttribute::None) .IsSuccess()); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager); + // Perform mapping operation. if (is_pa_valid) { R_TRY(this->Operate(addr, num_pages, perm, OperationType::Map, phys_addr)); @@ -1296,11 +1403,13 @@ Result KPageTable::MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t } // Update the blocks. - block_manager->Update(addr, num_pages, state, perm); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); // We successfully mapped the pages. *out_addr = addr; - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { @@ -1312,60 +1421,80 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) { if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None, OperationType::Unmap)}; result.IsError()) { - return result; + R_RETURN(result); } cur_addr += node.GetNumPages() * PageSize; } - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state) { +Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) { // Check that the unmap is in range. - const std::size_t num_pages{page_linked_list.GetNumPages()}; - const std::size_t size{num_pages * PageSize}; - R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); + const size_t num_pages{page_linked_list.GetNumPages()}; + const size_t size{num_pages * PageSize}; + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); // Lock the table. 
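Every operation in this file now begins by taking m_general_lock through a KScopedLightLock, as on the lines that follow; the internal helpers then only assert IsLockedByCurrentThread(). The guard is plain RAII, in the spirit of this sketch (std::mutex standing in for the kernel's KLightLock):

    #include <mutex>

    class ScopedLightLock {
    public:
        explicit ScopedLightLock(std::mutex& lock) : m_lock(lock) {
            m_lock.lock();
        }
        ~ScopedLightLock() {
            m_lock.unlock();
        }
        ScopedLightLock(const ScopedLightLock&) = delete;
        ScopedLightLock& operator=(const ScopedLightLock&) = delete;

    private:
        std::mutex& m_lock;
    };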
- KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check the memory state. - R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None, + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, + KMemoryState::All, state, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Perform the unmap. - R_TRY(UnmapPages(addr, page_linked_list)); + R_TRY(UnmapPages(address, page_linked_list)); // Update the blocks. - block_manager->Update(addr, num_pages, state, KMemoryPermission::None); + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state) { +Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) { // Check that the unmap is in range. - const std::size_t size = num_pages * PageSize; + const size_t size = num_pages * PageSize; R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check the memory state. - std::size_t num_allocator_blocks{}; + size_t num_allocator_blocks{}; R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState::All, state, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Perform the unmap. R_TRY(Operate(address, num_pages, KMemoryPermission::None, OperationType::Unmap)); // Update the blocks. - block_manager->Update(address, num_pages, KMemoryState::Free, KMemoryPermission::None); + m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState::Free, + KMemoryPermission::None, KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Normal); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, @@ -1380,7 +1509,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check if state allows us to create the group. R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState::FlagReferenceCounted, @@ -1390,15 +1519,15 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n // Create a new page group for the region. 
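Throughout the change, bare "return ResultSuccess;" statements and CASCADE_CODE calls are rewritten in terms of R_SUCCEED, R_TRY, R_UNLESS and R_RETURN, as in the conversion right below. As a rough, self-contained sketch of what such result macros boil down to (the real definitions in the codebase are more elaborate):

    #include <cstddef>
    #include <cstdint>

    struct Result {
        std::uint32_t raw{0};
        constexpr bool IsError() const { return raw != 0; }
        constexpr bool IsSuccess() const { return raw == 0; }
    };
    inline constexpr Result ResultSuccess{0};
    inline constexpr Result ResultInvalidCurrentMemory{0xD401};

    #define R_SUCCEED() return ResultSuccess
    #define R_SUCCEED_IF(cond)                                        \
        do {                                                          \
            if (cond) {                                               \
                return ResultSuccess;                                 \
            }                                                         \
        } while (false)
    #define R_RETURN(expr) return (expr)
    #define R_TRY(expr)                                               \
        do {                                                          \
            if (const Result r_try_rc = (expr); r_try_rc.IsError()) { \
                return r_try_rc;                                      \
            }                                                         \
        } while (false)
    #define R_UNLESS(cond, result)                                    \
        do {                                                          \
            if (!(cond)) {                                            \
                return (result);                                      \
            }                                                         \
        } while (false)

    Result CheckSize(std::size_t size) {
        R_UNLESS(size > 0, ResultInvalidCurrentMemory); // early-out with a specific error
        R_SUCCEED();
    }

    Result Caller(std::size_t size) {
        R_TRY(CheckSize(size)); // propagate any failure to our caller
        R_SUCCEED();
    }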
R_TRY(this->MakePageGroup(*out, address, num_pages)); - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, +Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) { const size_t num_pages = size / PageSize; // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify we can change the memory permission. KMemoryState old_state; @@ -1435,105 +1564,101 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size, // Succeed if there's nothing to do. R_SUCCEED_IF(old_perm == new_perm && old_state == new_state); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Perform mapping operation. const auto operation = was_x ? OperationType::ChangePermissionsAndRefresh : OperationType::ChangePermissions; R_TRY(Operate(addr, num_pages, new_perm, operation)); // Update the blocks. - block_manager->Update(addr, num_pages, new_state, new_perm, KMemoryAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::None); // Ensure cache coherency, if we're setting pages as executable. if (is_x) { - system.InvalidateCpuInstructionCacheRange(addr, size); + m_system.InvalidateCpuInstructionCacheRange(addr, size); } - return ResultSuccess; + R_SUCCEED(); } KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) { - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); - return block_manager->FindBlock(addr).GetMemoryInfo(); + return m_memory_block_manager.FindBlock(addr)->GetMemoryInfo(); } KMemoryInfo KPageTable::QueryInfo(VAddr addr) { if (!Contains(addr, 1)) { - return {address_space_end, 0 - address_space_end, KMemoryState::Inaccessible, - KMemoryPermission::None, KMemoryAttribute::None, KMemoryPermission::None}; + return { + .m_address = m_address_space_end, + .m_size = 0 - m_address_space_end, + .m_state = static_cast<KMemoryState>(Svc::MemoryState::Inaccessible), + .m_device_disable_merge_left_count = 0, + .m_device_disable_merge_right_count = 0, + .m_ipc_lock_count = 0, + .m_device_use_count = 0, + .m_ipc_disable_merge_count = 0, + .m_permission = KMemoryPermission::None, + .m_attribute = KMemoryAttribute::None, + .m_original_permission = KMemoryPermission::None, + .m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None, + }; } return QueryInfoImpl(addr); } -Result KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) { - KScopedLightLock lk(general_lock); - - KMemoryState state{}; - KMemoryAttribute attribute{}; - - R_TRY(CheckMemoryState(&state, nullptr, &attribute, nullptr, addr, size, - KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, - KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, - KMemoryPermission::All, KMemoryPermission::UserReadWrite, - KMemoryAttribute::Mask, KMemoryAttribute::None, - KMemoryAttribute::IpcAndDeviceMapped)); - - block_manager->Update(addr, size / PageSize, state, perm, attribute | KMemoryAttribute::Locked); - - return ResultSuccess; -} - -Result KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) { - KScopedLightLock lk(general_lock); - - KMemoryState 
state{}; - - R_TRY(CheckMemoryState(&state, nullptr, nullptr, nullptr, addr, size, - KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, - KMemoryState::FlagCanTransfer | KMemoryState::FlagReferenceCounted, - KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::Mask, - KMemoryAttribute::Locked, KMemoryAttribute::IpcAndDeviceMapped)); - - block_manager->Update(addr, size / PageSize, state, KMemoryPermission::UserReadWrite); - return ResultSuccess; -} - -Result KPageTable::SetMemoryPermission(VAddr addr, std::size_t size, - Svc::MemoryPermission svc_perm) { +Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm) { const size_t num_pages = size / PageSize; // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify we can change the memory permission. KMemoryState old_state; KMemoryPermission old_perm; - R_TRY(this->CheckMemoryState( - std::addressof(old_state), std::addressof(old_perm), nullptr, nullptr, addr, size, - KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, KMemoryPermission::None, - KMemoryPermission::None, KMemoryAttribute::All, KMemoryAttribute::None)); + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, + std::addressof(num_allocator_blocks), addr, size, + KMemoryState::FlagCanReprotect, KMemoryState::FlagCanReprotect, + KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::All, KMemoryAttribute::None)); // Determine new perm. const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); R_SUCCEED_IF(old_perm == new_perm); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Perform mapping operation. R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); // Update the blocks. - block_manager->Update(addr, num_pages, old_state, new_perm, KMemoryAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::None); - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr) { +Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) { const size_t num_pages = size / PageSize; ASSERT((static_cast<KMemoryAttribute>(mask) | KMemoryAttribute::SetMask) == KMemoryAttribute::SetMask); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Verify we can change the memory attribute. KMemoryState old_state; @@ -1548,6 +1673,12 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3 KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, AttributeTestMask, KMemoryAttribute::None, ~AttributeTestMask)); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Determine the new attribute. 
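The computation on the next lines merges the caller's attribute bits under the caller's mask, leaving every unmasked bit of the block's attribute word untouched. The same idea in a tiny self-contained form:

    #include <cstdint>

    constexpr std::uint32_t MergeUnderMask(std::uint32_t old_attr, std::uint32_t attr,
                                           std::uint32_t mask) {
        return (old_attr & ~mask) | (attr & mask);
    }

    static_assert(MergeUnderMask(0b1010, 0b0101, 0b0011) == 0b1001);
    static_assert(MergeUnderMask(0b1010, 0b0000, 0b0000) == 0b1010); // empty mask: no change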
const KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(((old_attr & static_cast<KMemoryAttribute>(~mask)) | @@ -1557,123 +1688,142 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u3 this->Operate(addr, num_pages, old_perm, OperationType::ChangePermissionsAndRefresh); // Update the blocks. - block_manager->Update(addr, num_pages, old_state, old_perm, new_attr); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm, + new_attr, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::None); - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::SetMaxHeapSize(std::size_t size) { +Result KPageTable::SetMaxHeapSize(size_t size) { // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Only process page tables are allowed to set heap size. ASSERT(!this->IsKernel()); - max_heap_size = size; + m_max_heap_size = size; - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) { +Result KPageTable::SetHeapSize(VAddr* out, size_t size) { // Lock the physical memory mutex. - KScopedLightLock map_phys_mem_lk(map_physical_memory_lock); + KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock); // Try to perform a reduction in heap, instead of an extension. VAddr cur_address{}; - std::size_t allocation_size{}; + size_t allocation_size{}; { // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Validate that setting heap size is possible at all. - R_UNLESS(!is_kernel, ResultOutOfMemory); - R_UNLESS(size <= static_cast<std::size_t>(heap_region_end - heap_region_start), + R_UNLESS(!m_is_kernel, ResultOutOfMemory); + R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start), ResultOutOfMemory); - R_UNLESS(size <= max_heap_size, ResultOutOfMemory); + R_UNLESS(size <= m_max_heap_size, ResultOutOfMemory); if (size < GetHeapSize()) { // The size being requested is less than the current size, so we need to free the end of // the heap. // Validate memory state. - std::size_t num_allocator_blocks; + size_t num_allocator_blocks; R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), - heap_region_start + size, GetHeapSize() - size, + m_heap_region_start + size, GetHeapSize() - size, KMemoryState::All, KMemoryState::Normal, KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, + num_allocator_blocks); + R_TRY(allocator_result); + // Unmap the end of the heap. const auto num_pages = (GetHeapSize() - size) / PageSize; - R_TRY(Operate(heap_region_start + size, num_pages, KMemoryPermission::None, + R_TRY(Operate(m_heap_region_start + size, num_pages, KMemoryPermission::None, OperationType::Unmap)); // Release the memory from the resource limit. - system.Kernel().CurrentProcess()->GetResourceLimit()->Release( + m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release( LimitableResource::PhysicalMemory, num_pages * PageSize); // Apply the memory block update. 
- block_manager->Update(heap_region_start + size, num_pages, KMemoryState::Free, - KMemoryPermission::None, KMemoryAttribute::None); + m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, + num_pages, KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None, + KMemoryBlockDisableMergeAttribute::None, + size == 0 ? KMemoryBlockDisableMergeAttribute::Normal + : KMemoryBlockDisableMergeAttribute::None); // Update the current heap end. - current_heap_end = heap_region_start + size; + m_current_heap_end = m_heap_region_start + size; // Set the output. - *out = heap_region_start; - return ResultSuccess; + *out = m_heap_region_start; + R_SUCCEED(); } else if (size == GetHeapSize()) { // The size requested is exactly the current size. - *out = heap_region_start; - return ResultSuccess; + *out = m_heap_region_start; + R_SUCCEED(); } else { // We have to allocate memory. Determine how much to allocate and where while the table // is locked. - cur_address = current_heap_end; + cur_address = m_current_heap_end; allocation_size = size - GetHeapSize(); } } // Reserve memory for the heap extension. KScopedResourceReservation memory_reservation( - system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, + m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory, allocation_size); R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached); // Allocate pages for the heap extension. KPageGroup pg; - R_TRY(system.Kernel().MemoryManager().AllocateAndOpen( + R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpen( &pg, allocation_size / PageSize, - KMemoryManager::EncodeOption(memory_pool, allocation_option))); + KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option))); // Clear all the newly allocated pages. for (const auto& it : pg.Nodes()) { - std::memset(system.DeviceMemory().GetPointer(it.GetAddress()), heap_fill_value, + std::memset(m_system.DeviceMemory().GetPointer<void>(it.GetAddress()), m_heap_fill_value, it.GetSize()); } // Map the pages. { // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Ensure that the heap hasn't changed since we began executing. - ASSERT(cur_address == current_heap_end); + ASSERT(cur_address == m_current_heap_end); // Check the memory state. - std::size_t num_allocator_blocks{}; - R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), current_heap_end, + size_t num_allocator_blocks{}; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end, allocation_size, KMemoryState::All, KMemoryState::Free, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::None, KMemoryAttribute::None)); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator( + std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Map the pages. const auto num_pages = allocation_size / PageSize; - R_TRY(Operate(current_heap_end, num_pages, pg, OperationType::MapGroup)); + R_TRY(Operate(m_current_heap_end, num_pages, pg, OperationType::MapGroup)); // Clear all the newly allocated pages. 
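On the extension path above, only the delta between the requested size and the current heap size is reserved against the resource limit, allocated from the memory manager, filled, and mapped at the current heap end. A worked example with illustrative values, assuming the usual 4 KiB page size:

    #include <cstddef>

    constexpr std::size_t PageSize = 0x1000;
    constexpr std::size_t current_heap_size = 4 * 1024 * 1024; // existing heap: 4 MiB
    constexpr std::size_t requested_size = 6 * 1024 * 1024;    // SetHeapSize target: 6 MiB
    constexpr std::size_t allocation_size = requested_size - current_heap_size;
    constexpr std::size_t num_pages = allocation_size / PageSize;

    static_assert(allocation_size == 2 * 1024 * 1024); // only the 2 MiB delta is allocated
    static_assert(num_pages == 512);                   // and mapped at the current heap end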
- for (std::size_t cur_page = 0; cur_page < num_pages; ++cur_page) { - std::memset(system.Memory().GetPointer(current_heap_end + (cur_page * PageSize)), 0, + for (size_t cur_page = 0; cur_page < num_pages; ++cur_page) { + std::memset(m_system.Memory().GetPointer(m_current_heap_end + (cur_page * PageSize)), 0, PageSize); } @@ -1681,133 +1831,172 @@ Result KPageTable::SetHeapSize(VAddr* out, std::size_t size) { memory_reservation.Commit(); // Apply the memory block update. - block_manager->Update(current_heap_end, num_pages, KMemoryState::Normal, - KMemoryPermission::UserReadWrite, KMemoryAttribute::None); + m_memory_block_manager.Update( + std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState::Normal, + KMemoryPermission::UserReadWrite, KMemoryAttribute::None, + m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute::Normal + : KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::None); // Update the current heap end. - current_heap_end = heap_region_start + size; + m_current_heap_end = m_heap_region_start + size; // Set the output. - *out = heap_region_start; - return ResultSuccess; + *out = m_heap_region_start; + R_SUCCEED(); } } -ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, +ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only, VAddr region_start, - std::size_t region_num_pages, KMemoryState state, + size_t region_num_pages, KMemoryState state, KMemoryPermission perm, PAddr map_addr) { - KScopedLightLock lk(general_lock); - - if (!CanContain(region_start, region_num_pages * PageSize, state)) { - return ResultInvalidCurrentMemory; - } - - if (region_num_pages <= needed_num_pages) { - return ResultOutOfMemory; - } + KScopedLightLock lk(m_general_lock); + R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state), + ResultInvalidCurrentMemory); + R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory); const VAddr addr{ AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)}; - if (!addr) { - return ResultOutOfMemory; - } + R_UNLESS(addr, ResultOutOfMemory); + + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager); if (is_map_only) { R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); } else { KPageGroup page_group; - R_TRY(system.Kernel().MemoryManager().AllocateAndOpenForProcess( + R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess( &page_group, needed_num_pages, - KMemoryManager::EncodeOption(memory_pool, allocation_option), 0, 0)); + KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0)); R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); } - block_manager->Update(addr, needed_num_pages, state, perm); + // Update the blocks. 
+ m_memory_block_manager.Update(std::addressof(allocator), addr, needed_num_pages, state, perm, + KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal, + KMemoryBlockDisableMergeAttribute::None); return addr; } -Result KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) { - KScopedLightLock lk(general_lock); - - KMemoryPermission perm{}; - if (const Result result{CheckMemoryState( - nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute, - KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, - KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None, - KMemoryAttribute::DeviceSharedAndUncached)}; - result.IsError()) { - return result; - } +Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, + bool is_aligned) { + // Lightly validate the range before doing anything else. + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); - block_manager->UpdateLock( - addr, size / PageSize, - [](KMemoryBlockManager::iterator block, KMemoryPermission permission) { - block->ShareToDevice(permission); - }, - perm); + // Lock the table. + KScopedLightLock lk(m_general_lock); - return ResultSuccess; + // Check the memory state. + const auto test_state = + (is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap); + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state, + test_state, perm, perm, + KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked, + KMemoryAttribute::None, KMemoryAttribute::DeviceShared)); + + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // Update the memory blocks. + m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, + &KMemoryBlock::ShareToDevice, KMemoryPermission::None); + + R_SUCCEED(); } -Result KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) { - KScopedLightLock lk(general_lock); - - KMemoryPermission perm{}; - if (const Result result{CheckMemoryState( - nullptr, &perm, nullptr, nullptr, addr, size, KMemoryState::FlagCanChangeAttribute, - KMemoryState::FlagCanChangeAttribute, KMemoryPermission::None, KMemoryPermission::None, - KMemoryAttribute::LockedAndIpcLocked, KMemoryAttribute::None, - KMemoryAttribute::DeviceSharedAndUncached)}; - result.IsError()) { - return result; - } +Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) { + // Lightly validate the range before doing anything else. + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); - block_manager->UpdateLock( - addr, size / PageSize, - [](KMemoryBlockManager::iterator block, KMemoryPermission permission) { - block->UnshareToDevice(permission); - }, - perm); + // Lock the table. + KScopedLightLock lk(m_general_lock); - return ResultSuccess; + // Check the memory state. 
+ size_t num_allocator_blocks; + R_TRY(this->CheckMemoryStateContiguous( + std::addressof(num_allocator_blocks), address, size, + KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, + KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap, + KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); + + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // Update the memory blocks. + const KMemoryBlockManager::MemoryBlockLockFunction lock_func = + m_enable_device_address_space_merge + ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare + : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight; + m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, + KMemoryPermission::None); + + R_SUCCEED(); } -Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size) { - return this->LockMemoryAndOpen( +Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) { + // Lightly validate the range before doing anything else. + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory); + + // Lock the table. + KScopedLightLock lk(m_general_lock); + + // Check the memory state. + size_t num_allocator_blocks; + R_TRY(this->CheckMemoryStateContiguous( + std::addressof(num_allocator_blocks), address, size, KMemoryState::FlagCanDeviceMap, + KMemoryState::FlagCanDeviceMap, KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared)); + + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + + // Update the memory blocks. 
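UpdateLock takes a pointer-to-member, so the same traversal can apply ShareToDevice, UnshareToDevice, or the disable-merge variants to every block in the affected range, as in the call that follows. A toy version of that mechanism with hypothetical types:

    #include <cstddef>
    #include <vector>

    struct Block {
        int device_use_count{0};
        void ShareToDevice(int /*permission*/) { ++device_use_count; }
        void UnshareToDevice(int /*permission*/) { --device_use_count; }
    };

    using LockFunction = void (Block::*)(int);

    void UpdateLock(std::vector<Block>& blocks, std::size_t first, std::size_t count,
                    LockFunction func, int permission) {
        for (std::size_t i = first; i < first + count; ++i) {
            (blocks[i].*func)(permission); // invoke the selected operation on each block
        }
    }

    // e.g. UpdateLock(blocks, 0, blocks.size(), &Block::ShareToDevice, 0);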
+ m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, + &KMemoryBlock::UnshareToDevice, KMemoryPermission::None); + + R_SUCCEED(); +} + +Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) { + R_RETURN(this->LockMemoryAndOpen( out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None, static_cast<KMemoryPermission>(KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite), - KMemoryAttribute::Locked); + KMemoryAttribute::Locked)); } -Result KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg) { - return this->UnlockMemory( +Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) { + R_RETURN(this->UnlockMemory( addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All, - KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg); -} - -Result KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) { - block_manager = std::make_unique<KMemoryBlockManager>(start, end); - - return ResultSuccess; -} - -bool KPageTable::IsRegionMapped(VAddr address, u64 size) { - return CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free, - KMemoryPermission::All, KMemoryPermission::None, KMemoryAttribute::Mask, - KMemoryAttribute::None, KMemoryAttribute::IpcAndDeviceMapped) - .IsError(); + KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg)); } bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { - auto start_ptr = system.Memory().GetPointer(addr); + auto start_ptr = m_system.DeviceMemory().GetPointer<u8>(addr); for (u64 offset{}; offset < size; offset += PageSize) { - if (start_ptr != system.Memory().GetPointer(addr + offset)) { + if (start_ptr != m_system.DeviceMemory().GetPointer<u8>(addr + offset)) { return false; } start_ptr += PageSize; @@ -1815,8 +2004,7 @@ bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const { return true; } -void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages, - KPageGroup& page_linked_list) { +void KPageTable::AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list) { VAddr addr{start}; while (addr < start + (num_pages * PageSize)) { const PAddr paddr{GetPhysicalAddr(addr)}; @@ -1826,16 +2014,16 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages, } } -VAddr KPageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, - u64 needed_num_pages, std::size_t align) { - if (is_aslr_enabled) { +VAddr KPageTable::AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages, + size_t align) { + if (m_enable_aslr) { UNIMPLEMENTED(); } - return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, - IsKernel() ? 1 : 4); + return m_memory_block_manager.FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, + IsKernel() ? 
1 : 4); } -Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, +Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group, OperationType operation) { ASSERT(this->IsLockedByCurrentThread()); @@ -1844,11 +2032,11 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& ASSERT(num_pages == page_group.GetNumPages()); for (const auto& node : page_group.Nodes()) { - const std::size_t size{node.GetNumPages() * PageSize}; + const size_t size{node.GetNumPages() * PageSize}; switch (operation) { case OperationType::MapGroup: - system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress()); + m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, size, node.GetAddress()); break; default: ASSERT(false); @@ -1857,10 +2045,10 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageGroup& addr += size; } - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, +Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, PAddr map_addr) { ASSERT(this->IsLockedByCurrentThread()); @@ -1870,12 +2058,12 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission switch (operation) { case OperationType::Unmap: - system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize); + m_system.Memory().UnmapRegion(*m_page_table_impl, addr, num_pages * PageSize); break; case OperationType::Map: { ASSERT(map_addr); ASSERT(Common::IsAligned(map_addr, PageSize)); - system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr); + m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr); break; } case OperationType::ChangePermissions: @@ -1884,25 +2072,25 @@ Result KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermission default: ASSERT(false); } - return ResultSuccess; + R_SUCCEED(); } VAddr KPageTable::GetRegionAddress(KMemoryState state) const { switch (state) { case KMemoryState::Free: case KMemoryState::Kernel: - return address_space_start; + return m_address_space_start; case KMemoryState::Normal: - return heap_region_start; + return m_heap_region_start; case KMemoryState::Ipc: case KMemoryState::NonSecureIpc: case KMemoryState::NonDeviceIpc: - return alias_region_start; + return m_alias_region_start; case KMemoryState::Stack: - return stack_region_start; + return m_stack_region_start; case KMemoryState::Static: case KMemoryState::ThreadLocal: - return kernel_map_region_start; + return m_kernel_map_region_start; case KMemoryState::Io: case KMemoryState::Shared: case KMemoryState::AliasCode: @@ -1913,31 +2101,31 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const { case KMemoryState::GeneratedCode: case KMemoryState::CodeOut: case KMemoryState::Coverage: - return alias_code_region_start; + return m_alias_code_region_start; case KMemoryState::Code: case KMemoryState::CodeData: - return code_region_start; + return m_code_region_start; default: UNREACHABLE(); } } -std::size_t KPageTable::GetRegionSize(KMemoryState state) const { +size_t KPageTable::GetRegionSize(KMemoryState state) const { switch (state) { case KMemoryState::Free: case KMemoryState::Kernel: - return address_space_end - address_space_start; + return m_address_space_end - m_address_space_start; case KMemoryState::Normal: - return heap_region_end - heap_region_start; + return 
m_heap_region_end - m_heap_region_start; case KMemoryState::Ipc: case KMemoryState::NonSecureIpc: case KMemoryState::NonDeviceIpc: - return alias_region_end - alias_region_start; + return m_alias_region_end - m_alias_region_start; case KMemoryState::Stack: - return stack_region_end - stack_region_start; + return m_stack_region_end - m_stack_region_start; case KMemoryState::Static: case KMemoryState::ThreadLocal: - return kernel_map_region_end - kernel_map_region_start; + return m_kernel_map_region_end - m_kernel_map_region_start; case KMemoryState::Io: case KMemoryState::Shared: case KMemoryState::AliasCode: @@ -1948,16 +2136,16 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const { case KMemoryState::GeneratedCode: case KMemoryState::CodeOut: case KMemoryState::Coverage: - return alias_code_region_end - alias_code_region_start; + return m_alias_code_region_end - m_alias_code_region_start; case KMemoryState::Code: case KMemoryState::CodeData: - return code_region_end - code_region_start; + return m_code_region_end - m_code_region_start; default: UNREACHABLE(); } } -bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) const { +bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const { const VAddr end = addr + size; const VAddr last = end - 1; @@ -1966,10 +2154,10 @@ bool KPageTable::CanContain(VAddr addr, std::size_t size, KMemoryState state) co const bool is_in_region = region_start <= addr && addr < end && last <= region_start + region_size - 1; - const bool is_in_heap = !(end <= heap_region_start || heap_region_end <= addr || - heap_region_start == heap_region_end); - const bool is_in_alias = !(end <= alias_region_start || alias_region_end <= addr || - alias_region_start == alias_region_end); + const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr || + m_heap_region_start == m_heap_region_end); + const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || + m_alias_region_start == m_alias_region_end); switch (state) { case KMemoryState::Free: case KMemoryState::Kernel: @@ -2008,23 +2196,23 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_ KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const { // Validate the states match expectation. - R_UNLESS((info.state & state_mask) == state, ResultInvalidCurrentMemory); - R_UNLESS((info.perm & perm_mask) == perm, ResultInvalidCurrentMemory); - R_UNLESS((info.attribute & attr_mask) == attr, ResultInvalidCurrentMemory); + R_UNLESS((info.m_state & state_mask) == state, ResultInvalidCurrentMemory); + R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory); + R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory); - return ResultSuccess; + R_SUCCEED(); } -Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, - std::size_t size, KMemoryState state_mask, - KMemoryState state, KMemoryPermission perm_mask, - KMemoryPermission perm, KMemoryAttribute attr_mask, +Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size, + KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr) const { ASSERT(this->IsLockedByCurrentThread()); // Get information about the first block. 
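The per-block validation done by CheckMemoryState is a plain mask/value comparison: for each block covering the range, the bits selected by each mask must equal the expected value exactly. A self-contained sketch with toy flags rather than the kernel's KMemoryState:

    #include <cstdint>

    enum class State : std::uint32_t {
        Free = 0,
        FlagCanDeviceMap = 1u << 0,
        FlagReferenceCounted = 1u << 1,
        All = 0xFFFFFFFF,
    };

    constexpr State operator&(State lhs, State rhs) {
        return static_cast<State>(static_cast<std::uint32_t>(lhs) &
                                  static_cast<std::uint32_t>(rhs));
    }

    constexpr bool MatchesState(State value, State mask, State expected) {
        return (value & mask) == expected;
    }

    static_assert(MatchesState(State::FlagCanDeviceMap, State::All, State::FlagCanDeviceMap));
    static_assert(!MatchesState(State::Free, State::All, State::FlagCanDeviceMap));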
const VAddr last_addr = addr + size - 1; - KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); KMemoryInfo info = it->GetMemoryInfo(); // If the start address isn't aligned, we need a block. @@ -2042,7 +2230,7 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA // Advance our iterator. it++; - ASSERT(it != block_manager->cend()); + ASSERT(it != m_memory_block_manager.cend()); info = it->GetMemoryInfo(); } @@ -2054,12 +2242,12 @@ Result KPageTable::CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VA *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; } - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, - KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, - VAddr addr, std::size_t size, KMemoryState state_mask, + KMemoryAttribute* out_attr, size_t* out_blocks_needed, + VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr) const { @@ -2067,7 +2255,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* // Get information about the first block. const VAddr last_addr = addr + size - 1; - KMemoryBlockManager::const_iterator it = block_manager->FindIterator(addr); + KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr); KMemoryInfo info = it->GetMemoryInfo(); // If the start address isn't aligned, we need a block. @@ -2075,14 +2263,14 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0; // Validate all blocks in the range have correct state. - const KMemoryState first_state = info.state; - const KMemoryPermission first_perm = info.perm; - const KMemoryAttribute first_attr = info.attribute; + const KMemoryState first_state = info.m_state; + const KMemoryPermission first_perm = info.m_permission; + const KMemoryAttribute first_attr = info.m_attribute; while (true) { // Validate the current block. - R_UNLESS(info.state == first_state, ResultInvalidCurrentMemory); - R_UNLESS(info.perm == first_perm, ResultInvalidCurrentMemory); - R_UNLESS((info.attribute | ignore_attr) == (first_attr | ignore_attr), + R_UNLESS(info.m_state == first_state, ResultInvalidCurrentMemory); + R_UNLESS(info.m_permission == first_perm, ResultInvalidCurrentMemory); + R_UNLESS((info.m_attribute | ignore_attr) == (first_attr | ignore_attr), ResultInvalidCurrentMemory); // Validate against the provided masks. @@ -2095,7 +2283,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* // Advance our iterator. it++; - ASSERT(it != block_manager->cend()); + ASSERT(it != m_memory_block_manager.cend()); info = it->GetMemoryInfo(); } @@ -2116,7 +2304,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* if (out_blocks_needed != nullptr) { *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; } - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, @@ -2134,7 +2322,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); // Lock the table. 
- KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check that the output page group is empty, if it exists. if (out_pg) { @@ -2162,6 +2350,12 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr R_TRY(this->MakePageGroup(*out_pg, addr, num_pages)); } + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Decide on new perm and attr. new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr); @@ -2172,9 +2366,11 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr } // Apply the memory block updates. - block_manager->Update(addr, num_pages, old_state, new_perm, new_attr); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, + new_attr, KMemoryBlockDisableMergeAttribute::Locked, + KMemoryBlockDisableMergeAttribute::None); - return ResultSuccess; + R_SUCCEED(); } Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask, @@ -2191,7 +2387,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory); // Lock the table. - KScopedLightLock lk(general_lock); + KScopedLightLock lk(m_general_lock); // Check the state. KMemoryState old_state{}; @@ -2213,15 +2409,23 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr); + // Create an update allocator. + Result allocator_result{ResultSuccess}; + KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), + m_memory_block_slab_manager, num_allocator_blocks); + R_TRY(allocator_result); + // Update permission, if we need to. if (new_perm != old_perm) { R_TRY(Operate(addr, num_pages, new_perm, OperationType::ChangePermissions)); } // Apply the memory block updates. 
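LockMemoryAndOpen and UnlockMemory are symmetric on the attribute word: locking ORs lock_attr into the old attribute, and unlocking (just above) masks it back out with the complementary AND-NOT. The bit pattern in isolation, with an illustrative bit value rather than the kernel's actual encoding:

    #include <cstdint>

    constexpr std::uint32_t LockedAttr = 1u << 8;

    constexpr std::uint32_t ApplyLock(std::uint32_t attr) {
        return attr | LockedAttr;   // LockMemoryAndOpen: old_attr | lock_attr
    }
    constexpr std::uint32_t ClearLock(std::uint32_t attr) {
        return attr & ~LockedAttr;  // UnlockMemory: old_attr & ~lock_attr
    }

    static_assert(ApplyLock(0) == LockedAttr);
    static_assert(ClearLock(ApplyLock(0)) == 0);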
- block_manager->Update(addr, num_pages, old_state, new_perm, new_attr); + m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, + new_attr, KMemoryBlockDisableMergeAttribute::None, + KMemoryBlockDisableMergeAttribute::Locked); - return ResultSuccess; + R_SUCCEED(); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index 25774f232..c6aeacd96 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h @@ -9,8 +9,10 @@ #include "common/common_types.h" #include "common/page_table.h" #include "core/file_sys/program_metadata.h" +#include "core/hle/kernel/k_dynamic_resource_manager.h" #include "core/hle/kernel/k_light_lock.h" #include "core/hle/kernel/k_memory_block.h" +#include "core/hle/kernel/k_memory_block_manager.h" #include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_memory_manager.h" #include "core/hle/result.h" @@ -34,58 +36,66 @@ public: ~KPageTable(); Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, - VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool); - Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state, + VAddr code_addr, size_t code_size, + KMemoryBlockSlabManager* mem_block_slab_manager, + KMemoryManager::Pool pool); + + void Finalize(); + + Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state, KMemoryPermission perm); - Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size); - Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size, + Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size); + Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size, ICacheInvalidationStrategy icache_invalidation_strategy); - Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, + Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table, VAddr src_addr); - Result MapPhysicalMemory(VAddr addr, std::size_t size); - Result UnmapPhysicalMemory(VAddr addr, std::size_t size); - Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); - Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); + Result MapPhysicalMemory(VAddr addr, size_t size); + Result UnmapPhysicalMemory(VAddr addr, size_t size); + Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size); + Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size); Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state, KMemoryPermission perm); - Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr, + Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, KMemoryState state, KMemoryPermission perm) { - return this->MapPages(out_addr, num_pages, alignment, phys_addr, true, - this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, - state, perm); + R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, + this->GetRegionAddress(state), + this->GetRegionSize(state) / PageSize, state, perm)); } Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state); - Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state); - Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm); + Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state); 
+ Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm); KMemoryInfo QueryInfo(VAddr addr); - Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm); - Result ResetTransferMemory(VAddr addr, std::size_t size); - Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm); - Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr); - Result SetMaxHeapSize(std::size_t size); - Result SetHeapSize(VAddr* out, std::size_t size); - ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, - bool is_map_only, VAddr region_start, - std::size_t region_num_pages, KMemoryState state, - KMemoryPermission perm, PAddr map_addr = 0); - Result LockForDeviceAddressSpace(VAddr addr, std::size_t size); - Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size); - Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size); - Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg); + Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm); + Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr); + Result SetMaxHeapSize(size_t size); + Result SetHeapSize(VAddr* out, size_t size); + ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only, + VAddr region_start, size_t region_num_pages, + KMemoryState state, KMemoryPermission perm, + PAddr map_addr = 0); + + Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm, + bool is_aligned); + Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size); + + Result UnlockForDeviceAddressSpace(VAddr addr, size_t size); + + Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size); + Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg); Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr); Common::PageTable& PageTableImpl() { - return page_table_impl; + return *m_page_table_impl; } const Common::PageTable& PageTableImpl() const { - return page_table_impl; + return *m_page_table_impl; } - bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const; + bool CanContain(VAddr addr, size_t size, KMemoryState state) const; private: enum class OperationType : u32 { @@ -96,67 +106,65 @@ private: ChangePermissionsAndRefresh, }; - static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask | - KMemoryAttribute::IpcLocked | - KMemoryAttribute::DeviceShared; + static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = + KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared; - Result InitializeMemoryLayout(VAddr start, VAddr end); Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm); - Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr, - bool is_pa_valid, VAddr region_start, std::size_t region_num_pages, + Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr, + bool is_pa_valid, VAddr region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm); Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list); - bool IsRegionMapped(VAddr address, u64 size); bool IsRegionContiguous(VAddr addr, u64 size) const; - 
void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list); + void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list); KMemoryInfo QueryInfoImpl(VAddr addr); - VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages, - std::size_t align); - Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group, + VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages, + size_t align); + Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group, OperationType operation); - Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm, - OperationType operation, PAddr map_addr = 0); + Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation, + PAddr map_addr = 0); VAddr GetRegionAddress(KMemoryState state) const; - std::size_t GetRegionSize(KMemoryState state) const; + size_t GetRegionSize(KMemoryState state) const; - VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, - std::size_t alignment, std::size_t offset, std::size_t guard_pages); + VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages, + size_t alignment, size_t offset, size_t guard_pages); - Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, + Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const; - Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask, + Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const { - return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, - perm, attr_mask, attr); + R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, + perm, attr_mask, attr)); } Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr) const; Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm, - KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr, - std::size_t size, KMemoryState state_mask, KMemoryState state, + KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr, + size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const; - Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size, + Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { - return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, - state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); + R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, 
size, + state_mask, state, perm_mask, perm, attr_mask, attr, + ignore_attr)); } - Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask, - KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm, + Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, KMemoryAttribute attr_mask, KMemoryAttribute attr, KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const { - return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, - attr_mask, attr, ignore_attr); + R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, + attr_mask, attr, ignore_attr)); } Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size, @@ -174,13 +182,13 @@ private: bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages); bool IsLockedByCurrentThread() const { - return general_lock.IsLockedByCurrentThread(); + return m_general_lock.IsLockedByCurrentThread(); } bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) { ASSERT(this->IsLockedByCurrentThread()); - return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr); + return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr); } bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const { @@ -191,95 +199,93 @@ private: return *out != 0; } - mutable KLightLock general_lock; - mutable KLightLock map_physical_memory_lock; - - std::unique_ptr<KMemoryBlockManager> block_manager; + mutable KLightLock m_general_lock; + mutable KLightLock m_map_physical_memory_lock; public: constexpr VAddr GetAddressSpaceStart() const { - return address_space_start; + return m_address_space_start; } constexpr VAddr GetAddressSpaceEnd() const { - return address_space_end; + return m_address_space_end; } - constexpr std::size_t GetAddressSpaceSize() const { - return address_space_end - address_space_start; + constexpr size_t GetAddressSpaceSize() const { + return m_address_space_end - m_address_space_start; } constexpr VAddr GetHeapRegionStart() const { - return heap_region_start; + return m_heap_region_start; } constexpr VAddr GetHeapRegionEnd() const { - return heap_region_end; + return m_heap_region_end; } - constexpr std::size_t GetHeapRegionSize() const { - return heap_region_end - heap_region_start; + constexpr size_t GetHeapRegionSize() const { + return m_heap_region_end - m_heap_region_start; } constexpr VAddr GetAliasRegionStart() const { - return alias_region_start; + return m_alias_region_start; } constexpr VAddr GetAliasRegionEnd() const { - return alias_region_end; + return m_alias_region_end; } - constexpr std::size_t GetAliasRegionSize() const { - return alias_region_end - alias_region_start; + constexpr size_t GetAliasRegionSize() const { + return m_alias_region_end - m_alias_region_start; } constexpr VAddr GetStackRegionStart() const { - return stack_region_start; + return m_stack_region_start; } constexpr VAddr GetStackRegionEnd() const { - return stack_region_end; + return m_stack_region_end; } - constexpr std::size_t GetStackRegionSize() const { - return stack_region_end - stack_region_start; + constexpr size_t GetStackRegionSize() const { + return m_stack_region_end - m_stack_region_start; } constexpr VAddr GetKernelMapRegionStart() const { - return kernel_map_region_start; + return m_kernel_map_region_start; } constexpr VAddr GetKernelMapRegionEnd() const { - return 
kernel_map_region_end; + return m_kernel_map_region_end; } constexpr VAddr GetCodeRegionStart() const { - return code_region_start; + return m_code_region_start; } constexpr VAddr GetCodeRegionEnd() const { - return code_region_end; + return m_code_region_end; } constexpr VAddr GetAliasCodeRegionStart() const { - return alias_code_region_start; + return m_alias_code_region_start; } constexpr VAddr GetAliasCodeRegionSize() const { - return alias_code_region_end - alias_code_region_start; + return m_alias_code_region_end - m_alias_code_region_start; } - std::size_t GetNormalMemorySize() { - KScopedLightLock lk(general_lock); - return GetHeapSize() + mapped_physical_memory_size; + size_t GetNormalMemorySize() { + KScopedLightLock lk(m_general_lock); + return GetHeapSize() + m_mapped_physical_memory_size; } - constexpr std::size_t GetAddressSpaceWidth() const { - return address_space_width; + constexpr size_t GetAddressSpaceWidth() const { + return m_address_space_width; } - constexpr std::size_t GetHeapSize() const { - return current_heap_end - heap_region_start; + constexpr size_t GetHeapSize() const { + return m_current_heap_end - m_heap_region_start; } - constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const { - return address_space_start <= address && address + size - 1 <= address_space_end - 1; + constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const { + return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1; } - constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const { - return alias_region_start > address || address + size - 1 > alias_region_end - 1; + constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const { + return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1; } - constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const { - return stack_region_start > address || address + size - 1 > stack_region_end - 1; + constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const { + return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1; } - constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const { + constexpr bool IsInvalidRegion(VAddr address, size_t size) const { return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1; } - constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const { - return address + size > heap_region_start && heap_region_end > address; + constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const { + return address + size > m_heap_region_start && m_heap_region_end > address; } - constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const { - return address + size > alias_region_start && alias_region_end > address; + constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const { + return address + size > m_alias_region_start && m_alias_region_end > address; } - constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const { + constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const { if (IsInvalidRegion(address, size)) { return true; } @@ -291,73 +297,78 @@ public: } return {}; } - constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const { + constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const { return !IsOutsideASLRRegion(address, size); } - constexpr std::size_t GetNumGuardPages() const { + constexpr size_t GetNumGuardPages() const { return 
IsKernel() ? 1 : 4; } PAddr GetPhysicalAddr(VAddr addr) const { - const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits]; + const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits]; ASSERT(backing_addr); return backing_addr + addr; } constexpr bool Contains(VAddr addr) const { - return address_space_start <= addr && addr <= address_space_end - 1; + return m_address_space_start <= addr && addr <= m_address_space_end - 1; } - constexpr bool Contains(VAddr addr, std::size_t size) const { - return address_space_start <= addr && addr < addr + size && - addr + size - 1 <= address_space_end - 1; + constexpr bool Contains(VAddr addr, size_t size) const { + return m_address_space_start <= addr && addr < addr + size && + addr + size - 1 <= m_address_space_end - 1; } private: constexpr bool IsKernel() const { - return is_kernel; + return m_is_kernel; } constexpr bool IsAslrEnabled() const { - return is_aslr_enabled; + return m_enable_aslr; } - constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const { - return (address_space_start <= addr) && - (num_pages <= (address_space_end - address_space_start) / PageSize) && - (addr + num_pages * PageSize - 1 <= address_space_end - 1); + constexpr bool ContainsPages(VAddr addr, size_t num_pages) const { + return (m_address_space_start <= addr) && + (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && + (addr + num_pages * PageSize - 1 <= m_address_space_end - 1); } private: - VAddr address_space_start{}; - VAddr address_space_end{}; - VAddr heap_region_start{}; - VAddr heap_region_end{}; - VAddr current_heap_end{}; - VAddr alias_region_start{}; - VAddr alias_region_end{}; - VAddr stack_region_start{}; - VAddr stack_region_end{}; - VAddr kernel_map_region_start{}; - VAddr kernel_map_region_end{}; - VAddr code_region_start{}; - VAddr code_region_end{}; - VAddr alias_code_region_start{}; - VAddr alias_code_region_end{}; - - std::size_t mapped_physical_memory_size{}; - std::size_t max_heap_size{}; - std::size_t max_physical_memory_size{}; - std::size_t address_space_width{}; - - bool is_kernel{}; - bool is_aslr_enabled{}; - - u32 heap_fill_value{}; - const KMemoryRegion* cached_physical_heap_region{}; - - KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application}; - KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront}; - - Common::PageTable page_table_impl; - - Core::System& system; + VAddr m_address_space_start{}; + VAddr m_address_space_end{}; + VAddr m_heap_region_start{}; + VAddr m_heap_region_end{}; + VAddr m_current_heap_end{}; + VAddr m_alias_region_start{}; + VAddr m_alias_region_end{}; + VAddr m_stack_region_start{}; + VAddr m_stack_region_end{}; + VAddr m_kernel_map_region_start{}; + VAddr m_kernel_map_region_end{}; + VAddr m_code_region_start{}; + VAddr m_code_region_end{}; + VAddr m_alias_code_region_start{}; + VAddr m_alias_code_region_end{}; + + size_t m_mapped_physical_memory_size{}; + size_t m_max_heap_size{}; + size_t m_max_physical_memory_size{}; + size_t m_address_space_width{}; + + KMemoryBlockManager m_memory_block_manager; + + bool m_is_kernel{}; + bool m_enable_aslr{}; + bool m_enable_device_address_space_merge{}; + + KMemoryBlockSlabManager* m_memory_block_slab_manager{}; + + u32 m_heap_fill_value{}; + const KMemoryRegion* m_cached_physical_heap_region{}; + + KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application}; + KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront}; + + 
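The range checks above (IsInsideAddressSpace, IsOutsideAliasRegion, Contains, ContainsPages) compare the last byte of a range (addr + size - 1) against the last byte of the region rather than forming a one-past-the-end address, so a range that ends exactly at the top of the address space cannot wrap to zero; the two-argument Contains additionally rejects sizes whose addition would overflow. A self-contained restatement of that test, with names local to this sketch rather than KPageTable members:

#include <cstdint>

// Inclusive-bound containment test in the same shape as the helpers above.
constexpr bool ContainsRange(std::uint64_t region_start, std::uint64_t region_end,
                             std::uint64_t addr, std::uint64_t size) {
    return region_start <= addr &&              // starts inside the region
           addr < addr + size &&                // non-zero size, no wraparound
           addr + size - 1 <= region_end - 1;   // last byte still inside the region
}

static_assert(ContainsRange(0x1000, 0x3000, 0x1000, 0x2000));   // fills the region exactly
static_assert(!ContainsRange(0x1000, 0x3000, 0x2800, 0x1000));  // runs past the end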
std::unique_ptr<Common::PageTable> m_page_table_impl; + + Core::System& m_system; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index d3e99665f..8c3495e5a 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -72,7 +72,8 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process->name = std::move(process_name); process->resource_limit = res_limit; - process->status = ProcessStatus::Created; + process->system_resource_address = 0; + process->state = State::Created; process->program_id = 0; process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() : kernel.CreateNewUserProcessID(); @@ -92,11 +93,12 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string process->exception_thread = nullptr; process->is_suspended = false; process->schedule_count = 0; + process->is_handle_table_initialized = false; // Open a reference to the resource limit. process->resource_limit->Open(); - return ResultSuccess; + R_SUCCEED(); } void KProcess::DoWorkerTaskImpl() { @@ -121,9 +123,9 @@ void KProcess::DecrementRunningThreadCount() { } } -u64 KProcess::GetTotalPhysicalMemoryAvailable() const { +u64 KProcess::GetTotalPhysicalMemoryAvailable() { const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) + - page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size + + page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size + main_thread_stack_size}; if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application); capacity != pool_size) { @@ -135,16 +137,16 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() const { return memory_usage_capacity; } -u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const { +u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() { return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize(); } -u64 KProcess::GetTotalPhysicalMemoryUsed() const { - return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() + +u64 KProcess::GetTotalPhysicalMemoryUsed() { + return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() + GetSystemResourceSize(); } -u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { +u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() { return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); } @@ -244,7 +246,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad shmem->Open(); shemen_info->Open(); - return ResultSuccess; + R_SUCCEED(); } void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, @@ -289,12 +291,12 @@ Result KProcess::Reset() { KScopedSchedulerLock sl{kernel}; // Validate that we're in a state that we can reset. - R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState); + R_UNLESS(state != State::Terminated, ResultInvalidState); R_UNLESS(is_signaled, ResultInvalidState); // Clear signaled. is_signaled = false; - return ResultSuccess; + R_SUCCEED(); } Result KProcess::SetActivity(ProcessActivity activity) { @@ -304,15 +306,13 @@ Result KProcess::SetActivity(ProcessActivity activity) { KScopedSchedulerLock sl{kernel}; // Validate our state. 
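This change converts most hand-written early returns in the process code to the kernel's result macros (R_SUCCEED, R_RETURN, R_UNLESS, R_TRY, R_SUCCEED_IF). Their observable behaviour, as used in the hunks below, is roughly the following; this is a compilable approximation with a stand-in Result type, not yuzu's actual definitions:

#include <cstdint>

// Stand-in Result type, just enough for the sketch to compile.
struct Result {
    std::uint32_t raw;
    constexpr bool IsError() const { return raw != 0; }
};
constexpr Result ResultSuccess{0};
constexpr Result ResultInvalidState{1};  // placeholder error value for the sketch

#define R_SUCCEED()            return ResultSuccess
#define R_RETURN(expr)         return (expr)
#define R_SUCCEED_IF(cond)     do { if (cond) { return ResultSuccess; } } while (0)
#define R_UNLESS(cond, result) do { if (!(cond)) { return (result); } } while (0)
#define R_TRY(expr)            do { if (const Result r = (expr); r.IsError()) { return r; } } while (0)

// Usage in the shape of the converted code below:
Result SetActivityLikeCheck(bool is_suspended) {
    R_UNLESS(!is_suspended, ResultInvalidState);  // was: if (is_suspended) { return ResultInvalidState; }
    R_SUCCEED();                                  // was: return ResultSuccess;
}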
- R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState); - R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState); + R_UNLESS(state != State::Terminating, ResultInvalidState); + R_UNLESS(state != State::Terminated, ResultInvalidState); // Either pause or resume. if (activity == ProcessActivity::Paused) { // Verify that we're not suspended. - if (is_suspended) { - return ResultInvalidState; - } + R_UNLESS(!is_suspended, ResultInvalidState); // Suspend all threads. for (auto* thread : GetThreadList()) { @@ -325,9 +325,7 @@ Result KProcess::SetActivity(ProcessActivity activity) { ASSERT(activity == ProcessActivity::Runnable); // Verify that we're suspended. - if (!is_suspended) { - return ResultInvalidState; - } + R_UNLESS(is_suspended, ResultInvalidState); // Resume all threads. for (auto* thread : GetThreadList()) { @@ -338,7 +336,7 @@ Result KProcess::SetActivity(ProcessActivity activity) { SetSuspended(false); } - return ResultSuccess; + R_SUCCEED(); } Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) { @@ -348,35 +346,38 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: system_resource_size = metadata.GetSystemResourceSize(); image_size = code_size; + // We currently do not support process-specific system resource + UNIMPLEMENTED_IF(system_resource_size != 0); + KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory, code_size + system_resource_size); if (!memory_reservation.Succeeded()) { LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes", code_size + system_resource_size); - return ResultLimitReached; + R_RETURN(ResultLimitReached); } // Initialize proces address space - if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, - 0x8000000, code_size, - KMemoryManager::Pool::Application)}; + if (const Result result{page_table.InitializeForProcess( + metadata.GetAddressSpaceType(), false, 0x8000000, code_size, + &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)}; result.IsError()) { - return result; + R_RETURN(result); } // Map process code region - if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(), - code_size / PageSize, KMemoryState::Code, - KMemoryPermission::None)}; + if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(), + code_size / PageSize, KMemoryState::Code, + KMemoryPermission::None)}; result.IsError()) { - return result; + R_RETURN(result); } // Initialize process capabilities const auto& caps{metadata.GetKernelCapabilities()}; if (const Result result{ - capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)}; + capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)}; result.IsError()) { - return result; + R_RETURN(result); } // Set memory usage capacity @@ -384,12 +385,12 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: case FileSys::ProgramAddressSpaceType::Is32Bit: case FileSys::ProgramAddressSpaceType::Is36Bit: case FileSys::ProgramAddressSpaceType::Is39Bit: - memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart(); + memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart(); break; case FileSys::ProgramAddressSpaceType::Is32BitNoMap: - memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() + - 
page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart(); + memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() + + page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart(); break; default: @@ -397,10 +398,10 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std: } // Create TLS region - R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address))); + R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address))); memory_reservation.Commit(); - return handle_table.Initialize(capabilities.GetHandleTableSize()); + R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize())); } void KProcess::Run(s32 main_thread_priority, u64 stack_size) { @@ -409,15 +410,15 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) { resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size); const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)}; - ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError()); + ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError()); - ChangeStatus(ProcessStatus::Running); + ChangeState(State::Running); SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top); } void KProcess::PrepareForTermination() { - ChangeStatus(ProcessStatus::Exiting); + ChangeState(State::Terminating); const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) { for (auto* thread : in_thread_list) { @@ -437,15 +438,15 @@ void KProcess::PrepareForTermination() { stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList()); - this->DeleteThreadLocalRegion(tls_region_address); - tls_region_address = 0; + this->DeleteThreadLocalRegion(plr_address); + plr_address = 0; if (resource_limit) { resource_limit->Release(LimitableResource::PhysicalMemory, main_thread_stack_size + image_size); } - ChangeStatus(ProcessStatus::Exited); + ChangeState(State::Terminated); } void KProcess::Finalize() { @@ -474,7 +475,7 @@ void KProcess::Finalize() { } // Finalize the page table. - page_table.reset(); + page_table.Finalize(); // Perform inherited finalization. KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize(); @@ -499,7 +500,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) { } *out = tlr; - return ResultSuccess; + R_SUCCEED(); } } @@ -528,7 +529,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) { // We succeeded! 
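The "We succeeded!" epilogue below disarms a cleanup guard: on any earlier error return, cleanup registered higher up in the function (presumably freeing the freshly allocated thread-local page) runs automatically, and Cancel() switches it off once the result can be committed. A minimal cancellable guard in the same spirit, a sketch rather than yuzu's actual SCOPE_EXIT/guard helpers:

#include <utility>

// Minimal cancellable scope guard illustrating the tlp_guard.Cancel() idiom.
template <typename F>
class CancellableGuard {
public:
    explicit CancellableGuard(F func) : m_func(std::move(func)) {}
    ~CancellableGuard() {
        if (m_armed) {
            m_func();  // runs on every early (error) return path
        }
    }
    void Cancel() {
        m_armed = false;  // success path: keep the resource
    }

    CancellableGuard(const CancellableGuard&) = delete;
    CancellableGuard& operator=(const CancellableGuard&) = delete;

private:
    F m_func;
    bool m_armed = true;
};

// Usage shape:
//   CancellableGuard guard{[&] { /* release the resource again */ }};
//   ...early returns run the cleanup automatically...
//   guard.Cancel();  // commit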
tlp_guard.Cancel(); *out = tlr; - return ResultSuccess; + R_SUCCEED(); } Result KProcess::DeleteThreadLocalRegion(VAddr addr) { @@ -576,7 +577,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) { KThreadLocalPage::Free(kernel, page_to_free); } - return ResultSuccess; + R_SUCCEED(); } bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size, @@ -628,7 +629,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size, void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { const auto ReprotectSegment = [&](const CodeSet::Segment& segment, Svc::MemoryPermission permission) { - page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission); + page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission); }; kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(), @@ -645,19 +646,18 @@ bool KProcess::IsSignaled() const { } KProcess::KProcess(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>( - kernel_.System())}, + : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()}, handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()}, state_lock{kernel_}, list_lock{kernel_} {} KProcess::~KProcess() = default; -void KProcess::ChangeStatus(ProcessStatus new_status) { - if (status == new_status) { +void KProcess::ChangeState(State new_state) { + if (state == new_state) { return; } - status = new_status; + state = new_state; is_signaled = true; NotifyAvailable(); } @@ -668,17 +668,17 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) { // The kernel always ensures that the given stack size is page aligned. main_thread_stack_size = Common::AlignUp(stack_size, PageSize); - const VAddr start{page_table->GetStackRegionStart()}; - const std::size_t size{page_table->GetStackRegionEnd() - start}; + const VAddr start{page_table.GetStackRegionStart()}; + const std::size_t size{page_table.GetStackRegionEnd() - start}; CASCADE_RESULT(main_thread_stack_top, - page_table->AllocateAndMapMemory( + page_table.AllocateAndMapMemory( main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize, KMemoryState::Stack, KMemoryPermission::UserReadWrite)); main_thread_stack_top += main_thread_stack_size; - return ResultSuccess; + R_SUCCEED(); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index d56d73bab..2e0cc3d0b 100644 --- a/src/core/hle/kernel/k_process.h +++ b/src/core/hle/kernel/k_process.h @@ -13,6 +13,7 @@ #include "core/hle/kernel/k_auto_object.h" #include "core/hle/kernel/k_condition_variable.h" #include "core/hle/kernel/k_handle_table.h" +#include "core/hle/kernel/k_page_table.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_thread_local_page.h" #include "core/hle/kernel/k_worker_task.h" @@ -31,7 +32,6 @@ class ProgramMetadata; namespace Kernel { class KernelCore; -class KPageTable; class KResourceLimit; class KThread; class KSharedMemoryInfo; @@ -45,24 +45,6 @@ enum class MemoryRegion : u16 { BASE = 3, }; -/** - * Indicates the status of a Process instance. - * - * @note These match the values as used by kernel, - * so new entries should only be added if RE - * shows that a new value has been introduced. 
- */ -enum class ProcessStatus { - Created, - CreatedWithDebuggerAttached, - Running, - WaitingForDebuggerToAttach, - DebuggerAttached, - Exiting, - Exited, - DebugBreak, -}; - enum class ProcessActivity : u32 { Runnable, Paused, @@ -89,6 +71,17 @@ public: explicit KProcess(KernelCore& kernel_); ~KProcess() override; + enum class State { + Created = static_cast<u32>(Svc::ProcessState::Created), + CreatedAttached = static_cast<u32>(Svc::ProcessState::CreatedAttached), + Running = static_cast<u32>(Svc::ProcessState::Running), + Crashed = static_cast<u32>(Svc::ProcessState::Crashed), + RunningAttached = static_cast<u32>(Svc::ProcessState::RunningAttached), + Terminating = static_cast<u32>(Svc::ProcessState::Terminating), + Terminated = static_cast<u32>(Svc::ProcessState::Terminated), + DebugBreak = static_cast<u32>(Svc::ProcessState::DebugBreak), + }; + enum : u64 { /// Lowest allowed process ID for a kernel initial process. InitialKIPIDMin = 1, @@ -114,12 +107,12 @@ public: /// Gets a reference to the process' page table. KPageTable& PageTable() { - return *page_table; + return page_table; } /// Gets const a reference to the process' page table. const KPageTable& PageTable() const { - return *page_table; + return page_table; } /// Gets a reference to the process' handle table. @@ -145,26 +138,25 @@ public: } Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) { - return condition_var.Wait(address, cv_key, tag, ns); + R_RETURN(condition_var.Wait(address, cv_key, tag, ns)); } Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) { - return address_arbiter.SignalToAddress(address, signal_type, value, count); + R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count)); } Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value, s64 timeout) { - return address_arbiter.WaitForAddress(address, arb_type, value, timeout); + R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout)); } - /// Gets the address to the process' dedicated TLS region. - VAddr GetTLSRegionAddress() const { - return tls_region_address; + VAddr GetProcessLocalRegionAddress() const { + return plr_address; } /// Gets the current status of the process - ProcessStatus GetStatus() const { - return status; + State GetState() const { + return state; } /// Gets the unique ID that identifies this particular process. @@ -286,18 +278,18 @@ public: } /// Retrieves the total physical memory available to this process in bytes. - u64 GetTotalPhysicalMemoryAvailable() const; + u64 GetTotalPhysicalMemoryAvailable(); /// Retrieves the total physical memory available to this process in bytes, /// without the size of the personal system resource heap added to it. - u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const; + u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource(); /// Retrieves the total physical memory used by this process in bytes. - u64 GetTotalPhysicalMemoryUsed() const; + u64 GetTotalPhysicalMemoryUsed(); /// Retrieves the total physical memory used by this process in bytes, /// without the size of the personal system resource heap added to it. - u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const; + u64 GetTotalPhysicalMemoryUsedWithoutSystemResource(); /// Gets the list of all threads created with this process as their owner. std::list<KThread*>& GetThreadList() { @@ -415,19 +407,24 @@ private: pinned_threads[core_id] = nullptr; } - /// Changes the process status. 
If the status is different - /// from the current process status, then this will trigger - /// a process signal. - void ChangeStatus(ProcessStatus new_status); + void FinalizeHandleTable() { + // Finalize the table. + handle_table.Finalize(); + + // Note that the table is finalized. + is_handle_table_initialized = false; + } + + void ChangeState(State new_state); /// Allocates the main thread stack for the process, given the stack size in bytes. Result AllocateMainThreadStack(std::size_t stack_size); /// Memory manager for this process - std::unique_ptr<KPageTable> page_table; + KPageTable page_table; /// Current status of the process - ProcessStatus status{}; + State state{}; /// The ID of this process u64 process_id = 0; @@ -443,6 +440,8 @@ private: /// Resource limit descriptor for this process KResourceLimit* resource_limit{}; + VAddr system_resource_address{}; + /// The ideal CPU core for this process, threads are scheduled on this core by default. u8 ideal_core = 0; @@ -469,7 +468,7 @@ private: KConditionVariable condition_var; /// Address indicating the location of the process' dedicated TLS region. - VAddr tls_region_address = 0; + VAddr plr_address = 0; /// Random values for svcGetInfo RandomEntropy std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{}; @@ -495,8 +494,12 @@ private: /// Schedule count of this process s64 schedule_count{}; + size_t memory_release_hint{}; + bool is_signaled{}; bool is_suspended{}; + bool is_immortal{}; + bool is_handle_table_initialized{}; bool is_initialized{}; std::atomic<u16> num_running_threads{}; diff --git a/src/core/hle/kernel/k_readable_event.cpp b/src/core/hle/kernel/k_readable_event.cpp index 94c5464fe..5c942d47c 100644 --- a/src/core/hle/kernel/k_readable_event.cpp +++ b/src/core/hle/kernel/k_readable_event.cpp @@ -15,31 +15,44 @@ KReadableEvent::KReadableEvent(KernelCore& kernel_) : KSynchronizationObject{ker KReadableEvent::~KReadableEvent() = default; +void KReadableEvent::Initialize(KEvent* parent) { + m_is_signaled = false; + m_parent = parent; + + if (m_parent != nullptr) { + m_parent->Open(); + } +} + bool KReadableEvent::IsSignaled() const { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); - return is_signaled; + return m_is_signaled; } void KReadableEvent::Destroy() { - if (parent) { - parent->Close(); + if (m_parent) { + { + KScopedSchedulerLock sl{kernel}; + m_parent->OnReadableEventDestroyed(); + } + m_parent->Close(); } } Result KReadableEvent::Signal() { KScopedSchedulerLock lk{kernel}; - if (!is_signaled) { - is_signaled = true; - NotifyAvailable(); + if (!m_is_signaled) { + m_is_signaled = true; + this->NotifyAvailable(); } return ResultSuccess; } Result KReadableEvent::Clear() { - Reset(); + this->Reset(); return ResultSuccess; } @@ -47,11 +60,11 @@ Result KReadableEvent::Clear() { Result KReadableEvent::Reset() { KScopedSchedulerLock lk{kernel}; - if (!is_signaled) { + if (!m_is_signaled) { return ResultInvalidState; } - is_signaled = false; + m_is_signaled = false; return ResultSuccess; } diff --git a/src/core/hle/kernel/k_readable_event.h b/src/core/hle/kernel/k_readable_event.h index 18dcad289..743f96bf5 100644 --- a/src/core/hle/kernel/k_readable_event.h +++ b/src/core/hle/kernel/k_readable_event.h @@ -20,26 +20,23 @@ public: explicit KReadableEvent(KernelCore& kernel_); ~KReadableEvent() override; - void Initialize(KEvent* parent_event_, std::string&& name_) { - is_signaled = false; - parent = parent_event_; - name = std::move(name_); - } + void 
Initialize(KEvent* parent); KEvent* GetParent() const { - return parent; + return m_parent; } + Result Signal(); + Result Clear(); + bool IsSignaled() const override; void Destroy() override; - Result Signal(); - Result Clear(); Result Reset(); private: - bool is_signaled{}; - KEvent* parent{}; + bool m_is_signaled{}; + KEvent* m_parent{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp index 802c646a6..faf03fcc8 100644 --- a/src/core/hle/kernel/k_server_session.cpp +++ b/src/core/hle/kernel/k_server_session.cpp @@ -7,6 +7,8 @@ #include "common/assert.h" #include "common/common_types.h" #include "common/logging/log.h" +#include "common/scope_exit.h" +#include "core/core.h" #include "core/core_timing.h" #include "core/hle/ipc_helpers.h" #include "core/hle/kernel/hle_ipc.h" @@ -18,13 +20,16 @@ #include "core/hle/kernel/k_server_session.h" #include "core/hle/kernel/k_session.h" #include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/kernel.h" -#include "core/hle/kernel/service_thread.h" #include "core/memory.h" namespace Kernel { -KServerSession::KServerSession(KernelCore& kernel_) : KSynchronizationObject{kernel_} {} +using ThreadQueueImplForKServerSessionRequest = KThreadQueue; + +KServerSession::KServerSession(KernelCore& kernel_) + : KSynchronizationObject{kernel_}, m_lock{kernel_} {} KServerSession::~KServerSession() = default; @@ -33,17 +38,14 @@ void KServerSession::Initialize(KSession* parent_session_, std::string&& name_, // Set member variables. parent = parent_session_; name = std::move(name_); - - if (manager_) { - manager = manager_; - } else { - manager = std::make_shared<SessionRequestManager>(kernel); - } + manager = manager_; } void KServerSession::Destroy() { parent->OnServerClosed(); + this->CleanupRequests(); + parent->Close(); // Release host emulation members. @@ -54,13 +56,13 @@ void KServerSession::Destroy() { } void KServerSession::OnClientClosed() { - if (manager->HasSessionHandler()) { + if (manager && manager->HasSessionHandler()) { manager->SessionHandler().ClientDisconnected(this); } } bool KServerSession::IsSignaled() const { - ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel)); // If the client is closed, we're always signaled. if (parent->IsClientClosed()) { @@ -68,114 +70,281 @@ bool KServerSession::IsSignaled() const { } // Otherwise, we're signaled if we have a request and aren't handling one. - return false; + return !m_request_list.empty() && m_current_request == nullptr; } -void KServerSession::AppendDomainHandler(SessionRequestHandlerPtr handler) { - manager->AppendDomainHandler(std::move(handler)); +Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) { + u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))}; + auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread); + + context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); + + return manager->QueueSyncRequest(parent, std::move(context)); } -std::size_t KServerSession::NumDomainRequestHandlers() const { - return manager->DomainHandlerCount(); +Result KServerSession::CompleteSyncRequest(HLERequestContext& context) { + Result result = manager->CompleteSyncRequest(this, context); + + // The calling thread is waiting for this request to complete, so wake it up. 
+ context.GetThread().EndWait(result); + + return result; } -Result KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& context) { - if (!context.HasDomainMessageHeader()) { - return ResultSuccess; - } +Result KServerSession::OnRequest(KSessionRequest* request) { + // Create the wait queue. + ThreadQueueImplForKServerSessionRequest wait_queue{kernel}; - // Set domain handlers in HLE context, used for domain objects (IPC interfaces) as inputs - context.SetSessionRequestManager(manager); - - // If there is a DomainMessageHeader, then this is CommandType "Request" - const auto& domain_message_header = context.GetDomainMessageHeader(); - const u32 object_id{domain_message_header.object_id}; - switch (domain_message_header.command) { - case IPC::DomainMessageHeader::CommandType::SendMessage: - if (object_id > manager->DomainHandlerCount()) { - LOG_CRITICAL(IPC, - "object_id {} is too big! This probably means a recent service call " - "to {} needed to return a new interface!", - object_id, name); - ASSERT(false); - return ResultSuccess; // Ignore error if asserts are off - } - if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) { - return strong_ptr->HandleSyncRequest(*this, context); + { + // Lock the scheduler. + KScopedSchedulerLock sl{kernel}; + + // Ensure that we can handle new requests. + R_UNLESS(!parent->IsServerClosed(), ResultSessionClosed); + + // Check that we're not terminating. + R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested); + + if (manager) { + // HLE request. + auto& memory{kernel.System().Memory()}; + this->QueueSyncRequest(GetCurrentThreadPointer(kernel), memory); } else { - ASSERT(false); - return ResultSuccess; - } + // Non-HLE request. - case IPC::DomainMessageHeader::CommandType::CloseVirtualHandle: { - LOG_DEBUG(IPC, "CloseVirtualHandle, object_id=0x{:08X}", object_id); + // Get whether we're empty. + const bool was_empty = m_request_list.empty(); - manager->CloseDomainHandler(object_id - 1); + // Add the request to the list. + request->Open(); + m_request_list.push_back(*request); - IPC::ResponseBuilder rb{context, 2}; - rb.Push(ResultSuccess); - return ResultSuccess; - } + // If we were empty, signal. + if (was_empty) { + this->NotifyAvailable(); + } + } + + // If we have a request event, this is asynchronous, and we don't need to wait. + R_SUCCEED_IF(request->GetEvent() != nullptr); + + // This is a synchronous request, so we should wait for our request to complete. + GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); + GetCurrentThread(kernel).BeginWait(&wait_queue); } - LOG_CRITICAL(IPC, "Unknown domain command={}", domain_message_header.command.Value()); - ASSERT(false); - return ResultSuccess; + return GetCurrentThread(kernel).GetWaitResult(); } -Result KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory) { - u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))}; - auto context = std::make_shared<HLERequestContext>(kernel, memory, this, thread); +Result KServerSession::SendReply() { + // Lock the session. + KScopedLightLock lk{m_lock}; - context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf); + // Get the request. 
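OnRequest above either hands the message to the HLE manager or links a KSessionRequest into m_request_list and puts the client thread to sleep on the wait queue; a native (non-HLE) server then drains the session with ReceiveRequest and answers with SendReply, whose implementations follow. A rough server-side loop against just the interface shown in this diff; the loop itself is illustrative, not code from yuzu:

// Sketch of the intended ReceiveRequest()/SendReply() pairing for a non-HLE server.
// A real server would first wait on the session object, which IsSignaled() reports as
// "requests are queued and none is currently being serviced".
void ServeSessionOnce(Kernel::KServerSession& session) {
    // Copy the next queued client message into this thread's TLS buffer.
    if (session.ReceiveRequest().IsError()) {
        return;  // ResultNotFound (nothing queued / already servicing) or ResultSessionClosed
    }

    // ... decode the request from the TLS buffer and write the reply in place ...

    // Copy the reply back to the client and wake the requester (or signal its event
    // for an asynchronous request).
    session.SendReply();
}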
+ KSessionRequest* request; + { + KScopedSchedulerLock sl{kernel}; - // Ensure we have a session request handler - if (manager->HasSessionRequestHandler(*context)) { - if (auto strong_ptr = manager->GetServiceThread().lock()) { - strong_ptr->QueueSyncRequest(*parent, std::move(context)); - } else { - ASSERT_MSG(false, "strong_ptr is nullptr!"); + // Get the current request. + request = m_current_request; + R_UNLESS(request != nullptr, ResultInvalidState); + + // Clear the current request, since we're processing it. + m_current_request = nullptr; + if (!m_request_list.empty()) { + this->NotifyAvailable(); } - } else { - ASSERT_MSG(false, "handler is invalid!"); } - return ResultSuccess; -} + // Close reference to the request once we're done processing it. + SCOPE_EXIT({ request->Close(); }); -Result KServerSession::CompleteSyncRequest(HLERequestContext& context) { - Result result = ResultSuccess; + // Extract relevant information from the request. + const uintptr_t client_message = request->GetAddress(); + const size_t client_buffer_size = request->GetSize(); + KThread* client_thread = request->GetThread(); + KEvent* event = request->GetEvent(); - // If the session has been converted to a domain, handle the domain request - if (manager->HasSessionRequestHandler(context)) { - if (IsDomain() && context.HasDomainMessageHeader()) { - result = HandleDomainSyncRequest(context); - // If there is no domain header, the regular session handler is used - } else if (manager->HasSessionHandler()) { - // If this ServerSession has an associated HLE handler, forward the request to it. - result = manager->SessionHandler().HandleSyncRequest(*this, context); - } + // Check whether we're closed. + const bool closed = (client_thread == nullptr || parent->IsClientClosed()); + + Result result = ResultSuccess; + if (!closed) { + // If we're not closed, send the reply. + Core::Memory::Memory& memory{kernel.System().Memory()}; + KThread* server_thread{GetCurrentThreadPointer(kernel)}; + UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); + + auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); + auto* dst_msg_buffer = memory.GetPointer(client_message); + std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); } else { - ASSERT_MSG(false, "Session handler is invalid, stubbing response!"); - IPC::ResponseBuilder rb(context, 2); - rb.Push(ResultSuccess); + result = ResultSessionClosed; } - if (convert_to_domain) { - ASSERT_MSG(!IsDomain(), "ServerSession is already a domain instance."); - manager->ConvertToDomain(); - convert_to_domain = false; + // Select a result for the client. + Result client_result = result; + if (closed && R_SUCCEEDED(result)) { + result = ResultSessionClosed; + client_result = ResultSessionClosed; + } else { + result = ResultSuccess; } - // The calling thread is waiting for this request to complete, so wake it up. - context.GetThread().EndWait(result); + // If there's a client thread, update it. + if (client_thread != nullptr) { + if (event != nullptr) { + // // Get the client process/page table. + // KProcess *client_process = client_thread->GetOwnerProcess(); + // KPageTable *client_page_table = &client_process->PageTable(); + + // // If we need to, reply with an async error. + // if (R_FAILED(client_result)) { + // ReplyAsyncError(client_process, client_message, client_buffer_size, + // client_result); + // } + + // // Unlock the client buffer. + // // NOTE: Nintendo does not check the result of this. 
+ // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size); + + // Signal the event. + event->Signal(); + } else { + // End the client thread's wait. + KScopedSchedulerLock sl{kernel}; + + if (!client_thread->IsTerminationRequested()) { + client_thread->EndWait(client_result); + } + } + } return result; } -Result KServerSession::HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory, - Core::Timing::CoreTiming& core_timing) { - return QueueSyncRequest(thread, memory); +Result KServerSession::ReceiveRequest() { + // Lock the session. + KScopedLightLock lk{m_lock}; + + // Get the request and client thread. + KSessionRequest* request; + KThread* client_thread; + + { + KScopedSchedulerLock sl{kernel}; + + // Ensure that we can service the request. + R_UNLESS(!parent->IsClientClosed(), ResultSessionClosed); + + // Ensure we aren't already servicing a request. + R_UNLESS(m_current_request == nullptr, ResultNotFound); + + // Ensure we have a request to service. + R_UNLESS(!m_request_list.empty(), ResultNotFound); + + // Pop the first request from the list. + request = &m_request_list.front(); + m_request_list.pop_front(); + + // Get the thread for the request. + client_thread = request->GetThread(); + R_UNLESS(client_thread != nullptr, ResultSessionClosed); + + // Open the client thread. + client_thread->Open(); + } + + SCOPE_EXIT({ client_thread->Close(); }); + + // Set the request as our current. + m_current_request = request; + + // Get the client address. + uintptr_t client_message = request->GetAddress(); + size_t client_buffer_size = request->GetSize(); + // bool recv_list_broken = false; + + // Receive the message. + Core::Memory::Memory& memory{kernel.System().Memory()}; + KThread* server_thread{GetCurrentThreadPointer(kernel)}; + UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess()); + + auto* src_msg_buffer = memory.GetPointer(client_message); + auto* dst_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress()); + std::memcpy(dst_msg_buffer, src_msg_buffer, client_buffer_size); + + // We succeeded. + return ResultSuccess; +} + +void KServerSession::CleanupRequests() { + KScopedLightLock lk(m_lock); + + // Clean up any pending requests. + while (true) { + // Get the next request. + KSessionRequest* request = nullptr; + { + KScopedSchedulerLock sl{kernel}; + + if (m_current_request) { + // Choose the current request if we have one. + request = m_current_request; + m_current_request = nullptr; + } else if (!m_request_list.empty()) { + // Pop the request from the front of the list. + request = &m_request_list.front(); + m_request_list.pop_front(); + } + } + + // If there's no request, we're done. + if (request == nullptr) { + break; + } + + // Close a reference to the request once it's cleaned up. + SCOPE_EXIT({ request->Close(); }); + + // Extract relevant information from the request. + // const uintptr_t client_message = request->GetAddress(); + // const size_t client_buffer_size = request->GetSize(); + KThread* client_thread = request->GetThread(); + KEvent* event = request->GetEvent(); + + // KProcess *server_process = request->GetServerProcess(); + // KProcess *client_process = (client_thread != nullptr) ? + // client_thread->GetOwnerProcess() : nullptr; + // KProcessPageTable *client_page_table = (client_process != nullptr) ? + // &client_process->GetPageTable() : nullptr; + + // Cleanup the mappings. 
+ // Result result = CleanupMap(request, server_process, client_page_table); + + // If there's a client thread, update it. + if (client_thread != nullptr) { + if (event != nullptr) { + // // We need to reply async. + // ReplyAsyncError(client_process, client_message, client_buffer_size, + // (R_SUCCEEDED(result) ? ResultSessionClosed : result)); + + // // Unlock the client buffer. + // NOTE: Nintendo does not check the result of this. + // client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size); + + // Signal the event. + event->Signal(); + } else { + // End the client thread's wait. + KScopedSchedulerLock sl{kernel}; + + if (!client_thread->IsTerminationRequested()) { + client_thread->EndWait(ResultSessionClosed); + } + } + } + } } } // namespace Kernel diff --git a/src/core/hle/kernel/k_server_session.h b/src/core/hle/kernel/k_server_session.h index 6d0821945..32135473b 100644 --- a/src/core/hle/kernel/k_server_session.h +++ b/src/core/hle/kernel/k_server_session.h @@ -3,6 +3,7 @@ #pragma once +#include <list> #include <memory> #include <string> #include <utility> @@ -10,6 +11,8 @@ #include <boost/intrusive/list.hpp> #include "core/hle/kernel/hle_ipc.h" +#include "core/hle/kernel/k_light_lock.h" +#include "core/hle/kernel/k_session_request.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/result.h" @@ -55,64 +58,29 @@ public: } bool IsSignaled() const override; - void OnClientClosed(); - void ClientConnected(SessionRequestHandlerPtr handler) { - manager->SetSessionHandler(std::move(handler)); - } - - void ClientDisconnected() { - manager = nullptr; - } - - /** - * Handle a sync request from the emulated application. - * - * @param thread Thread that initiated the request. - * @param memory Memory context to handle the sync request under. - * @param core_timing Core timing context to schedule the request event under. - * - * @returns Result from the operation. - */ - Result HandleSyncRequest(KThread* thread, Core::Memory::Memory& memory, - Core::Timing::CoreTiming& core_timing); - - /// Adds a new domain request handler to the collection of request handlers within - /// this ServerSession instance. - void AppendDomainHandler(SessionRequestHandlerPtr handler); - - /// Retrieves the total number of domain request handlers that have been - /// appended to this ServerSession instance. - std::size_t NumDomainRequestHandlers() const; - - /// Returns true if the session has been converted to a domain, otherwise False - bool IsDomain() const { - return manager->IsDomain(); - } - - /// Converts the session to a domain at the end of the current command - void ConvertToDomain() { - convert_to_domain = true; - } - /// Gets the session request manager, which forwards requests to the underlying service std::shared_ptr<SessionRequestManager>& GetSessionRequestManager() { return manager; } + /// TODO: flesh these out to match the real kernel + Result OnRequest(KSessionRequest* request); + Result SendReply(); + Result ReceiveRequest(); + private: + /// Frees up waiting client sessions when this server session is about to die + void CleanupRequests(); + /// Queues a sync request from the emulated application. Result QueueSyncRequest(KThread* thread, Core::Memory::Memory& memory); /// Completes a sync request from the emulated application. Result CompleteSyncRequest(HLERequestContext& context); - /// Handles a SyncRequest to a domain, forwarding the request to the proper object or closing an - /// object handle. 
- Result HandleDomainSyncRequest(Kernel::HLERequestContext& context); - - /// This session's HLE request handlers + /// This session's HLE request handlers; if nullptr, this is not an HLE server std::shared_ptr<SessionRequestManager> manager; /// When set to True, converts the session to a domain at the end of the command @@ -120,6 +88,12 @@ private: /// KSession that owns this KServerSession KSession* parent{}; + + /// List of threads which are pending a reply. + boost::intrusive::list<KSessionRequest> m_request_list; + KSessionRequest* m_current_request; + + KLightLock m_lock; }; } // namespace Kernel diff --git a/src/core/hle/kernel/k_session_request.cpp b/src/core/hle/kernel/k_session_request.cpp new file mode 100644 index 000000000..520da6aa7 --- /dev/null +++ b/src/core/hle/kernel/k_session_request.cpp @@ -0,0 +1,61 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/hle/kernel/k_page_buffer.h" +#include "core/hle/kernel/k_session_request.h" + +namespace Kernel { + +Result KSessionRequest::SessionMappings::PushMap(VAddr client, VAddr server, size_t size, + KMemoryState state, size_t index) { + // At most 15 buffers of each type (4-bit descriptor counts). + ASSERT(index < ((1ul << 4) - 1) * 3); + + // Get the mapping. + Mapping* mapping; + if (index < NumStaticMappings) { + mapping = &m_static_mappings[index]; + } else { + // Allocate a page for the extra mappings. + if (m_mappings == nullptr) { + KPageBuffer* page_buffer = KPageBuffer::Allocate(kernel); + R_UNLESS(page_buffer != nullptr, ResultOutOfMemory); + + m_mappings = reinterpret_cast<Mapping*>(page_buffer); + } + + mapping = &m_mappings[index - NumStaticMappings]; + } + + // Set the mapping. + mapping->Set(client, server, size, state); + + return ResultSuccess; +} + +Result KSessionRequest::SessionMappings::PushSend(VAddr client, VAddr server, size_t size, + KMemoryState state) { + ASSERT(m_num_recv == 0); + ASSERT(m_num_exch == 0); + return this->PushMap(client, server, size, state, m_num_send++); +} + +Result KSessionRequest::SessionMappings::PushReceive(VAddr client, VAddr server, size_t size, + KMemoryState state) { + ASSERT(m_num_exch == 0); + return this->PushMap(client, server, size, state, m_num_send + m_num_recv++); +} + +Result KSessionRequest::SessionMappings::PushExchange(VAddr client, VAddr server, size_t size, + KMemoryState state) { + return this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++); +} + +void KSessionRequest::SessionMappings::Finalize() { + if (m_mappings) { + KPageBuffer::Free(kernel, reinterpret_cast<KPageBuffer*>(m_mappings)); + m_mappings = nullptr; + } +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_session_request.h b/src/core/hle/kernel/k_session_request.h new file mode 100644 index 000000000..e5558bc2c --- /dev/null +++ b/src/core/hle/kernel/k_session_request.h @@ -0,0 +1,306 @@ +// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <array> + +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/k_event.h" +#include "core/hle/kernel/k_memory_block.h" +#include "core/hle/kernel/k_process.h" +#include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/slab_helpers.h" + +namespace Kernel { + +class KSessionRequest final : public KSlabAllocated<KSessionRequest>, + public KAutoObject, + public boost::intrusive::list_base_hook<> { + 
KERNEL_AUTOOBJECT_TRAITS(KSessionRequest, KAutoObject); + +public: + class SessionMappings { + private: + static constexpr size_t NumStaticMappings = 8; + + class Mapping { + public: + constexpr void Set(VAddr c, VAddr s, size_t sz, KMemoryState st) { + m_client_address = c; + m_server_address = s; + m_size = sz; + m_state = st; + } + + constexpr VAddr GetClientAddress() const { + return m_client_address; + } + constexpr VAddr GetServerAddress() const { + return m_server_address; + } + constexpr size_t GetSize() const { + return m_size; + } + constexpr KMemoryState GetMemoryState() const { + return m_state; + } + + private: + VAddr m_client_address; + VAddr m_server_address; + size_t m_size; + KMemoryState m_state; + }; + + public: + explicit SessionMappings(KernelCore& kernel_) : kernel(kernel_) {} + + void Initialize() {} + void Finalize(); + + size_t GetSendCount() const { + return m_num_send; + } + size_t GetReceiveCount() const { + return m_num_recv; + } + size_t GetExchangeCount() const { + return m_num_exch; + } + + Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state); + Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state); + Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state); + + VAddr GetSendClientAddress(size_t i) const { + return GetSendMapping(i).GetClientAddress(); + } + VAddr GetSendServerAddress(size_t i) const { + return GetSendMapping(i).GetServerAddress(); + } + size_t GetSendSize(size_t i) const { + return GetSendMapping(i).GetSize(); + } + KMemoryState GetSendMemoryState(size_t i) const { + return GetSendMapping(i).GetMemoryState(); + } + + VAddr GetReceiveClientAddress(size_t i) const { + return GetReceiveMapping(i).GetClientAddress(); + } + VAddr GetReceiveServerAddress(size_t i) const { + return GetReceiveMapping(i).GetServerAddress(); + } + size_t GetReceiveSize(size_t i) const { + return GetReceiveMapping(i).GetSize(); + } + KMemoryState GetReceiveMemoryState(size_t i) const { + return GetReceiveMapping(i).GetMemoryState(); + } + + VAddr GetExchangeClientAddress(size_t i) const { + return GetExchangeMapping(i).GetClientAddress(); + } + VAddr GetExchangeServerAddress(size_t i) const { + return GetExchangeMapping(i).GetServerAddress(); + } + size_t GetExchangeSize(size_t i) const { + return GetExchangeMapping(i).GetSize(); + } + KMemoryState GetExchangeMemoryState(size_t i) const { + return GetExchangeMapping(i).GetMemoryState(); + } + + private: + Result PushMap(VAddr client, VAddr server, size_t size, KMemoryState state, size_t index); + + const Mapping& GetSendMapping(size_t i) const { + ASSERT(i < m_num_send); + + const size_t index = i; + if (index < NumStaticMappings) { + return m_static_mappings[index]; + } else { + return m_mappings[index - NumStaticMappings]; + } + } + + const Mapping& GetReceiveMapping(size_t i) const { + ASSERT(i < m_num_recv); + + const size_t index = m_num_send + i; + if (index < NumStaticMappings) { + return m_static_mappings[index]; + } else { + return m_mappings[index - NumStaticMappings]; + } + } + + const Mapping& GetExchangeMapping(size_t i) const { + ASSERT(i < m_num_exch); + + const size_t index = m_num_send + m_num_recv + i; + if (index < NumStaticMappings) { + return m_static_mappings[index]; + } else { + return m_mappings[index - NumStaticMappings]; + } + } + + private: + KernelCore& kernel; + std::array<Mapping, NumStaticMappings> m_static_mappings; + Mapping* m_mappings{}; + u8 m_num_send{}; + u8 m_num_recv{}; + u8 m_num_exch{}; + }; + +public: 
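GetSendMapping, GetReceiveMapping and GetExchangeMapping above all resolve into one logical array laid out send mappings first, then receive, then exchange; the first eight entries live inline in m_static_mappings and anything beyond spills into the page-buffer overflow allocated by PushMap. A standalone sketch of just that index arithmetic, with local names rather than the class's members:

#include <cstddef>

constexpr std::size_t NumStaticMappings = 8;

struct MappingSlot {
    bool is_static;     // true -> static array, false -> page-backed overflow
    std::size_t index;  // index into the chosen array
};

// Receive mappings start after all send mappings in the combined ordering.
constexpr MappingSlot LocateReceiveMapping(std::size_t num_send, std::size_t i) {
    const std::size_t combined = num_send + i;
    if (combined < NumStaticMappings) {
        return {true, combined};
    }
    return {false, combined - NumStaticMappings};
}

static_assert(LocateReceiveMapping(2, 0).is_static && LocateReceiveMapping(2, 0).index == 2);
static_assert(!LocateReceiveMapping(7, 3).is_static && LocateReceiveMapping(7, 3).index == 2);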
+ explicit KSessionRequest(KernelCore& kernel_) : KAutoObject(kernel_), m_mappings(kernel_) {} + + static KSessionRequest* Create(KernelCore& kernel) { + KSessionRequest* req = KSessionRequest::Allocate(kernel); + if (req != nullptr) [[likely]] { + KAutoObject::Create(req); + } + return req; + } + + void Destroy() override { + this->Finalize(); + KSessionRequest::Free(kernel, this); + } + + void Initialize(KEvent* event, uintptr_t address, size_t size) { + m_mappings.Initialize(); + + m_thread = GetCurrentThreadPointer(kernel); + m_event = event; + m_address = address; + m_size = size; + + m_thread->Open(); + if (m_event != nullptr) { + m_event->Open(); + } + } + + static void PostDestroy(uintptr_t arg) {} + + KThread* GetThread() const { + return m_thread; + } + KEvent* GetEvent() const { + return m_event; + } + uintptr_t GetAddress() const { + return m_address; + } + size_t GetSize() const { + return m_size; + } + KProcess* GetServerProcess() const { + return m_server; + } + + void SetServerProcess(KProcess* process) { + m_server = process; + m_server->Open(); + } + + void ClearThread() { + m_thread = nullptr; + } + void ClearEvent() { + m_event = nullptr; + } + + size_t GetSendCount() const { + return m_mappings.GetSendCount(); + } + size_t GetReceiveCount() const { + return m_mappings.GetReceiveCount(); + } + size_t GetExchangeCount() const { + return m_mappings.GetExchangeCount(); + } + + Result PushSend(VAddr client, VAddr server, size_t size, KMemoryState state) { + return m_mappings.PushSend(client, server, size, state); + } + + Result PushReceive(VAddr client, VAddr server, size_t size, KMemoryState state) { + return m_mappings.PushReceive(client, server, size, state); + } + + Result PushExchange(VAddr client, VAddr server, size_t size, KMemoryState state) { + return m_mappings.PushExchange(client, server, size, state); + } + + VAddr GetSendClientAddress(size_t i) const { + return m_mappings.GetSendClientAddress(i); + } + VAddr GetSendServerAddress(size_t i) const { + return m_mappings.GetSendServerAddress(i); + } + size_t GetSendSize(size_t i) const { + return m_mappings.GetSendSize(i); + } + KMemoryState GetSendMemoryState(size_t i) const { + return m_mappings.GetSendMemoryState(i); + } + + VAddr GetReceiveClientAddress(size_t i) const { + return m_mappings.GetReceiveClientAddress(i); + } + VAddr GetReceiveServerAddress(size_t i) const { + return m_mappings.GetReceiveServerAddress(i); + } + size_t GetReceiveSize(size_t i) const { + return m_mappings.GetReceiveSize(i); + } + KMemoryState GetReceiveMemoryState(size_t i) const { + return m_mappings.GetReceiveMemoryState(i); + } + + VAddr GetExchangeClientAddress(size_t i) const { + return m_mappings.GetExchangeClientAddress(i); + } + VAddr GetExchangeServerAddress(size_t i) const { + return m_mappings.GetExchangeServerAddress(i); + } + size_t GetExchangeSize(size_t i) const { + return m_mappings.GetExchangeSize(i); + } + KMemoryState GetExchangeMemoryState(size_t i) const { + return m_mappings.GetExchangeMemoryState(i); + } + +private: + // NOTE: This is public and virtual in Nintendo's kernel. 
+ void Finalize() override { + m_mappings.Finalize(); + + if (m_thread) { + m_thread->Close(); + } + if (m_event) { + m_event->Close(); + } + if (m_server) { + m_server->Close(); + } + } + +private: + SessionMappings m_mappings; + KThread* m_thread{}; + KProcess* m_server{}; + KEvent* m_event{}; + uintptr_t m_address{}; + size_t m_size{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_shared_memory.cpp b/src/core/hle/kernel/k_shared_memory.cpp index 8ff1545b6..a039cc591 100644 --- a/src/core/hle/kernel/k_shared_memory.cpp +++ b/src/core/hle/kernel/k_shared_memory.cpp @@ -50,7 +50,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o is_initialized = true; // Clear all pages in the memory. - std::memset(device_memory_.GetPointer(physical_address_), 0, size_); + std::memset(device_memory_.GetPointer<void>(physical_address_), 0, size_); return ResultSuccess; } diff --git a/src/core/hle/kernel/k_shared_memory.h b/src/core/hle/kernel/k_shared_memory.h index 34cb98456..5620c3660 100644 --- a/src/core/hle/kernel/k_shared_memory.h +++ b/src/core/hle/kernel/k_shared_memory.h @@ -54,7 +54,7 @@ public: * @return A pointer to the shared memory block from the specified offset */ u8* GetPointer(std::size_t offset = 0) { - return device_memory->GetPointer(physical_address + offset); + return device_memory->GetPointer<u8>(physical_address + offset); } /** @@ -63,7 +63,7 @@ public: * @return A pointer to the shared memory block from the specified offset */ const u8* GetPointer(std::size_t offset = 0) const { - return device_memory->GetPointer(physical_address + offset); + return device_memory->GetPointer<u8>(physical_address + offset); } void Finalize() override; diff --git a/src/core/hle/kernel/k_shared_memory_info.h b/src/core/hle/kernel/k_shared_memory_info.h index e43db8515..2bb6b6d08 100644 --- a/src/core/hle/kernel/k_shared_memory_info.h +++ b/src/core/hle/kernel/k_shared_memory_info.h @@ -15,7 +15,8 @@ class KSharedMemoryInfo final : public KSlabAllocated<KSharedMemoryInfo>, public boost::intrusive::list_base_hook<> { public: - explicit KSharedMemoryInfo() = default; + explicit KSharedMemoryInfo(KernelCore&) {} + KSharedMemoryInfo() = default; constexpr void Initialize(KSharedMemory* shmem) { shared_memory = shmem; diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h index 2b303537e..a8c77a7d4 100644 --- a/src/core/hle/kernel/k_slab_heap.h +++ b/src/core/hle/kernel/k_slab_heap.h @@ -8,6 +8,7 @@ #include "common/assert.h" #include "common/common_funcs.h" #include "common/common_types.h" +#include "common/spin_lock.h" namespace Kernel { @@ -36,28 +37,34 @@ public: } void* Allocate() { - Node* ret = m_head.load(); + // KScopedInterruptDisable di; - do { - if (ret == nullptr) { - break; - } - } while (!m_head.compare_exchange_weak(ret, ret->next)); + m_lock.lock(); + + Node* ret = m_head; + if (ret != nullptr) [[likely]] { + m_head = ret->next; + } + m_lock.unlock(); return ret; } void Free(void* obj) { + // KScopedInterruptDisable di; + + m_lock.lock(); + Node* node = static_cast<Node*>(obj); + node->next = m_head; + m_head = node; - Node* cur_head = m_head.load(); - do { - node->next = cur_head; - } while (!m_head.compare_exchange_weak(cur_head, node)); + m_lock.unlock(); } private: std::atomic<Node*> m_head{}; + Common::SpinLock m_lock; }; } // namespace impl diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index 174afc80d..b7bfcdce3 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ 
b/src/core/hle/kernel/k_thread.cpp @@ -30,6 +30,7 @@ #include "core/hle/kernel/k_worker_task_manager.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/svc_results.h" +#include "core/hle/kernel/svc_types.h" #include "core/hle/result.h" #include "core/memory.h" @@ -38,6 +39,9 @@ #endif namespace { + +constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1; + static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, u32 entry_point, u32 arg) { context = {}; @@ -241,7 +245,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack } } - return ResultSuccess; + R_SUCCEED(); } Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg, @@ -254,7 +258,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_ thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func)); thread->is_single_core = !Settings::values.use_multi_core.GetValue(); - return ResultSuccess; + R_SUCCEED(); } Result KThread::InitializeDummyThread(KThread* thread) { @@ -264,31 +268,32 @@ Result KThread::InitializeDummyThread(KThread* thread) { // Initialize emulation parameters. thread->stack_parameters.disable_count = 0; - return ResultSuccess; + R_SUCCEED(); } Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) { - return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, - system.GetCpuManager().GetGuestActivateFunc()); + R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, + ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc())); } Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) { - return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main, - system.GetCpuManager().GetIdleThreadStartFunc()); + R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, + ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc())); } Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread, KThreadFunction func, uintptr_t arg, s32 virt_core) { - return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority, - system.GetCpuManager().GetShutdownThreadStartFunc()); + R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, + ThreadType::HighPriority, + system.GetCpuManager().GetShutdownThreadStartFunc())); } Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core, KProcess* owner) { system.Kernel().GlobalSchedulerContext().AddThread(thread); - return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, - ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()); + R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, + ThreadType::User, system.GetCpuManager().GetGuestThreadFunc())); } void KThread::PostDestroy(uintptr_t arg) { @@ -538,7 +543,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { *out_ideal_core = virtual_ideal_core_id; *out_affinity_mask = virtual_affinity_mask; - return ResultSuccess; + R_SUCCEED(); } Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) { @@ -554,7 +559,7 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* 
out_affinity_mask) *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask(); } - return ResultSuccess; + R_SUCCEED(); } Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { @@ -666,7 +671,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { } while (retry_update); } - return ResultSuccess; + R_SUCCEED(); } void KThread::SetBasePriority(s32 value) { @@ -839,7 +844,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) { } while (thread_is_current); } - return ResultSuccess; + R_SUCCEED(); } Result KThread::GetThreadContext3(std::vector<u8>& out) { @@ -874,7 +879,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) { } } - return ResultSuccess; + R_SUCCEED(); } void KThread::AddWaiterImpl(KThread* thread) { @@ -1038,7 +1043,7 @@ Result KThread::Run() { // Set our state and finish. SetState(ThreadState::Runnable); - return ResultSuccess; + R_SUCCEED(); } } @@ -1073,6 +1078,78 @@ void KThread::Exit() { UNREACHABLE_MSG("KThread::Exit() would return"); } +Result KThread::Terminate() { + ASSERT(this != GetCurrentThreadPointer(kernel)); + + // Request the thread terminate if it hasn't already. + if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) { + // If the thread isn't terminated, wait for it to terminate. + s32 index; + KSynchronizationObject* objects[] = {this}; + R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1, + Svc::WaitInfinite)); + } + + R_SUCCEED(); +} + +ThreadState KThread::RequestTerminate() { + ASSERT(this != GetCurrentThreadPointer(kernel)); + + KScopedSchedulerLock sl{kernel}; + + // Determine if this is the first termination request. + const bool first_request = [&]() -> bool { + // Perform an atomic compare-and-swap from false to true. + bool expected = false; + return termination_requested.compare_exchange_strong(expected, true); + }(); + + // If this is the first request, start termination procedure. + if (first_request) { + // If the thread is in initialized state, just change state to terminated. + if (this->GetState() == ThreadState::Initialized) { + thread_state = ThreadState::Terminated; + return ThreadState::Terminated; + } + + // Register the terminating dpc. + this->RegisterDpc(DpcFlag::Terminating); + + // If the thread is pinned, unpin it. + if (this->GetStackParameters().is_pinned) { + this->GetOwnerProcess()->UnpinThread(this); + } + + // If the thread is suspended, continue it. + if (this->IsSuspended()) { + suspend_allowed_flags = 0; + this->UpdateState(); + } + + // Change the thread's priority to be higher than any system thread's. + if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) { + this->SetBasePriority(TerminatingThreadPriority); + } + + // If the thread is runnable, send a termination interrupt to other cores. + if (this->GetState() == ThreadState::Runnable) { + if (const u64 core_mask = + physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel)); + core_mask != 0) { + Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask); + } + } + + // Wake up the thread. + if (this->GetState() == ThreadState::Waiting) { + wait_queue->CancelWait(this, ResultTerminationRequested, true); + } + } + + return this->GetState(); +} + Result KThread::Sleep(s64 timeout) { ASSERT(!kernel.GlobalSchedulerContext().IsLocked()); ASSERT(this == GetCurrentThreadPointer(kernel)); @@ -1086,7 +1163,7 @@ Result KThread::Sleep(s64 timeout) { // Check if the thread should terminate. 
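One detail in the new KThread::RequestTerminate above is that "first termination request" is decided by an atomic compare-and-swap on termination_requested, so even if several threads race to terminate the same target, only one of them performs the one-time termination steps. A small standalone sketch of that pattern, with hypothetical names:

    #include <atomic>

    struct TerminationFlagSketch {
        std::atomic<bool> termination_requested{false};

        // Exactly one caller observes the transition false -> true and does the
        // one-time termination work; every later caller sees the request already set.
        bool TryBeginTermination() {
            bool expected = false;
            return termination_requested.compare_exchange_strong(expected, true);
        }
    };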
if (this->IsTerminationRequested()) { slp.CancelSleep(); - return ResultTerminationRequested; + R_THROW(ResultTerminationRequested); } // Wait for the sleep to end. @@ -1094,7 +1171,7 @@ Result KThread::Sleep(s64 timeout) { SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep); } - return ResultSuccess; + R_SUCCEED(); } void KThread::IfDummyThreadTryWait() { diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index 9ee20208e..e2a27d603 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h @@ -180,6 +180,10 @@ public: void Exit(); + Result Terminate(); + + ThreadState RequestTerminate(); + [[nodiscard]] u32 GetSuspendFlags() const { return suspend_allowed_flags & suspend_request_flags; } diff --git a/src/core/hle/kernel/k_thread_local_page.h b/src/core/hle/kernel/k_thread_local_page.h index 0a7f22680..5d466ace7 100644 --- a/src/core/hle/kernel/k_thread_local_page.h +++ b/src/core/hle/kernel/k_thread_local_page.h @@ -26,7 +26,7 @@ public: static_assert(RegionsPerPage > 0); public: - constexpr explicit KThreadLocalPage(VAddr addr = {}) : m_virt_addr(addr) { + constexpr explicit KThreadLocalPage(KernelCore&, VAddr addr = {}) : m_virt_addr(addr) { m_is_region_free.fill(true); } diff --git a/src/core/hle/kernel/k_writable_event.cpp b/src/core/hle/kernel/k_writable_event.cpp deleted file mode 100644 index ff88c5acd..000000000 --- a/src/core/hle/kernel/k_writable_event.cpp +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later - -#include "core/hle/kernel/k_event.h" -#include "core/hle/kernel/k_readable_event.h" -#include "core/hle/kernel/k_writable_event.h" - -namespace Kernel { - -KWritableEvent::KWritableEvent(KernelCore& kernel_) - : KAutoObjectWithSlabHeapAndContainer{kernel_} {} - -KWritableEvent::~KWritableEvent() = default; - -void KWritableEvent::Initialize(KEvent* parent_event_, std::string&& name_) { - parent = parent_event_; - name = std::move(name_); - parent->GetReadableEvent().Open(); -} - -Result KWritableEvent::Signal() { - return parent->GetReadableEvent().Signal(); -} - -Result KWritableEvent::Clear() { - return parent->GetReadableEvent().Clear(); -} - -void KWritableEvent::Destroy() { - // Close our references. 
- parent->GetReadableEvent().Close(); - parent->Close(); -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/k_writable_event.h b/src/core/hle/kernel/k_writable_event.h deleted file mode 100644 index 3fd0c7d0a..000000000 --- a/src/core/hle/kernel/k_writable_event.h +++ /dev/null @@ -1,39 +0,0 @@ -// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project -// SPDX-License-Identifier: GPL-2.0-or-later - -#pragma once - -#include "core/hle/kernel/k_auto_object.h" -#include "core/hle/kernel/slab_helpers.h" -#include "core/hle/result.h" - -namespace Kernel { - -class KernelCore; -class KEvent; - -class KWritableEvent final - : public KAutoObjectWithSlabHeapAndContainer<KWritableEvent, KAutoObjectWithList> { - KERNEL_AUTOOBJECT_TRAITS(KWritableEvent, KAutoObject); - -public: - explicit KWritableEvent(KernelCore& kernel_); - ~KWritableEvent() override; - - void Destroy() override; - - static void PostDestroy([[maybe_unused]] uintptr_t arg) {} - - void Initialize(KEvent* parent_, std::string&& name_); - Result Signal(); - Result Clear(); - - KEvent* GetParent() const { - return parent; - } - -private: - KEvent* parent{}; -}; - -} // namespace Kernel diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 9251f29ad..eed2dc9f3 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -24,6 +24,7 @@ #include "core/hardware_properties.h" #include "core/hle/kernel/init/init_slab_setup.h" #include "core/hle/kernel/k_client_port.h" +#include "core/hle/kernel/k_dynamic_resource_manager.h" #include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_memory_manager.h" @@ -73,8 +74,16 @@ struct KernelCore::Impl { InitializeMemoryLayout(); Init::InitializeKPageBufferSlabHeap(system); InitializeShutdownThreads(); - InitializePreemption(kernel); InitializePhysicalCores(); + InitializePreemption(kernel); + + // Initialize the Dynamic Slab Heaps. + { + const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion(); + ASSERT(pt_heap_region.GetEndAddress() != 0); + + InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize()); + } RegisterHostThread(); } @@ -86,6 +95,15 @@ struct KernelCore::Impl { } } + void CloseCurrentProcess() { + (*current_process).Finalize(); + // current_process->Close(); + // TODO: The current process should be destroyed based on accurate ref counting after + // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak. + (*current_process).Destroy(); + current_process = nullptr; + } + void Shutdown() { is_shutting_down.store(true, std::memory_order_relaxed); SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); }); @@ -99,10 +117,6 @@ struct KernelCore::Impl { next_user_process_id = KProcess::ProcessIDMin; next_thread_id = 1; - for (auto& core : cores) { - core = nullptr; - } - global_handle_table->Finalize(); global_handle_table.reset(); @@ -152,15 +166,7 @@ struct KernelCore::Impl { } } - // Shutdown all processes. - if (current_process) { - (*current_process).Finalize(); - // current_process->Close(); - // TODO: The current process should be destroyed based on accurate ref counting after - // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak. 
- (*current_process).Destroy(); - current_process = nullptr; - } + CloseCurrentProcess(); // Track kernel objects that were not freed on shutdown { @@ -257,6 +263,18 @@ struct KernelCore::Impl { system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event); } + void InitializeResourceManagers(VAddr address, size_t size) { + dynamic_page_manager = std::make_unique<KDynamicPageManager>(); + memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>(); + app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>(); + + dynamic_page_manager->Initialize(address, size); + static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000; + memory_block_heap->Initialize(dynamic_page_manager.get(), + ApplicationMemoryBlockSlabHeapSize); + app_memory_block_manager->Initialize(nullptr, memory_block_heap.get()); + } + void InitializeShutdownThreads() { for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { shutdown_threads[core_id] = KThread::Create(system.Kernel()); @@ -344,11 +362,6 @@ struct KernelCore::Impl { static inline thread_local KThread* current_thread{nullptr}; KThread* GetCurrentEmuThread() { - // If we are shutting down the kernel, none of this is relevant anymore. - if (IsShuttingDown()) { - return {}; - } - const auto thread_id = GetCurrentHostThreadID(); if (thread_id >= Core::Hardware::NUM_CPU_CORES) { return GetHostDummyThread(); @@ -770,6 +783,11 @@ struct KernelCore::Impl { // Kernel memory management std::unique_ptr<KMemoryManager> memory_manager; + // Dynamic slab managers + std::unique_ptr<KDynamicPageManager> dynamic_page_manager; + std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap; + std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager; + // Shared memory for services Kernel::KSharedMemory* hid_shared_mem{}; Kernel::KSharedMemory* font_shared_mem{}; @@ -853,6 +871,10 @@ const KProcess* KernelCore::CurrentProcess() const { return impl->current_process; } +void KernelCore::CloseCurrentProcess() { + impl->CloseCurrentProcess(); +} + const std::vector<KProcess*>& KernelCore::GetProcessList() const { return impl->process_list; } @@ -1041,6 +1063,14 @@ const KMemoryManager& KernelCore::MemoryManager() const { return *impl->memory_manager; } +KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() { + return *impl->app_memory_block_manager; +} + +const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const { + return *impl->app_memory_block_manager; +} + Kernel::KSharedMemory& KernelCore::GetHidSharedMem() { return *impl->hid_shared_mem; } diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index bcf016a97..266be2bc4 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -37,6 +37,7 @@ class KClientSession; class KEvent; class KHandleTable; class KLinkedListNode; +class KMemoryBlockSlabManager; class KMemoryLayout; class KMemoryManager; class KPageBuffer; @@ -46,13 +47,13 @@ class KResourceLimit; class KScheduler; class KServerSession; class KSession; +class KSessionRequest; class KSharedMemory; class KSharedMemoryInfo; class KThread; class KThreadLocalPage; class KTransferMemory; class KWorkerTaskManager; -class KWritableEvent; class KCodeMemory; class PhysicalCore; class ServiceThread; @@ -131,6 +132,9 @@ public: /// Retrieves a const pointer to the current process. const KProcess* CurrentProcess() const; + /// Closes the current process. + void CloseCurrentProcess(); + /// Retrieves the list of processes. 
const std::vector<KProcess*>& GetProcessList() const; @@ -239,6 +243,12 @@ public: /// Gets the virtual memory manager for the kernel. const KMemoryManager& MemoryManager() const; + /// Gets the application memory block manager for the kernel. + KMemoryBlockSlabManager& GetApplicationMemoryBlockManager(); + + /// Gets the application memory block manager for the kernel. + const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const; + /// Gets the shared memory object for HID services. Kernel::KSharedMemory& GetHidSharedMem(); @@ -345,14 +355,14 @@ public: return slab_heap_container->thread; } else if constexpr (std::is_same_v<T, KTransferMemory>) { return slab_heap_container->transfer_memory; - } else if constexpr (std::is_same_v<T, KWritableEvent>) { - return slab_heap_container->writeable_event; } else if constexpr (std::is_same_v<T, KCodeMemory>) { return slab_heap_container->code_memory; } else if constexpr (std::is_same_v<T, KPageBuffer>) { return slab_heap_container->page_buffer; } else if constexpr (std::is_same_v<T, KThreadLocalPage>) { return slab_heap_container->thread_local_page; + } else if constexpr (std::is_same_v<T, KSessionRequest>) { + return slab_heap_container->session_request; } } @@ -412,10 +422,10 @@ private: KSlabHeap<KSharedMemoryInfo> shared_memory_info; KSlabHeap<KThread> thread; KSlabHeap<KTransferMemory> transfer_memory; - KSlabHeap<KWritableEvent> writeable_event; KSlabHeap<KCodeMemory> code_memory; KSlabHeap<KPageBuffer> page_buffer; KSlabHeap<KThreadLocalPage> thread_local_page; + KSlabHeap<KSessionRequest> session_request; }; std::unique_ptr<SlabHeapContainer> slab_heap_container; diff --git a/src/core/hle/kernel/slab_helpers.h b/src/core/hle/kernel/slab_helpers.h index 299a981a8..06b51e919 100644 --- a/src/core/hle/kernel/slab_helpers.h +++ b/src/core/hle/kernel/slab_helpers.h @@ -24,7 +24,7 @@ public: } static Derived* Allocate(KernelCore& kernel) { - return kernel.SlabHeap<Derived>().Allocate(); + return kernel.SlabHeap<Derived>().Allocate(kernel); } static void Free(KernelCore& kernel, Derived* obj) { diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 27e5a805d..b07ae3f02 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -29,12 +29,12 @@ #include "core/hle/kernel/k_resource_limit.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scoped_resource_reservation.h" +#include "core/hle/kernel/k_session.h" #include "core/hle/kernel/k_shared_memory.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/k_transfer_memory.h" -#include "core/hle/kernel/k_writable_event.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_core.h" #include "core/hle/kernel/svc.h" @@ -256,6 +256,93 @@ static Result UnmapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, u3 return UnmapMemory(system, dst_addr, src_addr, size); } +template <typename T> +Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, u64 name) { + auto& process = *system.CurrentProcess(); + auto& handle_table = process.GetHandleTable(); + + // Declare the session we're going to allocate. + T* session; + + // Reserve a new session from the process resource limit. 
+ // FIXME: LimitableResource_SessionCountMax + KScopedResourceReservation session_reservation(&process, LimitableResource::Sessions); + if (session_reservation.Succeeded()) { + session = T::Create(system.Kernel()); + } else { + return ResultLimitReached; + + // // We couldn't reserve a session. Check that we support dynamically expanding the + // // resource limit. + // R_UNLESS(process.GetResourceLimit() == + // &system.Kernel().GetSystemResourceLimit(), ResultLimitReached); + // R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), ResultLimitReached()); + + // // Try to allocate a session from unused slab memory. + // session = T::CreateFromUnusedSlabMemory(); + // R_UNLESS(session != nullptr, ResultLimitReached); + // ON_RESULT_FAILURE { session->Close(); }; + + // // If we're creating a KSession, we want to add two KSessionRequests to the heap, to + // // prevent request exhaustion. + // // NOTE: Nintendo checks if session->DynamicCast<KSession *>() != nullptr, but there's + // // no reason to not do this statically. + // if constexpr (std::same_as<T, KSession>) { + // for (size_t i = 0; i < 2; i++) { + // KSessionRequest* request = KSessionRequest::CreateFromUnusedSlabMemory(); + // R_UNLESS(request != nullptr, ResultLimitReached); + // request->Close(); + // } + // } + + // We successfully allocated a session, so add the object we allocated to the resource + // limit. + // system.Kernel().GetSystemResourceLimit().Reserve(LimitableResource::Sessions, 1); + } + + // Check that we successfully created a session. + R_UNLESS(session != nullptr, ResultOutOfResource); + + // Initialize the session. + session->Initialize(nullptr, fmt::format("{}", name)); + + // Commit the session reservation. + session_reservation.Commit(); + + // Ensure that we clean up the session (and its only references are handle table) on function + // end. + SCOPE_EXIT({ + session->GetClientSession().Close(); + session->GetServerSession().Close(); + }); + + // Register the session. + T::Register(system.Kernel(), session); + + // Add the server session to the handle table. + R_TRY(handle_table.Add(out_server, &session->GetServerSession())); + + // Add the client session to the handle table. + const auto result = handle_table.Add(out_client, &session->GetClientSession()); + + if (!R_SUCCEEDED(result)) { + // Ensure that we maintaing a clean handle state on exit. + handle_table.Remove(*out_server); + } + + return result; +} + +static Result CreateSession(Core::System& system, Handle* out_server, Handle* out_client, + u32 is_light, u64 name) { + if (is_light) { + // return CreateSession<KLightSession>(system, out_server, out_client, name); + return ResultUnknown; + } else { + return CreateSession<KSession>(system, out_server, out_client, name); + } +} + /// Connect to an OS service given the port name, returns the handle to the port to out static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_name_address) { auto& memory = system.Memory(); @@ -295,7 +382,8 @@ static Result ConnectToNamedPort(Core::System& system, Handle* out, VAddr port_n // Create a session. KClientSession* session{}; - R_TRY(port->CreateSession(std::addressof(session))); + R_TRY(port->CreateSession(std::addressof(session), + std::make_shared<SessionRequestManager>(kernel))); port->Close(); // Register the session in the table, close the extra reference. 
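The tail of the new CreateSession above uses a common two-step handle registration: add the server handle, then the client handle, and if the second add fails, remove the first so the caller never observes a half-registered pair. A simplified sketch of that shape, with a hypothetical handle-table type rather than the emulator's API:

    #include <cstdint>

    using Handle = std::uint32_t;

    // Table and Obj are hypothetical stand-ins for the handle table and session halves.
    template <typename Table, typename Obj>
    int AddPairWithRollback(Table& table, Handle* out_server, Handle* out_client,
                            Obj& server_obj, Obj& client_obj) {
        // First handle: nothing to undo if this fails.
        if (int rc = table.Add(out_server, &server_obj); rc != 0) {
            return rc;
        }
        // Second handle: roll back the first one on failure so no half-registered
        // pair is ever visible to the caller.
        if (int rc = table.Add(out_client, &client_obj); rc != 0) {
            table.Remove(*out_server);
            return rc;
        }
        return 0;
    }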
@@ -313,7 +401,7 @@ static Result ConnectToNamedPort32(Core::System& system, Handle* out_handle, return ConnectToNamedPort(system, out_handle, port_name_address); } -/// Makes a blocking IPC call to an OS service. +/// Makes a blocking IPC call to a service. static Result SendSyncRequest(Core::System& system, Handle handle) { auto& kernel = system.Kernel(); @@ -327,22 +415,75 @@ static Result SendSyncRequest(Core::System& system, Handle handle) { LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); - { - KScopedSchedulerLock lock(kernel); - - // This is a synchronous request, so we should wait for our request to complete. - GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue)); - GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); - session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming()); - } - - return GetCurrentThread(kernel).GetWaitResult(); + return session->SendSyncRequest(); } static Result SendSyncRequest32(Core::System& system, Handle handle) { return SendSyncRequest(system, handle); } +static Result ReplyAndReceive(Core::System& system, s32* out_index, Handle* handles, + s32 num_handles, Handle reply_target, s64 timeout_ns) { + auto& kernel = system.Kernel(); + auto& handle_table = GetCurrentThread(kernel).GetOwnerProcess()->GetHandleTable(); + + // Convert handle list to object table. + std::vector<KSynchronizationObject*> objs(num_handles); + R_UNLESS( + handle_table.GetMultipleObjects<KSynchronizationObject>(objs.data(), handles, num_handles), + ResultInvalidHandle); + + // Ensure handles are closed when we're done. + SCOPE_EXIT({ + for (auto i = 0; i < num_handles; ++i) { + objs[i]->Close(); + } + }); + + // Reply to the target, if one is specified. + if (reply_target != InvalidHandle) { + KScopedAutoObject session = handle_table.GetObject<KServerSession>(reply_target); + R_UNLESS(session.IsNotNull(), ResultInvalidHandle); + + // If we fail to reply, we want to set the output index to -1. + // ON_RESULT_FAILURE { *out_index = -1; }; + + // Send the reply. + // R_TRY(session->SendReply()); + + Result rc = session->SendReply(); + if (!R_SUCCEEDED(rc)) { + *out_index = -1; + return rc; + } + } + + // Wait for a message. + while (true) { + // Wait for an object. + s32 index; + Result result = KSynchronizationObject::Wait(kernel, &index, objs.data(), + static_cast<s32>(objs.size()), timeout_ns); + if (result == ResultTimedOut) { + return result; + } + + // Receive the request. + if (R_SUCCEEDED(result)) { + KServerSession* session = objs[index]->DynamicCast<KServerSession*>(); + if (session != nullptr) { + result = session->ReceiveRequest(); + if (result == ResultNotFound) { + continue; + } + } + } + + *out_index = index; + return result; + } +} + /// Get the ID for the specified thread. static Result GetThreadId(Core::System& system, u64* out_thread_id, Handle thread_handle) { // Get the thread from its handle. 
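The loop at the end of the new ReplyAndReceive above has one non-obvious property: being woken does not guarantee a deliverable message. If the signalled object is a server session but ReceiveRequest reports nothing queued, the SVC goes back to waiting rather than returning to the guest; only timeouts and real results are surfaced. A minimal sketch of that retry shape, with hypothetical names and a simplified result type:

    #include <cstdint>

    enum class Rc { Success, TimedOut, NotFound };

    // Wait and Receive are hypothetical stand-ins for KSynchronizationObject::Wait and
    // KServerSession::ReceiveRequest; only the control flow is the point here.
    template <typename WaitFn, typename ReceiveFn>
    Rc WaitAndReceive(WaitFn&& Wait, ReceiveFn&& Receive, std::int32_t* out_index) {
        for (;;) {
            std::int32_t index = -1;
            const Rc wait_rc = Wait(&index);
            if (wait_rc == Rc::TimedOut) {
                return wait_rc; // timeouts are reported to the caller immediately
            }
            if (wait_rc == Rc::Success) {
                const Rc recv_rc = Receive(index);
                if (recv_rc == Rc::NotFound) {
                    continue; // woken, but nothing was queued: go back to waiting
                }
                *out_index = index;
                return recv_rc;
            }
            *out_index = index;
            return wait_rc;
        }
    }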
@@ -792,7 +933,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han return ResultSuccess; case GetInfoType::UserExceptionContextAddr: - *result = process->GetTLSRegionAddress(); + *result = process->GetProcessLocalRegionAddress(); return ResultSuccess; case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource: @@ -1747,7 +1888,7 @@ static void ExitProcess(Core::System& system) { auto* current_process = system.Kernel().CurrentProcess(); LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID()); - ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running, + ASSERT_MSG(current_process->GetState() == KProcess::State::Running, "Process has already exited"); system.Exit(); @@ -2303,11 +2444,11 @@ static Result SignalEvent(Core::System& system, Handle event_handle) { // Get the current handle table. const KHandleTable& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - // Get the writable event. - KScopedAutoObject writable_event = handle_table.GetObject<KWritableEvent>(event_handle); - R_UNLESS(writable_event.IsNotNull(), ResultInvalidHandle); + // Get the event. + KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle); + R_UNLESS(event.IsNotNull(), ResultInvalidHandle); - return writable_event->Signal(); + return event->Signal(); } static Result SignalEvent32(Core::System& system, Handle event_handle) { @@ -2322,9 +2463,9 @@ static Result ClearEvent(Core::System& system, Handle event_handle) { // Try to clear the writable event. { - KScopedAutoObject writable_event = handle_table.GetObject<KWritableEvent>(event_handle); - if (writable_event.IsNotNull()) { - return writable_event->Clear(); + KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle); + if (event.IsNotNull()) { + return event->Clear(); } } @@ -2362,24 +2503,24 @@ static Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_r R_UNLESS(event != nullptr, ResultOutOfResource); // Initialize the event. - event->Initialize("CreateEvent", kernel.CurrentProcess()); + event->Initialize(kernel.CurrentProcess()); // Commit the thread reservation. event_reservation.Commit(); // Ensure that we clean up the event (and its only references are handle table) on function end. SCOPE_EXIT({ - event->GetWritableEvent().Close(); event->GetReadableEvent().Close(); + event->Close(); }); // Register the event. KEvent::Register(kernel, event); - // Add the writable event to the handle table. - R_TRY(handle_table.Add(out_write, std::addressof(event->GetWritableEvent()))); + // Add the event to the handle table. + R_TRY(handle_table.Add(out_write, event)); - // Add the writable event to the handle table. + // Ensure that we maintaing a clean handle state on exit. auto handle_guard = SCOPE_GUARD({ handle_table.Remove(*out_write); }); // Add the readable event to the handle table. 
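CreateEvent above leans on the scope-guard idiom twice: SCOPE_EXIT drops the creation references unconditionally, while SCOPE_GUARD removes the freshly added write handle only if the function fails before the guard is dismissed later on (the dismissal is outside this hunk). A generic sketch of such a cancelable guard, as a hypothetical type rather than yuzu's actual helper:

    #include <utility>

    // Minimal cancelable scope guard in the spirit of the SCOPE_GUARD usage above.
    template <typename F>
    class ScopeGuardSketch {
    public:
        explicit ScopeGuardSketch(F func) : func_(std::move(func)) {}
        ~ScopeGuardSketch() {
            if (active_) {
                func_(); // rollback runs unless the guard was dismissed
            }
        }
        void Cancel() {
            active_ = false; // call this on the success path
        }
        ScopeGuardSketch(const ScopeGuardSketch&) = delete;
        ScopeGuardSketch& operator=(const ScopeGuardSketch&) = delete;

    private:
        F func_;
        bool active_ = true;
    };

    // Usage shape: add the handle, guard its removal, cancel the guard once the
    // remaining fallible steps have succeeded.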
@@ -2416,7 +2557,7 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand return ResultInvalidEnumValue; } - *out = static_cast<u64>(process->GetStatus()); + *out = static_cast<u64>(process->GetState()); return ResultSuccess; } @@ -2860,10 +3001,10 @@ static const FunctionDef SVC_Table_64[] = { {0x3D, SvcWrap64<ChangeKernelTraceState>, "ChangeKernelTraceState"}, {0x3E, nullptr, "Unknown3e"}, {0x3F, nullptr, "Unknown3f"}, - {0x40, nullptr, "CreateSession"}, + {0x40, SvcWrap64<CreateSession>, "CreateSession"}, {0x41, nullptr, "AcceptSession"}, {0x42, nullptr, "ReplyAndReceiveLight"}, - {0x43, nullptr, "ReplyAndReceive"}, + {0x43, SvcWrap64<ReplyAndReceive>, "ReplyAndReceive"}, {0x44, nullptr, "ReplyAndReceiveWithUserBuffer"}, {0x45, SvcWrap64<CreateEvent>, "CreateEvent"}, {0x46, nullptr, "MapIoRegion"}, diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h index 95750c3eb..85506710e 100644 --- a/src/core/hle/kernel/svc_common.h +++ b/src/core/hle/kernel/svc_common.h @@ -14,8 +14,11 @@ namespace Kernel::Svc { using namespace Common::Literals; -constexpr s32 ArgumentHandleCountMax = 0x40; -constexpr u32 HandleWaitMask{1u << 30}; +constexpr inline s32 ArgumentHandleCountMax = 0x40; + +constexpr inline u32 HandleWaitMask = 1u << 30; + +constexpr inline s64 WaitInfinite = -1; constexpr inline std::size_t HeapSizeAlignment = 2_MiB; diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h index 79e15183a..abb9847fe 100644 --- a/src/core/hle/kernel/svc_types.h +++ b/src/core/hle/kernel/svc_types.h @@ -95,6 +95,19 @@ constexpr inline s32 IdealCoreNoUpdate = -3; constexpr inline s32 LowestThreadPriority = 63; constexpr inline s32 HighestThreadPriority = 0; +constexpr inline s32 SystemThreadPriorityHighest = 16; + +enum class ProcessState : u32 { + Created = 0, + CreatedAttached = 1, + Running = 2, + Crashed = 3, + RunningAttached = 4, + Terminating = 5, + Terminated = 6, + DebugBreak = 7, +}; + constexpr inline size_t ThreadLocalRegionSize = 0x200; } // namespace Kernel::Svc diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index 4bc49087e..272c54cf7 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h @@ -8,6 +8,7 @@ #include "core/core.h" #include "core/hle/kernel/svc_types.h" #include "core/hle/result.h" +#include "core/memory.h" namespace Kernel { @@ -346,6 +347,37 @@ void SvcWrap64(Core::System& system) { FuncReturn(system, retval); } +// Used by CreateSession +template <Result func(Core::System&, Handle*, Handle*, u32, u64)> +void SvcWrap64(Core::System& system) { + Handle param_1 = 0; + Handle param_2 = 0; + const u32 retval = func(system, &param_1, &param_2, static_cast<u32>(Param(system, 2)), + static_cast<u32>(Param(system, 3))) + .raw; + + system.CurrentArmInterface().SetReg(1, param_1); + system.CurrentArmInterface().SetReg(2, param_2); + FuncReturn(system, retval); +} + +// Used by ReplyAndReceive +template <Result func(Core::System&, s32*, Handle*, s32, Handle, s64)> +void SvcWrap64(Core::System& system) { + s32 param_1 = 0; + s32 num_handles = static_cast<s32>(Param(system, 2)); + + std::vector<Handle> handles(num_handles); + system.Memory().ReadBlock(Param(system, 1), handles.data(), num_handles * sizeof(Handle)); + + const u32 retval = func(system, &param_1, handles.data(), num_handles, + static_cast<s32>(Param(system, 3)), static_cast<s64>(Param(system, 4))) + .raw; + + system.CurrentArmInterface().SetReg(1, param_1); + FuncReturn(system, retval); +} + // Used by
WaitForAddress template <Result func(Core::System&, u64, Svc::ArbitrationType, s32, s64)> void SvcWrap64(Core::System& system) {