Diffstat (limited to 'src/core')
-rw-r--r--   src/core/CMakeLists.txt                         |   2
-rw-r--r--   src/core/hle/kernel/memory/memory_manager.cpp   | 177
-rw-r--r--   src/core/hle/kernel/memory/memory_manager.h     |  97
3 files changed, 276 insertions, 0 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 9fc5bd84b..ff38c6cc2 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -159,6 +159,8 @@ add_library(core STATIC
     hle/kernel/memory/memory_block.h
     hle/kernel/memory/memory_block_manager.cpp
     hle/kernel/memory/memory_block_manager.h
+    hle/kernel/memory/memory_manager.cpp
+    hle/kernel/memory/memory_manager.h
     hle/kernel/memory/memory_types.h
     hle/kernel/memory/page_linked_list.h
     hle/kernel/memory/page_heap.cpp
diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp
new file mode 100644
index 000000000..9c1bb981b
--- /dev/null
+++ b/src/core/hle/kernel/memory/memory_manager.cpp
@@ -0,0 +1,177 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/scope_exit.h"
+#include "core/hle/kernel/errors.h"
+#include "core/hle/kernel/memory/memory_manager.h"
+#include "core/hle/kernel/memory/page_linked_list.h"
+
+namespace Kernel::Memory {
+
+std::size_t MemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) {
+    const std::size_t size{end_address - start_address};
+
+    // Calculate metadata sizes
+    const std::size_t ref_count_size{(size / PageSize) * sizeof(u16)};
+    const std::size_t optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) *
+                                        sizeof(u64)};
+    const std::size_t manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)};
+    const std::size_t page_heap_size{PageHeap::CalculateMetadataOverheadSize(size)};
+    const std::size_t total_metadata_size{manager_size + page_heap_size};
+    ASSERT(manager_size <= total_metadata_size);
+    ASSERT(Common::IsAligned(total_metadata_size, PageSize));
+
+    // Setup region
+    pool = new_pool;
+
+    // Initialize the manager's KPageHeap
+    heap.Initialize(start_address, size, page_heap_size);
+
+    // Free the memory to the heap
+    heap.Free(start_address, size / PageSize);
+
+    // Update the heap's used size
+    heap.UpdateUsedSize();
+
+    return total_metadata_size;
+}
+
+void MemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) {
+    ASSERT(pool < Pool::Count);
+    managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address);
+}
+
+VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
+                                        Direction dir) {
+    // Early return if we're allocating no pages
+    if (num_pages == 0) {
+        return {};
+    }
+
+    // Lock the pool that we're allocating from
+    const std::size_t pool_index{static_cast<std::size_t>(pool)};
+    std::lock_guard lock{pool_locks[pool_index]};
+
+    // Choose a heap based on our page size request
+    const s32 heap_index{PageHeap::GetAlignedBlockIndex(num_pages, align_pages)};
+
+    // Loop, trying to iterate from each block
+    // TODO (bunnei): Support multiple managers
+    Impl& chosen_manager{managers[pool_index]};
+    VAddr allocated_block{chosen_manager.AllocateBlock(heap_index)};
+
+    // If we failed to allocate, quit now
+    if (!allocated_block) {
+        return {};
+    }
+
+    // If we allocated more than we need, free some
+    const std::size_t allocated_pages{PageHeap::GetBlockNumPages(heap_index)};
+    if (allocated_pages > num_pages) {
+        chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
+    }
+
+    return allocated_block;
+}
+
+ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
+                                   Direction dir) {
+    ASSERT(page_list.GetNumPages() == 0);
+
+    // Early return if we're allocating no pages
+    if (num_pages == 0) {
+        return RESULT_SUCCESS;
+    }
+
+    // Lock the pool that we're allocating from
+    const std::size_t pool_index{static_cast<std::size_t>(pool)};
+    std::lock_guard lock{pool_locks[pool_index]};
+
+    // Choose a heap based on our page size request
+    const s32 heap_index{PageHeap::GetBlockIndex(num_pages)};
+    if (heap_index < 0) {
+        return ERR_OUT_OF_MEMORY;
+    }
+
+    // TODO (bunnei): Support multiple managers
+    Impl& chosen_manager{managers[pool_index]};
+
+    // Ensure that we don't leave anything un-freed
+    auto group_guard = detail::ScopeExit([&] {
+        for (const auto& it : page_list.Nodes()) {
+            const std::size_t num_pages{std::min(
+                it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
+            chosen_manager.Free(it.GetAddress(), num_pages);
+        }
+    });
+
+    // Keep allocating until we've allocated all our pages
+    for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) {
+        const std::size_t pages_per_alloc{PageHeap::GetBlockNumPages(index)};
+
+        while (num_pages >= pages_per_alloc) {
+            // Allocate a block
+            VAddr allocated_block{chosen_manager.AllocateBlock(index)};
+            if (allocated_block == 0) {
+                break;
+            }
+
+            // Safely add it to our group
+            {
+                auto block_guard = detail::ScopeExit(
+                    [&] { chosen_manager.Free(allocated_block, pages_per_alloc); });
+
+                if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)};
+                    result.IsError()) {
+                    return result;
+                }
+
+                block_guard.Cancel();
+            }
+
+            num_pages -= pages_per_alloc;
+        }
+    }
+
+    // Only succeed if we allocated as many pages as we wanted
+    ASSERT(num_pages >= 0);
+    if (num_pages) {
+        return ERR_OUT_OF_MEMORY;
+    }
+
+    // We succeeded!
+    group_guard.Cancel();
+    return RESULT_SUCCESS;
+}
+
+ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
+                               Direction dir) {
+    // Early return if we're freeing no pages
+    if (!num_pages) {
+        return RESULT_SUCCESS;
+    }
+
+    // Lock the pool that we're freeing from
+    const std::size_t pool_index{static_cast<std::size_t>(pool)};
+    std::lock_guard lock{pool_locks[pool_index]};
+
+    // TODO (bunnei): Support multiple managers
+    Impl& chosen_manager{managers[pool_index]};
+
+    // Free all of the pages
+    for (const auto& it : page_list.Nodes()) {
+        const std::size_t num_pages{std::min(
+            it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)};
+        chosen_manager.Free(it.GetAddress(), num_pages);
+    }
+
+    return RESULT_SUCCESS;
+}
+
+} // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/memory_manager.h b/src/core/hle/kernel/memory/memory_manager.h
new file mode 100644
index 000000000..b078d7a5e
--- /dev/null
+++ b/src/core/hle/kernel/memory/memory_manager.h
@@ -0,0 +1,97 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <mutex>
+
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "core/hle/kernel/memory/page_heap.h"
+#include "core/hle/result.h"
+
+namespace Kernel::Memory {
+
+class PageLinkedList;
+
+class MemoryManager final : NonCopyable {
+public:
+    enum class Pool : u32 {
+        Application = 0,
+        Applet = 1,
+        System = 2,
+        SystemNonSecure = 3,
+
+        Count,
+
+        Shift = 4,
+        Mask = (0xF << Shift),
+    };
+
+    enum class Direction : u32 {
+        FromFront = 0,
+        FromBack = 1,
+
+        Shift = 0,
+        Mask = (0xF << Shift),
+    };
+
+    MemoryManager() = default;
+
+    constexpr std::size_t GetSize(Pool pool) const {
+        return managers[static_cast<std::size_t>(pool)].GetSize();
+    }
+
+    void InitializeManager(Pool pool, u64 start_address, u64 end_address);
+    VAddr AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool,
+                             Direction dir = Direction::FromFront);
+    ResultCode Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
+                        Direction dir = Direction::FromFront);
+    ResultCode Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool,
+                    Direction dir = Direction::FromFront);
+
+    static constexpr std::size_t MaxManagerCount = 10;
+
+private:
+    class Impl final : NonCopyable {
+    private:
+        using RefCount = u16;
+
+    private:
+        PageHeap heap;
+        Pool pool{};
+
+    public:
+        Impl() = default;
+
+        std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address);
+
+        VAddr AllocateBlock(s32 index) {
+            return heap.AllocateBlock(index);
+        }
+
+        void Free(VAddr addr, std::size_t num_pages) {
+            heap.Free(addr, num_pages);
+        }
+
+        constexpr std::size_t GetSize() const {
+            return heap.GetSize();
+        }
+
+        constexpr VAddr GetAddress() const {
+            return heap.GetAddress();
+        }
+
+        constexpr VAddr GetEndAddress() const {
+            return heap.GetEndAddress();
+        }
+    };
+
+private:
+    std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks;
+    std::array<Impl, MaxManagerCount> managers;
+};
+
+} // namespace Kernel::Memory
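
To make the metadata math in Impl::Initialize concrete: assuming the 4 KiB PageSize this subsystem uses, a hypothetical 64 MiB region holds 16384 pages, so ref_count_size is 16384 * sizeof(u16) = 32 KiB and optimize_map_size is (16384 / 64) * sizeof(u64) = 2 KiB; manager_size then rounds their 34 KiB sum up to 36 KiB. The page_heap_size term comes from PageHeap::CalculateMetadataOverheadSize, which is outside this diff, so the returned total_metadata_size cannot be computed from this commit alone.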
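
The rollback logic in MemoryManager::Allocate relies on a cancelable scope guard (detail::ScopeExit, defined in common/scope_exit.h and not shown in this diff): the guard frees everything already added to page_list if the function exits early, and Cancel() disarms it once the allocation has fully succeeded. A minimal sketch of that idiom, assuming nothing about yuzu's actual implementation beyond the Cancel() interface used above:

    #include <utility>

    // Sketch of a cancelable scope guard; yuzu's real detail::ScopeExit may differ.
    template <typename Fn>
    class ScopeExitGuard {
    public:
        explicit ScopeExitGuard(Fn&& fn) : fn{std::move(fn)} {}
        ~ScopeExitGuard() {
            // Runs the rollback on scope exit unless the owner committed via Cancel().
            if (active) {
                fn();
            }
        }
        void Cancel() {
            active = false;
        }

    private:
        Fn fn;
        bool active{true};
    };

The same pattern appears at two granularities in Allocate: block_guard protects a single block until page_list.AddBlock succeeds, while group_guard protects the whole group until the final page count check passes.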
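
For context, a sketch of how the new API fits together. The addresses, page count, and error handling below are hypothetical, not taken from this commit:

    #include "core/hle/kernel/memory/memory_manager.h"
    #include "core/hle/kernel/memory/page_linked_list.h"

    using namespace Kernel::Memory;

    void ExampleUsage() {
        MemoryManager manager;

        // Give the Application pool a (made-up) physical region to manage.
        manager.InitializeManager(MemoryManager::Pool::Application, 0x0800'0000, 0x1000'0000);

        // Allocate 16 pages; Allocate() appends one node per contiguous block.
        PageLinkedList page_list;
        if (manager.Allocate(page_list, 16, MemoryManager::Pool::Application).IsError()) {
            return; // out of memory
        }

        // ... map the blocks in page_list into an address space ...

        // Return every block to the pool when done.
        manager.Free(page_list, 16, MemoryManager::Pool::Application);
    }

Note that Allocate prefers the largest block size that fits and falls back to smaller heap indices, so page_list may end up holding several non-contiguous blocks; callers that need physically contiguous memory would use AllocateContinuous instead.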