Diffstat (limited to 'src')
168 files changed, 5315 insertions, 3956 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 9d0af02fd..0913be72c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -53,7 +53,11 @@ if (MSVC) else() add_compile_options( -Wall + -Werror=implicit-fallthrough + -Werror=reorder + -Wextra -Wno-attributes + -Wno-unused-parameter ) if (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL Clang) diff --git a/src/audio_core/audio_renderer.cpp b/src/audio_core/audio_renderer.cpp index c187d8ac5..7a9dc61d4 100644 --- a/src/audio_core/audio_renderer.cpp +++ b/src/audio_core/audio_renderer.cpp @@ -36,9 +36,9 @@ public: } void SetWaveIndex(std::size_t index); - std::vector<s16> DequeueSamples(std::size_t sample_count, Memory::Memory& memory); + std::vector<s16> DequeueSamples(std::size_t sample_count, Core::Memory::Memory& memory); void UpdateState(); - void RefreshBuffer(Memory::Memory& memory); + void RefreshBuffer(Core::Memory::Memory& memory); private: bool is_in_use{}; @@ -66,13 +66,14 @@ public: return info; } - void UpdateState(Memory::Memory& memory); + void UpdateState(Core::Memory::Memory& memory); private: EffectOutStatus out_status{}; EffectInStatus info{}; }; -AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Memory::Memory& memory_, + +AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_, AudioRendererParameter params, std::shared_ptr<Kernel::WritableEvent> buffer_event, std::size_t instance_number) @@ -208,7 +209,7 @@ void AudioRenderer::VoiceState::SetWaveIndex(std::size_t index) { } std::vector<s16> AudioRenderer::VoiceState::DequeueSamples(std::size_t sample_count, - Memory::Memory& memory) { + Core::Memory::Memory& memory) { if (!IsPlaying()) { return {}; } @@ -258,7 +259,7 @@ void AudioRenderer::VoiceState::UpdateState() { is_in_use = info.is_in_use; } -void AudioRenderer::VoiceState::RefreshBuffer(Memory::Memory& memory) { +void AudioRenderer::VoiceState::RefreshBuffer(Core::Memory::Memory& memory) { const auto wave_buffer_address = info.wave_buffer[wave_index].buffer_addr; const auto wave_buffer_size = info.wave_buffer[wave_index].buffer_sz; std::vector<s16> new_samples(wave_buffer_size / sizeof(s16)); @@ -310,7 +311,7 @@ void AudioRenderer::VoiceState::RefreshBuffer(Memory::Memory& memory) { is_refresh_pending = false; } -void AudioRenderer::EffectState::UpdateState(Memory::Memory& memory) { +void AudioRenderer::EffectState::UpdateState(Core::Memory::Memory& memory) { if (info.is_new) { out_status.state = EffectStatus::New; } else { diff --git a/src/audio_core/audio_renderer.h b/src/audio_core/audio_renderer.h index c0fae669e..62faf9f19 100644 --- a/src/audio_core/audio_renderer.h +++ b/src/audio_core/audio_renderer.h @@ -22,7 +22,7 @@ namespace Kernel { class WritableEvent; } -namespace Memory { +namespace Core::Memory { class Memory; } @@ -221,7 +221,7 @@ static_assert(sizeof(UpdateDataHeader) == 0x40, "UpdateDataHeader has wrong size class AudioRenderer { public: - AudioRenderer(Core::Timing::CoreTiming& core_timing, Memory::Memory& memory_, + AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_, AudioRendererParameter params, std::shared_ptr<Kernel::WritableEvent> buffer_event, std::size_t instance_number); ~AudioRenderer(); @@ -244,7 +244,7 @@ private: std::vector<EffectState> effects; std::unique_ptr<AudioOut> audio_out; StreamPtr stream; - Memory::Memory& memory; + Core::Memory::Memory& memory; }; } // namespace AudioCore diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index eeceaa655..6ffc612e7 
100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -155,6 +155,8 @@ add_library(common STATIC uuid.cpp uuid.h vector_math.h + virtual_buffer.cpp + virtual_buffer.h web_result.h zstd_compression.cpp zstd_compression.h diff --git a/src/common/alignment.h b/src/common/alignment.h index cdd4833f8..f8c49e079 100644 --- a/src/common/alignment.h +++ b/src/common/alignment.h @@ -38,6 +38,13 @@ constexpr bool IsWordAligned(T value) { return (value & 0b11) == 0; } +template <typename T> +constexpr bool IsAligned(T value, std::size_t alignment) { + using U = typename std::make_unsigned<T>::type; + const U mask = static_cast<U>(alignment - 1); + return (value & mask) == 0; +} + template <typename T, std::size_t Align = 16> class AlignmentAllocator { public: diff --git a/src/common/common_funcs.h b/src/common/common_funcs.h index 052254678..88cf5250a 100644 --- a/src/common/common_funcs.h +++ b/src/common/common_funcs.h @@ -55,6 +55,38 @@ __declspec(dllimport) void __stdcall DebugBreak(void); // Defined in Misc.cpp. std::string GetLastErrorMsg(); +#define DECLARE_ENUM_FLAG_OPERATORS(type) \ + constexpr type operator|(type a, type b) noexcept { \ + using T = std::underlying_type_t<type>; \ + return static_cast<type>(static_cast<T>(a) | static_cast<T>(b)); \ + } \ + constexpr type operator&(type a, type b) noexcept { \ + using T = std::underlying_type_t<type>; \ + return static_cast<type>(static_cast<T>(a) & static_cast<T>(b)); \ + } \ + constexpr type& operator|=(type& a, type b) noexcept { \ + using T = std::underlying_type_t<type>; \ + a = static_cast<type>(static_cast<T>(a) | static_cast<T>(b)); \ + return a; \ + } \ + constexpr type& operator&=(type& a, type b) noexcept { \ + using T = std::underlying_type_t<type>; \ + a = static_cast<type>(static_cast<T>(a) & static_cast<T>(b)); \ + return a; \ + } \ + constexpr type operator~(type key) noexcept { \ + using T = std::underlying_type_t<type>; \ + return static_cast<type>(~static_cast<T>(key)); \ + } \ + constexpr bool True(type key) noexcept { \ + using T = std::underlying_type_t<type>; \ + return static_cast<T>(key) != 0; \ + } \ + constexpr bool False(type key) noexcept { \ + using T = std::underlying_type_t<type>; \ + return static_cast<T>(key) == 0; \ + } + namespace Common { constexpr u32 MakeMagic(char a, char b, char c, char d) { diff --git a/src/common/file_util.cpp b/src/common/file_util.cpp index 35eee0096..45b750e1e 100644 --- a/src/common/file_util.cpp +++ b/src/common/file_util.cpp @@ -888,7 +888,14 @@ std::string SanitizePath(std::string_view path_, DirectorySeparator directory_se } std::replace(path.begin(), path.end(), type1, type2); - path.erase(std::unique(path.begin(), path.end(), + + auto start = path.begin(); +#ifdef _WIN32 + // allow network paths which start with a double backslash (e.g. 
\\server\share) + if (start != path.end()) + ++start; +#endif + path.erase(std::unique(start, path.end(), [type2](char c1, char c2) { return c1 == type2 && c2 == type2; }), path.end()); return std::string(RemoveTrailingSlash(path)); @@ -967,6 +974,34 @@ bool IOFile::Flush() { return IsOpen() && 0 == std::fflush(m_file); } +std::size_t IOFile::ReadImpl(void* data, std::size_t length, std::size_t data_size) const { + if (!IsOpen()) { + return std::numeric_limits<std::size_t>::max(); + } + + if (length == 0) { + return 0; + } + + DEBUG_ASSERT(data != nullptr); + + return std::fread(data, data_size, length, m_file); +} + +std::size_t IOFile::WriteImpl(const void* data, std::size_t length, std::size_t data_size) { + if (!IsOpen()) { + return std::numeric_limits<std::size_t>::max(); + } + + if (length == 0) { + return 0; + } + + DEBUG_ASSERT(data != nullptr); + + return std::fwrite(data, data_size, length, m_file); +} + bool IOFile::Resize(u64 size) { return IsOpen() && 0 == #ifdef _WIN32 diff --git a/src/common/file_util.h b/src/common/file_util.h index cde7ddf2d..f7a0c33fa 100644 --- a/src/common/file_util.h +++ b/src/common/file_util.h @@ -222,22 +222,15 @@ public: static_assert(std::is_trivially_copyable_v<T>, "Given array does not consist of trivially copyable objects"); - if (!IsOpen()) { - return std::numeric_limits<std::size_t>::max(); - } - - return std::fread(data, sizeof(T), length, m_file); + return ReadImpl(data, length, sizeof(T)); } template <typename T> std::size_t WriteArray(const T* data, std::size_t length) { static_assert(std::is_trivially_copyable_v<T>, "Given array does not consist of trivially copyable objects"); - if (!IsOpen()) { - return std::numeric_limits<std::size_t>::max(); - } - return std::fwrite(data, sizeof(T), length, m_file); + return WriteImpl(data, length, sizeof(T)); } template <typename T> @@ -278,6 +271,9 @@ public: } private: + std::size_t ReadImpl(void* data, std::size_t length, std::size_t data_size) const; + std::size_t WriteImpl(const void* data, std::size_t length, std::size_t data_size); + std::FILE* m_file = nullptr; }; diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp index 566b57b62..e5d3090d5 100644 --- a/src/common/page_table.cpp +++ b/src/common/page_table.cpp @@ -6,36 +6,20 @@ namespace Common { -PageTable::PageTable(std::size_t page_size_in_bits) : page_size_in_bits{page_size_in_bits} {} +PageTable::PageTable() = default; PageTable::~PageTable() = default; -void PageTable::Resize(std::size_t address_space_width_in_bits) { - const std::size_t num_page_table_entries = 1ULL - << (address_space_width_in_bits - page_size_in_bits); - +void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits, + bool has_attribute) { + const std::size_t num_page_table_entries{1ULL + << (address_space_width_in_bits - page_size_in_bits)}; pointers.resize(num_page_table_entries); - attributes.resize(num_page_table_entries); - - // The default is a 39-bit address space, which causes an initial 1GB allocation size. If the - // vector size is subsequently decreased (via resize), the vector might not automatically - // actually reallocate/resize its underlying allocation, which wastes up to ~800 MB for - // 36-bit titles. Call shrink_to_fit to reduce capacity to what's actually in use. 
- - pointers.shrink_to_fit(); - attributes.shrink_to_fit(); -} - -BackingPageTable::BackingPageTable(std::size_t page_size_in_bits) : PageTable{page_size_in_bits} {} - -BackingPageTable::~BackingPageTable() = default; - -void BackingPageTable::Resize(std::size_t address_space_width_in_bits) { - PageTable::Resize(address_space_width_in_bits); - const std::size_t num_page_table_entries = 1ULL - << (address_space_width_in_bits - page_size_in_bits); backing_addr.resize(num_page_table_entries); - backing_addr.shrink_to_fit(); + + if (has_attribute) { + attributes.resize(num_page_table_entries); + } } } // namespace Common diff --git a/src/common/page_table.h b/src/common/page_table.h index dbc272ab7..1e8bd3187 100644 --- a/src/common/page_table.h +++ b/src/common/page_table.h @@ -5,9 +5,12 @@ #pragma once #include <vector> + #include <boost/icl/interval_map.hpp> + #include "common/common_types.h" #include "common/memory_hook.h" +#include "common/virtual_buffer.h" namespace Common { @@ -47,7 +50,7 @@ struct SpecialRegion { * mimics the way a real CPU page table works. */ struct PageTable { - explicit PageTable(std::size_t page_size_in_bits); + PageTable(); ~PageTable(); /** @@ -56,40 +59,18 @@ struct PageTable { * * @param address_space_width_in_bits The address size width in bits. */ - void Resize(std::size_t address_space_width_in_bits); + void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits, + bool has_attribute); /** * Vector of memory pointers backing each page. An entry can only be non-null if the * corresponding entry in the `attributes` vector is of type `Memory`. */ - std::vector<u8*> pointers; - - /** - * Contains MMIO handlers that back memory regions whose entries in the `attribute` vector is - * of type `Special`. - */ - boost::icl::interval_map<u64, std::set<SpecialRegion>> special_regions; - - /** - * Vector of fine grained page attributes. If it is set to any value other than `Memory`, then - * the corresponding entry in `pointers` MUST be set to null. - */ - std::vector<PageType> attributes; - - const std::size_t page_size_in_bits{}; -}; - -/** - * A more advanced Page Table with the ability to save a backing address when using it - * depends on another MMU. - */ -struct BackingPageTable : PageTable { - explicit BackingPageTable(std::size_t page_size_in_bits); - ~BackingPageTable(); + VirtualBuffer<u8*> pointers; - void Resize(std::size_t address_space_width_in_bits); + VirtualBuffer<u64> backing_addr; - std::vector<u64> backing_addr; + VirtualBuffer<PageType> attributes; }; } // namespace Common diff --git a/src/common/scope_exit.h b/src/common/scope_exit.h index 1176a72b1..68ef5f197 100644 --- a/src/common/scope_exit.h +++ b/src/common/scope_exit.h @@ -12,10 +12,17 @@ template <typename Func> struct ScopeExitHelper { explicit ScopeExitHelper(Func&& func) : func(std::move(func)) {} ~ScopeExitHelper() { - func(); + if (active) { + func(); + } + } + + void Cancel() { + active = false; } Func func; + bool active{true}; }; template <typename Func> diff --git a/src/common/virtual_buffer.cpp b/src/common/virtual_buffer.cpp new file mode 100644 index 000000000..b426f4747 --- /dev/null +++ b/src/common/virtual_buffer.cpp @@ -0,0 +1,52 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
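A minimal usage sketch of the Common::VirtualBuffer<T> container introduced by the new virtual_buffer.cpp/.h files; the page-table sizing below is illustrative, not taken from this diff:

    // VirtualBuffer allocates whole pages (VirtualAlloc / anonymous mmap) rather
    // than using the heap, and resize() frees and re-allocates instead of copying,
    // so contents are NOT preserved across a resize (unlike std::vector).
    Common::VirtualBuffer<u8*> pointers;
    pointers.resize(1ULL << (39 - 12)); // one entry per 4 KiB page of a 39-bit space
    pointers[0] = nullptr;              // freshly mapped pages read back as zero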
+ +#ifdef _WIN32 +#include <windows.h> +#else +#include <stdio.h> +#include <sys/mman.h> +#include <sys/types.h> +#if defined __APPLE__ || defined __FreeBSD__ || defined __OpenBSD__ +#include <sys/sysctl.h> +#elif defined __HAIKU__ +#include <OS.h> +#else +#include <sys/sysinfo.h> +#endif +#endif + +#include "common/assert.h" +#include "common/virtual_buffer.h" + +namespace Common { + +void* AllocateMemoryPages(std::size_t size) { +#ifdef _WIN32 + void* base{VirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_READWRITE)}; +#else + void* base{mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0)}; + + if (base == MAP_FAILED) { + base = nullptr; + } +#endif + + ASSERT(base); + + return base; +} + +void FreeMemoryPages(void* base, std::size_t size) { + if (!base) { + return; + } +#ifdef _WIN32 + ASSERT(VirtualFree(base, 0, MEM_RELEASE)); +#else + ASSERT(munmap(base, size) == 0); +#endif +} + +} // namespace Common diff --git a/src/common/virtual_buffer.h b/src/common/virtual_buffer.h new file mode 100644 index 000000000..da064e59e --- /dev/null +++ b/src/common/virtual_buffer.h @@ -0,0 +1,58 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_funcs.h" + +namespace Common { + +void* AllocateMemoryPages(std::size_t size); +void FreeMemoryPages(void* base, std::size_t size); + +template <typename T> +class VirtualBuffer final : NonCopyable { +public: + constexpr VirtualBuffer() = default; + explicit VirtualBuffer(std::size_t count) : alloc_size{count * sizeof(T)} { + base_ptr = reinterpret_cast<T*>(AllocateMemoryPages(alloc_size)); + } + + ~VirtualBuffer() { + FreeMemoryPages(base_ptr, alloc_size); + } + + void resize(std::size_t count) { + FreeMemoryPages(base_ptr, alloc_size); + + alloc_size = count * sizeof(T); + base_ptr = reinterpret_cast<T*>(AllocateMemoryPages(alloc_size)); + } + + constexpr const T& operator[](std::size_t index) const { + return base_ptr[index]; + } + + constexpr T& operator[](std::size_t index) { + return base_ptr[index]; + } + + constexpr T* data() { + return base_ptr; + } + + constexpr const T* data() const { + return base_ptr; + } + + constexpr std::size_t size() const { + return alloc_size / sizeof(T); + } + +private: + std::size_t alloc_size{}; + T* base_ptr{}; +}; + +} // namespace Common diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 66497a386..8546d3602 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -35,6 +35,8 @@ add_library(core STATIC crypto/ctr_encryption_layer.h crypto/xts_encryption_layer.cpp crypto/xts_encryption_layer.h + device_memory.cpp + device_memory.h file_sys/bis_factory.cpp file_sys/bis_factory.h file_sys/card_image.cpp @@ -152,6 +154,23 @@ add_library(core STATIC hle/kernel/hle_ipc.h hle/kernel/kernel.cpp hle/kernel/kernel.h + hle/kernel/memory/address_space_info.cpp + hle/kernel/memory/address_space_info.h + hle/kernel/memory/memory_block.h + hle/kernel/memory/memory_block_manager.cpp + hle/kernel/memory/memory_block_manager.h + hle/kernel/memory/memory_layout.h + hle/kernel/memory/memory_manager.cpp + hle/kernel/memory/memory_manager.h + hle/kernel/memory/memory_types.h + hle/kernel/memory/page_linked_list.h + hle/kernel/memory/page_heap.cpp + hle/kernel/memory/page_heap.h + hle/kernel/memory/page_table.cpp + hle/kernel/memory/page_table.h + hle/kernel/memory/slab_heap.h + hle/kernel/memory/system_control.cpp + hle/kernel/memory/system_control.h 
hle/kernel/mutex.cpp hle/kernel/mutex.h hle/kernel/object.cpp @@ -178,6 +197,7 @@ add_library(core STATIC hle/kernel/shared_memory.h hle/kernel/svc.cpp hle/kernel/svc.h + hle/kernel/svc_types.h hle/kernel/svc_wrap.h hle/kernel/synchronization_object.cpp hle/kernel/synchronization_object.h @@ -189,8 +209,6 @@ add_library(core STATIC hle/kernel/time_manager.h hle/kernel/transfer_memory.cpp hle/kernel/transfer_memory.h - hle/kernel/vm_manager.cpp - hle/kernel/vm_manager.h hle/kernel/writable_event.cpp hle/kernel/writable_event.h hle/lock.cpp @@ -591,11 +609,8 @@ target_link_libraries(core PUBLIC common PRIVATE audio_core video_core) target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt json-headers mbedtls opus unicorn) if (YUZU_ENABLE_BOXCAT) - get_directory_property(OPENSSL_LIBS - DIRECTORY ${PROJECT_SOURCE_DIR}/externals/libressl - DEFINITION OPENSSL_LIBS) - target_compile_definitions(core PRIVATE -DCPPHTTPLIB_OPENSSL_SUPPORT -DYUZU_ENABLE_BOXCAT) - target_link_libraries(core PRIVATE httplib json-headers ${OPENSSL_LIBS} zip) + target_compile_definitions(core PRIVATE -DYUZU_ENABLE_BOXCAT) + target_link_libraries(core PRIVATE httplib json-headers zip) endif() if (ENABLE_WEB_SERVICE) diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp index 7e846ddd5..d079a1bc8 100644 --- a/src/core/arm/arm_interface.cpp +++ b/src/core/arm/arm_interface.cpp @@ -60,7 +60,7 @@ static_assert(sizeof(ELFSymbol) == 0x18, "ELFSymbol has incorrect size."); using Symbols = std::vector<std::pair<ELFSymbol, std::string>>; -Symbols GetSymbols(VAddr text_offset, Memory::Memory& memory) { +Symbols GetSymbols(VAddr text_offset, Core::Memory::Memory& memory) { const auto mod_offset = text_offset + memory.Read32(text_offset + 4); if (mod_offset < text_offset || (mod_offset & 0b11) != 0 || @@ -123,7 +123,7 @@ Symbols GetSymbols(VAddr text_offset, Memory::Memory& memory) { std::optional<std::string> GetSymbolName(const Symbols& symbols, VAddr func_address) { const auto iter = std::find_if(symbols.begin(), symbols.end(), [func_address](const auto& pair) { - const auto& [symbol, name] = pair; + const auto& symbol = pair.first; const auto end_address = symbol.value + symbol.size; return func_address >= symbol.value && func_address < end_address; }); @@ -146,7 +146,7 @@ std::vector<ARM_Interface::BacktraceEntry> ARM_Interface::GetBacktrace() const { auto fp = GetReg(29); auto lr = GetReg(30); while (true) { - out.push_back({"", 0, lr, 0}); + out.push_back({"", 0, lr, 0, ""}); if (!fp) { break; } diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h index 57eae839e..cb2e640e2 100644 --- a/src/core/arm/arm_interface.h +++ b/src/core/arm/arm_interface.h @@ -26,28 +26,28 @@ public: virtual ~ARM_Interface() = default; struct ThreadContext32 { - std::array<u32, 16> cpu_registers; - u32 cpsr; - std::array<u8, 4> padding; - std::array<u64, 32> fprs; - u32 fpscr; - u32 fpexc; - u32 tpidr; + std::array<u32, 16> cpu_registers{}; + u32 cpsr{}; + std::array<u8, 4> padding{}; + std::array<u64, 32> fprs{}; + u32 fpscr{}; + u32 fpexc{}; + u32 tpidr{}; }; // Internally within the kernel, it expects the AArch32 version of the // thread context to be 344 bytes in size. 
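(For reference: the members above total 16*4 + 4 + 4 + 32*8 + 4 + 4 + 4 = 340 bytes, and u64 alignment pads the struct to 344 = 0x158, which the assertion below verifies.)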
static_assert(sizeof(ThreadContext32) == 0x158); struct ThreadContext64 { - std::array<u64, 31> cpu_registers; - u64 sp; - u64 pc; - u32 pstate; - std::array<u8, 4> padding; - std::array<u128, 32> vector_registers; - u32 fpcr; - u32 fpsr; - u64 tpidr; + std::array<u64, 31> cpu_registers{}; + u64 sp{}; + u64 pc{}; + u32 pstate{}; + std::array<u8, 4> padding{}; + std::array<u128, 32> vector_registers{}; + u32 fpcr{}; + u32 fpsr{}; + u64 tpidr{}; }; // Internally within the kernel, it expects the AArch64 version of the // thread context to be 800 bytes in size. diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp index 187a972ac..9bc86e3b9 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp @@ -67,7 +67,7 @@ public: } void CallSVC(u32 swi) override { - Kernel::CallSVC(parent.system, swi); + Kernel::Svc::Call(parent.system, swi); } void AddTicks(u64 ticks) override { diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.h b/src/core/arm/dynarmic/arm_dynarmic_32.h index 143e46e4d..8ba9cea8f 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_32.h +++ b/src/core/arm/dynarmic/arm_dynarmic_32.h @@ -15,7 +15,7 @@ #include "core/arm/arm_interface.h" #include "core/arm/exclusive_monitor.h" -namespace Memory { +namespace Core::Memory { class Memory; } diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp index a53a58ba0..9add5d363 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp +++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp @@ -8,6 +8,7 @@ #include <dynarmic/A64/config.h> #include "common/logging/log.h" #include "common/microprofile.h" +#include "common/page_table.h" #include "core/arm/dynarmic/arm_dynarmic_64.h" #include "core/core.h" #include "core/core_manager.h" @@ -18,7 +19,6 @@ #include "core/hle/kernel/process.h" #include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/svc.h" -#include "core/hle/kernel/vm_manager.h" #include "core/memory.h" namespace Core { @@ -103,7 +103,7 @@ public: } void CallSVC(u32 swi) override { - Kernel::CallSVC(parent.system, swi); + Kernel::Svc::Call(parent.system, swi); } void AddTicks(u64 ticks) override { @@ -159,6 +159,9 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable& // Unpredictable instructions config.define_unpredictable_behaviour = true; + config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128; + config.only_detect_misalignment_via_page_table_on_page_boundary = true; + return std::make_shared<Dynarmic::A64::Jit>(config); } diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.h b/src/core/arm/dynarmic/arm_dynarmic_64.h index e71240a96..647cecaf0 100644 --- a/src/core/arm/dynarmic/arm_dynarmic_64.h +++ b/src/core/arm/dynarmic/arm_dynarmic_64.h @@ -15,7 +15,7 @@ #include "core/arm/exclusive_monitor.h" #include "core/arm/unicorn/arm_unicorn.h" -namespace Memory { +namespace Core::Memory { class Memory; } @@ -92,7 +92,7 @@ public: private: friend class ARM_Dynarmic_64; Dynarmic::A64::ExclusiveMonitor monitor; - Memory::Memory& memory; + Core::Memory::Memory& memory; }; } // namespace Core diff --git a/src/core/arm/exclusive_monitor.h b/src/core/arm/exclusive_monitor.h index 4ef418b90..ccd73b80f 100644 --- a/src/core/arm/exclusive_monitor.h +++ b/src/core/arm/exclusive_monitor.h @@ -8,7 +8,7 @@ #include "common/common_types.h" -namespace Memory { +namespace Core::Memory { class Memory; } diff --git a/src/core/arm/unicorn/arm_unicorn.cpp 
b/src/core/arm/unicorn/arm_unicorn.cpp index 8a9800a96..d189efb63 100644 --- a/src/core/arm/unicorn/arm_unicorn.cpp +++ b/src/core/arm/unicorn/arm_unicorn.cpp @@ -266,7 +266,7 @@ void ARM_Unicorn::InterruptHook(uc_engine* uc, u32 int_no, void* user_data) { switch (ec) { case 0x15: // SVC - Kernel::CallSVC(arm_instance->system, iss); + Kernel::Svc::Call(arm_instance->system, iss); break; } } diff --git a/src/core/core.cpp b/src/core/core.cpp index 3bd90d79f..f9f8a3000 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -14,6 +14,7 @@ #include "core/core_manager.h" #include "core/core_timing.h" #include "core/cpu_manager.h" +#include "core/device_memory.h" #include "core/file_sys/bis_factory.h" #include "core/file_sys/card_image.h" #include "core/file_sys/mode.h" @@ -140,6 +141,8 @@ struct System::Impl { ResultStatus Init(System& system, Frontend::EmuWindow& emu_window) { LOG_DEBUG(HW_Memory, "initialized OK"); + device_memory = std::make_unique<Core::DeviceMemory>(system); + core_timing.Initialize(); kernel.Initialize(); cpu_manager.Initialize(); @@ -276,6 +279,7 @@ struct System::Impl { telemetry_session.reset(); perf_stats.reset(); gpu_core.reset(); + device_memory.reset(); // Close all CPU/threading state cpu_manager.Shutdown(); @@ -346,7 +350,8 @@ struct System::Impl { std::unique_ptr<Loader::AppLoader> app_loader; std::unique_ptr<Tegra::GPU> gpu_core; std::unique_ptr<Hardware::InterruptManager> interrupt_manager; - Memory::Memory memory; + std::unique_ptr<Core::DeviceMemory> device_memory; + Core::Memory::Memory memory; CpuManager cpu_manager; bool is_powered_on = false; bool exit_lock = false; @@ -472,6 +477,14 @@ Kernel::Process* System::CurrentProcess() { return impl->kernel.CurrentProcess(); } +Core::DeviceMemory& System::DeviceMemory() { + return *impl->device_memory; +} + +const Core::DeviceMemory& System::DeviceMemory() const { + return *impl->device_memory; +} + const Kernel::Process* System::CurrentProcess() const { return impl->kernel.CurrentProcess(); } @@ -505,7 +518,7 @@ Memory::Memory& System::Memory() { return impl->memory; } -const Memory::Memory& System::Memory() const { +const Core::Memory::Memory& System::Memory() const { return impl->memory; } diff --git a/src/core/core.h b/src/core/core.h index 8d862a8e6..acc53d6a1 100644 --- a/src/core/core.h +++ b/src/core/core.h @@ -36,9 +36,10 @@ class AppLoader; enum class ResultStatus : u16; } // namespace Loader -namespace Memory { +namespace Core::Memory { struct CheatEntry; -} // namespace Memory +class Memory; +} // namespace Core::Memory namespace Service { @@ -86,14 +87,11 @@ namespace Core::Hardware { class InterruptManager; } -namespace Memory { -class Memory; -} - namespace Core { class ARM_Interface; class CoreManager; +class DeviceMemory; class ExclusiveMonitor; class FrameLimiter; class PerfStats; @@ -230,10 +228,10 @@ public: const ExclusiveMonitor& Monitor() const; /// Gets a mutable reference to the system memory instance. - Memory::Memory& Memory(); + Core::Memory::Memory& Memory(); /// Gets a constant reference to the system memory instance. 
- const Memory::Memory& Memory() const; + const Core::Memory::Memory& Memory() const; /// Gets a mutable reference to the GPU interface Tegra::GPU& GPU(); @@ -259,6 +257,12 @@ public: /// Gets the global scheduler const Kernel::GlobalScheduler& GlobalScheduler() const; + /// Gets the manager for the guest device memory + Core::DeviceMemory& DeviceMemory(); + + /// Gets the manager for the guest device memory + const Core::DeviceMemory& DeviceMemory() const; + /// Provides a pointer to the current process Kernel::Process* CurrentProcess(); diff --git a/src/core/core_manager.h b/src/core/core_manager.h index b14e723d7..d525de00a 100644 --- a/src/core/core_manager.h +++ b/src/core/core_manager.h @@ -22,7 +22,7 @@ namespace Core::Timing { class CoreTiming; } -namespace Memory { +namespace Core::Memory { class Memory; } diff --git a/src/core/device_memory.cpp b/src/core/device_memory.cpp new file mode 100644 index 000000000..51097ced3 --- /dev/null +++ b/src/core/device_memory.cpp @@ -0,0 +1,15 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/core.h" +#include "core/device_memory.h" +#include "core/memory.h" + +namespace Core { + +DeviceMemory::DeviceMemory(System& system) : buffer{DramMemoryMap::Size}, system{system} {} + +DeviceMemory::~DeviceMemory() = default; + +} // namespace Core diff --git a/src/core/device_memory.h b/src/core/device_memory.h new file mode 100644 index 000000000..9efa088d0 --- /dev/null +++ b/src/core/device_memory.h @@ -0,0 +1,51 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/assert.h" +#include "common/common_funcs.h" +#include "common/virtual_buffer.h" + +namespace Core { + +class System; + +namespace DramMemoryMap { +enum : u64 { + Base = 0x80000000ULL, + Size = 0x100000000ULL, + End = Base + Size, + KernelReserveBase = Base + 0x60000, + SlabHeapBase = KernelReserveBase + 0x85000, + SlapHeapSize = 0xa21000, + SlabHeapEnd = SlabHeapBase + SlapHeapSize, +}; +}; // namespace DramMemoryMap + +class DeviceMemory : NonCopyable { +public: + explicit DeviceMemory(Core::System& system); + ~DeviceMemory(); + + template <typename T> + PAddr GetPhysicalAddr(const T* ptr) const { + return (reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(buffer.data())) + + DramMemoryMap::Base; + } + + u8* GetPointer(PAddr addr) { + return buffer.data() + (addr - DramMemoryMap::Base); + } + + const u8* GetPointer(PAddr addr) const { + return buffer.data() + (addr - DramMemoryMap::Base); + } + +private: + Common::VirtualBuffer<u8> buffer; + Core::System& system; +}; + +} // namespace Core diff --git a/src/core/file_sys/patch_manager.cpp b/src/core/file_sys/patch_manager.cpp index e226e9711..b93aa6935 100644 --- a/src/core/file_sys/patch_manager.cpp +++ b/src/core/file_sys/patch_manager.cpp @@ -249,7 +249,7 @@ bool PatchManager::HasNSOPatch(const std::array<u8, 32>& build_id_) const { } namespace { -std::optional<std::vector<Memory::CheatEntry>> ReadCheatFileFromFolder( +std::optional<std::vector<Core::Memory::CheatEntry>> ReadCheatFileFromFolder( const Core::System& system, u64 title_id, const std::array<u8, 0x20>& build_id_, const VirtualDir& base_path, bool upper) { const auto build_id_raw = Common::HexToString(build_id_, upper); @@ -269,14 +269,14 @@ std::optional<std::vector<Memory::CheatEntry>> ReadCheatFileFromFolder( return std::nullopt; } - 
Memory::TextCheatParser parser; + Core::Memory::TextCheatParser parser; return parser.Parse( system, std::string_view(reinterpret_cast<const char* const>(data.data()), data.size())); } } // Anonymous namespace -std::vector<Memory::CheatEntry> PatchManager::CreateCheatList( +std::vector<Core::Memory::CheatEntry> PatchManager::CreateCheatList( const Core::System& system, const std::array<u8, 32>& build_id_) const { const auto load_dir = system.GetFileSystemController().GetModificationLoadRoot(title_id); if (load_dir == nullptr) { @@ -289,7 +289,7 @@ std::vector<Memory::CheatEntry> PatchManager::CreateCheatList( std::sort(patch_dirs.begin(), patch_dirs.end(), [](const VirtualDir& l, const VirtualDir& r) { return l->GetName() < r->GetName(); }); - std::vector<Memory::CheatEntry> out; + std::vector<Core::Memory::CheatEntry> out; for (const auto& subdir : patch_dirs) { if (std::find(disabled.cbegin(), disabled.cend(), subdir->GetName()) != disabled.cend()) { continue; @@ -348,6 +348,12 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t if (ext_dir != nullptr) layers_ext.push_back(std::move(ext_dir)); } + + // When there are no layers to apply, return early as there is no need to rebuild the RomFS + if (layers.empty() && layers_ext.empty()) { + return; + } + layers.push_back(std::move(extracted)); auto layered = LayeredVfsDirectory::MakeLayeredDirectory(std::move(layers)); @@ -434,7 +440,8 @@ std::map<std::string, std::string, std::less<>> PatchManager::GetPatchVersionNam // Game Updates const auto update_tid = GetUpdateTitleID(title_id); PatchManager update{update_tid}; - auto [nacp, discard_icon_file] = update.GetControlMetadata(); + const auto metadata = update.GetControlMetadata(); + const auto& nacp = metadata.first; const auto update_disabled = std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend(); diff --git a/src/core/file_sys/patch_manager.h b/src/core/file_sys/patch_manager.h index e857e6e82..ec6db524d 100644 --- a/src/core/file_sys/patch_manager.h +++ b/src/core/file_sys/patch_manager.h @@ -51,8 +51,8 @@ public: bool HasNSOPatch(const std::array<u8, 0x20>& build_id) const; // Creates a CheatList object with all - std::vector<Memory::CheatEntry> CreateCheatList(const Core::System& system, - const std::array<u8, 0x20>& build_id) const; + std::vector<Core::Memory::CheatEntry> CreateCheatList( + const Core::System& system, const std::array<u8, 0x20>& build_id) const; // Currently tracked RomFS patches: // - Game Updates diff --git a/src/core/file_sys/registered_cache.cpp b/src/core/file_sys/registered_cache.cpp index 6e9cf67ef..ba5f76288 100644 --- a/src/core/file_sys/registered_cache.cpp +++ b/src/core/file_sys/registered_cache.cpp @@ -591,14 +591,18 @@ InstallResult RegisteredCache::InstallEntry(const NSP& nsp, bool overwrite_if_ex InstallResult RegisteredCache::InstallEntry(const NCA& nca, TitleType type, bool overwrite_if_exists, const VfsCopyFunction& copy) { CNMTHeader header{ - nca.GetTitleId(), ///< Title ID - 0, ///< Ignore/Default title version - type, ///< Type - {}, ///< Padding - 0x10, ///< Default table offset - 1, ///< 1 Content Entry - 0, ///< No Meta Entries - {}, ///< Padding + nca.GetTitleId(), // Title ID + 0, // Ignore/Default title version + type, // Type + {}, // Padding + 0x10, // Default table offset + 1, // 1 Content Entry + 0, // No Meta Entries + {}, // Padding + {}, // Reserved 1 + 0, // Is committed + 0, // Required download system version + {}, // Reserved 2 }; OptionalHeader opt_header{0, 0}; 
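The CNMTHeader initializer above now spells out every trailing member; with -Wextra newly enabled in src/CMakeLists.txt, leaving aggregate fields implicit would trip -Wmissing-field-initializers. A toy illustration (this Header struct is hypothetical, not from the codebase):

    struct Header {
        u64 title_id;
        u32 version;
        u32 reserved;
    };
    Header partial{0x1234, 0};   // -Wextra warns: missing initializer for 'reserved'
    Header full{0x1234, 0, 0};   // every field explicit, no warning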
ContentRecord c_rec{{}, {}, {}, GetCRTypeFromNCAType(nca.GetType()), {}}; @@ -848,7 +852,8 @@ VirtualFile ManualContentProvider::GetEntryUnparsed(u64 title_id, ContentRecordT VirtualFile ManualContentProvider::GetEntryRaw(u64 title_id, ContentRecordType type) const { const auto iter = std::find_if(entries.begin(), entries.end(), [title_id, type](const auto& entry) { - const auto [title_type, content_type, e_title_id] = entry.first; + const auto content_type = std::get<1>(entry.first); + const auto e_title_id = std::get<2>(entry.first); return content_type == type && e_title_id == title_id; }); if (iter == entries.end()) diff --git a/src/core/file_sys/vfs_libzip.cpp b/src/core/file_sys/vfs_libzip.cpp index 11d1978ea..d69952940 100644 --- a/src/core/file_sys/vfs_libzip.cpp +++ b/src/core/file_sys/vfs_libzip.cpp @@ -42,11 +42,11 @@ VirtualDir ExtractZIP(VirtualFile file) { continue; if (name.back() != '/') { - std::unique_ptr<zip_file_t, decltype(&zip_fclose)> file{ + std::unique_ptr<zip_file_t, decltype(&zip_fclose)> file2{ zip_fopen_index(zip.get(), i, 0), zip_fclose}; std::vector<u8> buf(stat.size); - if (zip_fread(file.get(), buf.data(), buf.size()) != buf.size()) + if (zip_fread(file2.get(), buf.data(), buf.size()) != s64(buf.size())) return nullptr; const auto parts = FileUtil::SplitPathComponents(stat.name); diff --git a/src/core/frontend/framebuffer_layout.cpp b/src/core/frontend/framebuffer_layout.cpp index 68a0e0906..d0c43447c 100644 --- a/src/core/frontend/framebuffer_layout.cpp +++ b/src/core/frontend/framebuffer_layout.cpp @@ -25,7 +25,7 @@ FramebufferLayout DefaultFrameLayout(u32 width, u32 height) { ASSERT(height > 0); // The drawing code needs at least somewhat valid values for both screens // so just calculate them both even if the other isn't showing. 
- FramebufferLayout res{width, height}; + FramebufferLayout res{width, height, false, {}}; const float window_aspect_ratio = static_cast<float>(height) / width; const float emulation_aspect_ratio = EmulationAspectRatio( diff --git a/src/core/gdbstub/gdbstub.cpp b/src/core/gdbstub/gdbstub.cpp index 6d15aeed9..2f15635c5 100644 --- a/src/core/gdbstub/gdbstub.cpp +++ b/src/core/gdbstub/gdbstub.cpp @@ -37,9 +37,9 @@ #include "core/core.h" #include "core/core_manager.h" #include "core/gdbstub/gdbstub.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/scheduler.h" -#include "core/hle/kernel/vm_manager.h" #include "core/loader/loader.h" #include "core/memory.h" @@ -643,7 +643,7 @@ static void HandleQuery() { SendReply(target_xml); } else if (strncmp(query, "Offsets", strlen("Offsets")) == 0) { const VAddr base_address = - Core::System::GetInstance().CurrentProcess()->VMManager().GetCodeRegionBaseAddress(); + Core::System::GetInstance().CurrentProcess()->PageTable().GetCodeRegionStart(); std::string buffer = fmt::format("TextSeg={:0x}", base_address); SendReply(buffer.c_str()); } else if (strncmp(query, "fThreadInfo", strlen("fThreadInfo")) == 0) { diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp index 6d66276bc..5ab204b9b 100644 --- a/src/core/hle/kernel/client_session.cpp +++ b/src/core/hle/kernel/client_session.cpp @@ -47,7 +47,8 @@ ResultVal<std::shared_ptr<ClientSession>> ClientSession::Create(KernelCore& kern return MakeResult(std::move(client_session)); } -ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory) { +ResultCode ClientSession::SendSyncRequest(std::shared_ptr<Thread> thread, + Core::Memory::Memory& memory) { // Keep ServerSession alive until we're done working with it. 
if (!parent->Server()) { return ERR_SESSION_CLOSED_BY_REMOTE; diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h index d15b09554..c5f760d7d 100644 --- a/src/core/hle/kernel/client_session.h +++ b/src/core/hle/kernel/client_session.h @@ -12,7 +12,7 @@ union ResultCode; -namespace Memory { +namespace Core::Memory { class Memory; } @@ -42,7 +42,7 @@ public: return HANDLE_TYPE; } - ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory); + ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory); bool ShouldWait(const Thread* thread) const override; diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h index 8097b3863..29bfa3621 100644 --- a/src/core/hle/kernel/errors.h +++ b/src/core/hle/kernel/errors.h @@ -14,6 +14,7 @@ constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7}; constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14}; constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101}; constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102}; +constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103}; constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104}; constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105}; constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106}; diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp index c558a2f33..d65dae3ae 100644 --- a/src/core/hle/kernel/hle_ipc.cpp +++ b/src/core/hle/kernel/hle_ipc.cpp @@ -284,17 +284,17 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(Thread& thread) { std::vector<u8> HLERequestContext::ReadBuffer(int buffer_index) const { std::vector<u8> buffer; - const bool is_buffer_a{BufferDescriptorA().size() > buffer_index && + const bool is_buffer_a{BufferDescriptorA().size() > std::size_t(buffer_index) && BufferDescriptorA()[buffer_index].Size()}; auto& memory = Core::System::GetInstance().Memory(); if (is_buffer_a) { - ASSERT_MSG(BufferDescriptorA().size() > buffer_index, + ASSERT_MSG(BufferDescriptorA().size() > std::size_t(buffer_index), "BufferDescriptorA invalid buffer_index {}", buffer_index); buffer.resize(BufferDescriptorA()[buffer_index].Size()); memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), buffer.size()); } else { - ASSERT_MSG(BufferDescriptorX().size() > buffer_index, + ASSERT_MSG(BufferDescriptorX().size() > std::size_t(buffer_index), "BufferDescriptorX invalid buffer_index {}", buffer_index); buffer.resize(BufferDescriptorX()[buffer_index].Size()); memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), buffer.size()); @@ -310,7 +310,7 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size, return 0; } - const bool is_buffer_b{BufferDescriptorB().size() > buffer_index && + const bool is_buffer_b{BufferDescriptorB().size() > std::size_t(buffer_index) && BufferDescriptorB()[buffer_index].Size()}; const std::size_t buffer_size{GetWriteBufferSize(buffer_index)}; if (size > buffer_size) { @@ -321,13 +321,13 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size, auto& memory = Core::System::GetInstance().Memory(); if (is_buffer_b) { - ASSERT_MSG(BufferDescriptorB().size() > buffer_index, + ASSERT_MSG(BufferDescriptorB().size() > std::size_t(buffer_index), "BufferDescriptorB invalid buffer_index {}", buffer_index); ASSERT_MSG(BufferDescriptorB()[buffer_index].Size() 
>= size, "BufferDescriptorB buffer_index {} is not large enough", buffer_index); memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size); } else { - ASSERT_MSG(BufferDescriptorC().size() > buffer_index, + ASSERT_MSG(BufferDescriptorC().size() > std::size_t(buffer_index), "BufferDescriptorC invalid buffer_index {}", buffer_index); ASSERT_MSG(BufferDescriptorC()[buffer_index].Size() >= size, "BufferDescriptorC buffer_index {} is not large enough", buffer_index); @@ -338,16 +338,16 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size, } std::size_t HLERequestContext::GetReadBufferSize(int buffer_index) const { - const bool is_buffer_a{BufferDescriptorA().size() > buffer_index && + const bool is_buffer_a{BufferDescriptorA().size() > std::size_t(buffer_index) && BufferDescriptorA()[buffer_index].Size()}; if (is_buffer_a) { - ASSERT_MSG(BufferDescriptorA().size() > buffer_index, + ASSERT_MSG(BufferDescriptorA().size() > std::size_t(buffer_index), "BufferDescriptorA invalid buffer_index {}", buffer_index); ASSERT_MSG(BufferDescriptorA()[buffer_index].Size() > 0, "BufferDescriptorA buffer_index {} is empty", buffer_index); return BufferDescriptorA()[buffer_index].Size(); } else { - ASSERT_MSG(BufferDescriptorX().size() > buffer_index, + ASSERT_MSG(BufferDescriptorX().size() > std::size_t(buffer_index), "BufferDescriptorX invalid buffer_index {}", buffer_index); ASSERT_MSG(BufferDescriptorX()[buffer_index].Size() > 0, "BufferDescriptorX buffer_index {} is empty", buffer_index); @@ -356,14 +356,14 @@ std::size_t HLERequestContext::GetReadBufferSize(int buffer_index) const { } std::size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const { - const bool is_buffer_b{BufferDescriptorB().size() > buffer_index && + const bool is_buffer_b{BufferDescriptorB().size() > std::size_t(buffer_index) && BufferDescriptorB()[buffer_index].Size()}; if (is_buffer_b) { - ASSERT_MSG(BufferDescriptorB().size() > buffer_index, + ASSERT_MSG(BufferDescriptorB().size() > std::size_t(buffer_index), "BufferDescriptorB invalid buffer_index {}", buffer_index); return BufferDescriptorB()[buffer_index].Size(); } else { - ASSERT_MSG(BufferDescriptorC().size() > buffer_index, + ASSERT_MSG(BufferDescriptorC().size() > std::size_t(buffer_index), "BufferDescriptorC invalid buffer_index {}", buffer_index); return BufferDescriptorC()[buffer_index].Size(); } diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index e47f1deed..7655382fa 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -18,15 +18,20 @@ #include "core/core.h" #include "core/core_timing.h" #include "core/core_timing_util.h" +#include "core/device_memory.h" #include "core/hardware_properties.h" #include "core/hle/kernel/client_port.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/memory/memory_layout.h" +#include "core/hle/kernel/memory/memory_manager.h" +#include "core/hle/kernel/memory/slab_heap.h" #include "core/hle/kernel/physical_core.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/resource_limit.h" #include "core/hle/kernel/scheduler.h" +#include "core/hle/kernel/shared_memory.h" #include "core/hle/kernel/synchronization.h" #include "core/hle/kernel/thread.h" #include "core/hle/kernel/time_manager.h" @@ -103,13 +108,14 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_ struct KernelCore::Impl { explicit 
Impl(Core::System& system, KernelCore& kernel) - : system{system}, global_scheduler{kernel}, synchronization{system}, time_manager{system} {} + : global_scheduler{kernel}, synchronization{system}, time_manager{system}, system{system} {} void Initialize(KernelCore& kernel) { Shutdown(); InitializePhysicalCores(); InitializeSystemResourceLimit(kernel); + InitializeMemoryLayout(); InitializeThreads(); InitializePreemption(); } @@ -154,12 +160,17 @@ struct KernelCore::Impl { system_resource_limit = ResourceLimit::Create(kernel); // If setting the default system values fails, then something seriously wrong has occurred. - ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x200000000) + ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x100000000) .IsSuccess()); ASSERT(system_resource_limit->SetLimitValue(ResourceType::Threads, 800).IsSuccess()); ASSERT(system_resource_limit->SetLimitValue(ResourceType::Events, 700).IsSuccess()); ASSERT(system_resource_limit->SetLimitValue(ResourceType::TransferMemory, 200).IsSuccess()); ASSERT(system_resource_limit->SetLimitValue(ResourceType::Sessions, 900).IsSuccess()); + + if (!system_resource_limit->Reserve(ResourceType::PhysicalMemory, 0) || + !system_resource_limit->Reserve(ResourceType::PhysicalMemory, 0x60000)) { + UNREACHABLE(); + } } void InitializeThreads() { @@ -237,6 +248,57 @@ struct KernelCore::Impl { return result; } + void InitializeMemoryLayout() { + // Initialize memory layout + constexpr Memory::MemoryLayout layout{Memory::MemoryLayout::GetDefaultLayout()}; + constexpr std::size_t hid_size{0x40000}; + constexpr std::size_t font_size{0x1100000}; + constexpr std::size_t irs_size{0x8000}; + constexpr std::size_t time_size{0x1000}; + constexpr PAddr hid_addr{layout.System().StartAddress()}; + constexpr PAddr font_pa{layout.System().StartAddress() + hid_size}; + constexpr PAddr irs_addr{layout.System().StartAddress() + hid_size + font_size}; + constexpr PAddr time_addr{layout.System().StartAddress() + hid_size + font_size + irs_size}; + + // Initialize memory manager + memory_manager = std::make_unique<Memory::MemoryManager>(); + memory_manager->InitializeManager(Memory::MemoryManager::Pool::Application, + layout.Application().StartAddress(), + layout.Application().EndAddress()); + memory_manager->InitializeManager(Memory::MemoryManager::Pool::Applet, + layout.Applet().StartAddress(), + layout.Applet().EndAddress()); + memory_manager->InitializeManager(Memory::MemoryManager::Pool::System, + layout.System().StartAddress(), + layout.System().EndAddress()); + + hid_shared_mem = Kernel::SharedMemory::Create( + system.Kernel(), system.DeviceMemory(), nullptr, + {hid_addr, hid_size / Memory::PageSize}, Memory::MemoryPermission::None, + Memory::MemoryPermission::Read, hid_addr, hid_size, "HID:SharedMemory"); + font_shared_mem = Kernel::SharedMemory::Create( + system.Kernel(), system.DeviceMemory(), nullptr, + {font_pa, font_size / Memory::PageSize}, Memory::MemoryPermission::None, + Memory::MemoryPermission::Read, font_pa, font_size, "Font:SharedMemory"); + irs_shared_mem = Kernel::SharedMemory::Create( + system.Kernel(), system.DeviceMemory(), nullptr, + {irs_addr, irs_size / Memory::PageSize}, Memory::MemoryPermission::None, + Memory::MemoryPermission::Read, irs_addr, irs_size, "IRS:SharedMemory"); + time_shared_mem = Kernel::SharedMemory::Create( + system.Kernel(), system.DeviceMemory(), nullptr, + {time_addr, time_size / Memory::PageSize}, Memory::MemoryPermission::None, + 
Memory::MemoryPermission::Read, time_addr, time_size, "Time:SharedMemory"); + + // Allocate slab heaps + user_slab_heap_pages = std::make_unique<Memory::SlabHeap<Memory::Page>>(); + + // Initialize slab heaps + constexpr u64 user_slab_heap_size{0x3de000}; + user_slab_heap_pages->Initialize( + system.DeviceMemory().GetPointer(Core::DramMemoryMap::SlabHeapBase), + user_slab_heap_size); + } + std::atomic<u32> next_object_id{0}; std::atomic<u64> next_kernel_process_id{Process::InitialKIPIDMin}; std::atomic<u64> next_user_process_id{Process::ProcessIDMin}; @@ -271,6 +333,16 @@ struct KernelCore::Impl { std::bitset<Core::Hardware::NUM_CPU_CORES> registered_core_threads; std::mutex register_thread_mutex; + // Kernel memory management + std::unique_ptr<Memory::MemoryManager> memory_manager; + std::unique_ptr<Memory::SlabHeap<Memory::Page>> user_slab_heap_pages; + + // Shared memory for services + std::shared_ptr<Kernel::SharedMemory> hid_shared_mem; + std::shared_ptr<Kernel::SharedMemory> font_shared_mem; + std::shared_ptr<Kernel::SharedMemory> irs_shared_mem; + std::shared_ptr<Kernel::SharedMemory> time_shared_mem; + // System context Core::System& system; }; @@ -437,4 +509,52 @@ Core::EmuThreadHandle KernelCore::GetCurrentEmuThreadID() const { return impl->GetCurrentEmuThreadID(); } +Memory::MemoryManager& KernelCore::MemoryManager() { + return *impl->memory_manager; +} + +const Memory::MemoryManager& KernelCore::MemoryManager() const { + return *impl->memory_manager; +} + +Memory::SlabHeap<Memory::Page>& KernelCore::GetUserSlabHeapPages() { + return *impl->user_slab_heap_pages; +} + +const Memory::SlabHeap<Memory::Page>& KernelCore::GetUserSlabHeapPages() const { + return *impl->user_slab_heap_pages; +} + +Kernel::SharedMemory& KernelCore::GetHidSharedMem() { + return *impl->hid_shared_mem; +} + +const Kernel::SharedMemory& KernelCore::GetHidSharedMem() const { + return *impl->hid_shared_mem; +} + +Kernel::SharedMemory& KernelCore::GetFontSharedMem() { + return *impl->font_shared_mem; +} + +const Kernel::SharedMemory& KernelCore::GetFontSharedMem() const { + return *impl->font_shared_mem; +} + +Kernel::SharedMemory& KernelCore::GetIrsSharedMem() { + return *impl->irs_shared_mem; +} + +const Kernel::SharedMemory& KernelCore::GetIrsSharedMem() const { + return *impl->irs_shared_mem; +} + +Kernel::SharedMemory& KernelCore::GetTimeSharedMem() { + return *impl->time_shared_mem; +} + +const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const { + return *impl->time_shared_mem; +} + } // namespace Kernel diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index c4f78ab71..83de1f542 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -8,6 +8,7 @@ #include <string> #include <unordered_map> #include <vector> +#include "core/hle/kernel/memory/memory_types.h" #include "core/hle/kernel/object.h" namespace Core { @@ -23,6 +24,12 @@ struct EventType; namespace Kernel { +namespace Memory { +class MemoryManager; +template <typename T> +class SlabHeap; +} // namespace Memory + class AddressArbiter; class ClientPort; class GlobalScheduler; @@ -31,6 +38,7 @@ class PhysicalCore; class Process; class ResourceLimit; class Scheduler; +class SharedMemory; class Synchronization; class Thread; class TimeManager; @@ -147,6 +155,42 @@ public: /// Register the current thread as a non CPU core thread. void RegisterHostThread(); + /// Gets the virtual memory manager for the kernel. 
+ Memory::MemoryManager& MemoryManager(); + + /// Gets the virtual memory manager for the kernel. + const Memory::MemoryManager& MemoryManager() const; + + /// Gets the slab heap allocated for user space pages. + Memory::SlabHeap<Memory::Page>& GetUserSlabHeapPages(); + + /// Gets the slab heap allocated for user space pages. + const Memory::SlabHeap<Memory::Page>& GetUserSlabHeapPages() const; + + /// Gets the shared memory object for HID services. + Kernel::SharedMemory& GetHidSharedMem(); + + /// Gets the shared memory object for HID services. + const Kernel::SharedMemory& GetHidSharedMem() const; + + /// Gets the shared memory object for font services. + Kernel::SharedMemory& GetFontSharedMem(); + + /// Gets the shared memory object for font services. + const Kernel::SharedMemory& GetFontSharedMem() const; + + /// Gets the shared memory object for IRS services. + Kernel::SharedMemory& GetIrsSharedMem(); + + /// Gets the shared memory object for IRS services. + const Kernel::SharedMemory& GetIrsSharedMem() const; + + /// Gets the shared memory object for Time services. + Kernel::SharedMemory& GetTimeSharedMem(); + + /// Gets the shared memory object for Time services. + const Kernel::SharedMemory& GetTimeSharedMem() const; + private: friend class Object; friend class Process; diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp new file mode 100644 index 000000000..27fae05e7 --- /dev/null +++ b/src/core/hle/kernel/memory/address_space_info.cpp @@ -0,0 +1,118 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphère, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX. 
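A brief sketch of the intended call pattern for these new accessors; only the getters declared above come from this diff, the surrounding service code is assumed:

    // e.g. inside an HLE service that previously allocated its own block
    auto& kernel = system.Kernel();
    Kernel::SharedMemory& hid_mem = kernel.GetHidSharedMem();   // created once in
    Kernel::SharedMemory& font_mem = kernel.GetFontSharedMem(); // InitializeMemoryLayout()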
+ +#include <array> + +#include "common/assert.h" +#include "core/hle/kernel/memory/address_space_info.h" + +namespace Kernel::Memory { + +namespace { + +enum : u64 { + Size_1_MB = 0x100000, + Size_2_MB = 2 * Size_1_MB, + Size_128_MB = 128 * Size_1_MB, + Size_1_GB = 0x40000000, + Size_2_GB = 2 * Size_1_GB, + Size_4_GB = 4 * Size_1_GB, + Size_6_GB = 6 * Size_1_GB, + Size_64_GB = 64 * Size_1_GB, + Size_512_GB = 512 * Size_1_GB, + Invalid = std::numeric_limits<u64>::max(), +}; + +// clang-format off +constexpr std::array<AddressSpaceInfo, 13> AddressSpaceInfos{{ + { 32 /*bit_width*/, Size_2_MB /*addr*/, Size_1_GB - Size_2_MB /*size*/, AddressSpaceInfo::Type::Is32Bit, }, + { 32 /*bit_width*/, Size_1_GB /*addr*/, Size_4_GB - Size_1_GB /*size*/, AddressSpaceInfo::Type::Small64Bit, }, + { 32 /*bit_width*/, Invalid /*addr*/, Size_1_GB /*size*/, AddressSpaceInfo::Type::Heap, }, + { 32 /*bit_width*/, Invalid /*addr*/, Size_1_GB /*size*/, AddressSpaceInfo::Type::Alias, }, + { 36 /*bit_width*/, Size_128_MB /*addr*/, Size_2_GB - Size_128_MB /*size*/, AddressSpaceInfo::Type::Is32Bit, }, + { 36 /*bit_width*/, Size_2_GB /*addr*/, Size_64_GB - Size_2_GB /*size*/, AddressSpaceInfo::Type::Small64Bit, }, + { 36 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Heap, }, + { 36 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Alias, }, + { 39 /*bit_width*/, Size_128_MB /*addr*/, Size_512_GB - Size_128_MB /*size*/, AddressSpaceInfo::Type::Large64Bit, }, + { 39 /*bit_width*/, Invalid /*addr*/, Size_64_GB /*size*/, AddressSpaceInfo::Type::Is32Bit }, + { 39 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Heap, }, + { 39 /*bit_width*/, Invalid /*addr*/, Size_64_GB /*size*/, AddressSpaceInfo::Type::Alias, }, + { 39 /*bit_width*/, Invalid /*addr*/, Size_2_GB /*size*/, AddressSpaceInfo::Type::Stack, }, +}}; +// clang-format on + +constexpr bool IsAllowedIndexForAddress(std::size_t index) { + return index < std::size(AddressSpaceInfos) && AddressSpaceInfos[index].GetAddress() != Invalid; +} + +constexpr std::size_t + AddressSpaceIndices32Bit[static_cast<std::size_t>(AddressSpaceInfo::Type::Count)]{ + 0, 1, 0, 2, 0, 3, + }; + +constexpr std::size_t + AddressSpaceIndices36Bit[static_cast<std::size_t>(AddressSpaceInfo::Type::Count)]{ + 4, 5, 4, 6, 4, 7, + }; + +constexpr std::size_t + AddressSpaceIndices39Bit[static_cast<std::size_t>(AddressSpaceInfo::Type::Count)]{ + 9, 8, 8, 10, 12, 11, + }; + +constexpr bool IsAllowed32BitType(AddressSpaceInfo::Type type) { + return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit && + type != AddressSpaceInfo::Type::Stack; +} + +constexpr bool IsAllowed36BitType(AddressSpaceInfo::Type type) { + return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit && + type != AddressSpaceInfo::Type::Stack; +} + +constexpr bool IsAllowed39BitType(AddressSpaceInfo::Type type) { + return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Small64Bit; +} + +} // namespace + +u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, AddressSpaceInfo::Type type) { + const std::size_t index{static_cast<std::size_t>(type)}; + switch (width) { + case 32: + ASSERT(IsAllowed32BitType(type)); + ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices32Bit[index])); + return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].GetAddress(); + case 36: + ASSERT(IsAllowed36BitType(type)); + 
ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices36Bit[index])); + return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].GetAddress(); + case 39: + ASSERT(IsAllowed39BitType(type)); + ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index])); + return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].GetAddress(); + } + UNREACHABLE(); +} + +std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, AddressSpaceInfo::Type type) { + const std::size_t index{static_cast<std::size_t>(type)}; + switch (width) { + case 32: + ASSERT(IsAllowed32BitType(type)); + return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].GetSize(); + case 36: + ASSERT(IsAllowed36BitType(type)); + return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].GetSize(); + case 39: + ASSERT(IsAllowed39BitType(type)); + return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].GetSize(); + } + UNREACHABLE(); +} + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/address_space_info.h b/src/core/hle/kernel/memory/address_space_info.h new file mode 100644 index 000000000..cc9a6421e --- /dev/null +++ b/src/core/hle/kernel/memory/address_space_info.h @@ -0,0 +1,54 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphère, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX. + +#pragma once + +#include "common/common_funcs.h" +#include "common/common_types.h" + +namespace Kernel::Memory { + +class AddressSpaceInfo final : NonCopyable { +public: + enum class Type : u32 { + Is32Bit = 0, + Small64Bit = 1, + Large64Bit = 2, + Heap = 3, + Stack = 4, + Alias = 5, + Count, + }; + +private: + std::size_t bit_width{}; + std::size_t addr{}; + std::size_t size{}; + Type type{}; + +public: + static u64 GetAddressSpaceStart(std::size_t width, Type type); + static std::size_t GetAddressSpaceSize(std::size_t width, Type type); + + constexpr AddressSpaceInfo(std::size_t bit_width, std::size_t addr, std::size_t size, Type type) + : bit_width{bit_width}, addr{addr}, size{size}, type{type} {} + + constexpr std::size_t GetWidth() const { + return bit_width; + } + constexpr std::size_t GetAddress() const { + return addr; + } + constexpr std::size_t GetSize() const { + return size; + } + constexpr Type GetType() const { + return type; + } +}; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_block.h b/src/core/hle/kernel/memory/memory_block.h new file mode 100644 index 000000000..e11043b60 --- /dev/null +++ b/src/core/hle/kernel/memory/memory_block.h @@ -0,0 +1,318 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphère, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX. 
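A hedged example of querying the address-space table above, using the default 39-bit layout (values read directly off the AddressSpaceInfos rows):

    using Kernel::Memory::AddressSpaceInfo;
    // Large64Bit resolves to table index 8: base 128 MiB, size 512 GiB - 128 MiB
    const u64 map_start =
        AddressSpaceInfo::GetAddressSpaceStart(39, AddressSpaceInfo::Type::Large64Bit);
    const std::size_t map_size =
        AddressSpaceInfo::GetAddressSpaceSize(39, AddressSpaceInfo::Type::Large64Bit);
    // Heap/Alias rows store Invalid addresses, so only their sizes may be queried;
    // asking for their start would hit the IsAllowedIndexForAddress assert.
    const std::size_t heap_size =
        AddressSpaceInfo::GetAddressSpaceSize(39, AddressSpaceInfo::Type::Heap);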
+ +#pragma once + +#include "common/alignment.h" +#include "common/assert.h" +#include "common/common_types.h" +#include "core/hle/kernel/memory/memory_types.h" +#include "core/hle/kernel/svc_types.h" + +namespace Kernel::Memory { + +enum class MemoryState : u32 { + None = 0, + Mask = 0xFFFFFFFF, // TODO(bunnei): This should probable be 0xFF + All = ~None, + + FlagCanReprotect = (1 << 8), + FlagCanDebug = (1 << 9), + FlagCanUseIpc = (1 << 10), + FlagCanUseNonDeviceIpc = (1 << 11), + FlagCanUseNonSecureIpc = (1 << 12), + FlagMapped = (1 << 13), + FlagCode = (1 << 14), + FlagCanAlias = (1 << 15), + FlagCanCodeAlias = (1 << 16), + FlagCanTransfer = (1 << 17), + FlagCanQueryPhysical = (1 << 18), + FlagCanDeviceMap = (1 << 19), + FlagCanAlignedDeviceMap = (1 << 20), + FlagCanIpcUserBuffer = (1 << 21), + FlagReferenceCounted = (1 << 22), + FlagCanMapProcess = (1 << 23), + FlagCanChangeAttribute = (1 << 24), + FlagCanCodeMemory = (1 << 25), + + FlagsData = FlagCanReprotect | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | + FlagMapped | FlagCanAlias | FlagCanTransfer | FlagCanQueryPhysical | + FlagCanDeviceMap | FlagCanAlignedDeviceMap | FlagCanIpcUserBuffer | + FlagReferenceCounted | FlagCanChangeAttribute, + + FlagsCode = FlagCanDebug | FlagCanUseIpc | FlagCanUseNonDeviceIpc | FlagCanUseNonSecureIpc | + FlagMapped | FlagCode | FlagCanQueryPhysical | FlagCanDeviceMap | + FlagCanAlignedDeviceMap | FlagReferenceCounted, + + FlagsMisc = FlagMapped | FlagReferenceCounted | FlagCanQueryPhysical | FlagCanDeviceMap, + + Free = static_cast<u32>(Svc::MemoryState::Free), + Io = static_cast<u32>(Svc::MemoryState::Io) | FlagMapped, + Static = static_cast<u32>(Svc::MemoryState::Static) | FlagMapped | FlagCanQueryPhysical, + Code = static_cast<u32>(Svc::MemoryState::Code) | FlagsCode | FlagCanMapProcess, + CodeData = static_cast<u32>(Svc::MemoryState::CodeData) | FlagsData | FlagCanMapProcess | + FlagCanCodeMemory, + Shared = static_cast<u32>(Svc::MemoryState::Shared) | FlagMapped | FlagReferenceCounted, + Normal = static_cast<u32>(Svc::MemoryState::Normal) | FlagsData | FlagCanCodeMemory, + + AliasCode = static_cast<u32>(Svc::MemoryState::AliasCode) | FlagsCode | FlagCanMapProcess | + FlagCanCodeAlias, + AliasCodeData = static_cast<u32>(Svc::MemoryState::AliasCodeData) | FlagsData | + FlagCanMapProcess | FlagCanCodeAlias | FlagCanCodeMemory, + + Ipc = static_cast<u32>(Svc::MemoryState::Ipc) | FlagsMisc | FlagCanAlignedDeviceMap | + FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, + + Stack = static_cast<u32>(Svc::MemoryState::Stack) | FlagsMisc | FlagCanAlignedDeviceMap | + FlagCanUseIpc | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, + + ThreadLocal = + static_cast<u32>(Svc::MemoryState::ThreadLocal) | FlagMapped | FlagReferenceCounted, + + Transfered = static_cast<u32>(Svc::MemoryState::Transfered) | FlagsMisc | + FlagCanAlignedDeviceMap | FlagCanChangeAttribute | FlagCanUseIpc | + FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, + + SharedTransfered = static_cast<u32>(Svc::MemoryState::SharedTransfered) | FlagsMisc | + FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, + + SharedCode = static_cast<u32>(Svc::MemoryState::SharedCode) | FlagMapped | + FlagReferenceCounted | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, + + Inaccessible = static_cast<u32>(Svc::MemoryState::Inaccessible), + + NonSecureIpc = static_cast<u32>(Svc::MemoryState::NonSecureIpc) | FlagsMisc | + FlagCanAlignedDeviceMap | FlagCanUseNonSecureIpc | FlagCanUseNonDeviceIpc, + 
+ NonDeviceIpc = + static_cast<u32>(Svc::MemoryState::NonDeviceIpc) | FlagsMisc | FlagCanUseNonDeviceIpc, + + Kernel = static_cast<u32>(Svc::MemoryState::Kernel) | FlagMapped, + + GeneratedCode = static_cast<u32>(Svc::MemoryState::GeneratedCode) | FlagMapped | + FlagReferenceCounted | FlagCanDebug, + CodeOut = static_cast<u32>(Svc::MemoryState::CodeOut) | FlagMapped | FlagReferenceCounted, +}; +DECLARE_ENUM_FLAG_OPERATORS(MemoryState); + +static_assert(static_cast<u32>(MemoryState::Free) == 0x00000000); +static_assert(static_cast<u32>(MemoryState::Io) == 0x00002001); +static_assert(static_cast<u32>(MemoryState::Static) == 0x00042002); +static_assert(static_cast<u32>(MemoryState::Code) == 0x00DC7E03); +static_assert(static_cast<u32>(MemoryState::CodeData) == 0x03FEBD04); +static_assert(static_cast<u32>(MemoryState::Normal) == 0x037EBD05); +static_assert(static_cast<u32>(MemoryState::Shared) == 0x00402006); +static_assert(static_cast<u32>(MemoryState::AliasCode) == 0x00DD7E08); +static_assert(static_cast<u32>(MemoryState::AliasCodeData) == 0x03FFBD09); +static_assert(static_cast<u32>(MemoryState::Ipc) == 0x005C3C0A); +static_assert(static_cast<u32>(MemoryState::Stack) == 0x005C3C0B); +static_assert(static_cast<u32>(MemoryState::ThreadLocal) == 0x0040200C); +static_assert(static_cast<u32>(MemoryState::Transfered) == 0x015C3C0D); +static_assert(static_cast<u32>(MemoryState::SharedTransfered) == 0x005C380E); +static_assert(static_cast<u32>(MemoryState::SharedCode) == 0x0040380F); +static_assert(static_cast<u32>(MemoryState::Inaccessible) == 0x00000010); +static_assert(static_cast<u32>(MemoryState::NonSecureIpc) == 0x005C3811); +static_assert(static_cast<u32>(MemoryState::NonDeviceIpc) == 0x004C2812); +static_assert(static_cast<u32>(MemoryState::Kernel) == 0x00002013); +static_assert(static_cast<u32>(MemoryState::GeneratedCode) == 0x00402214); +static_assert(static_cast<u32>(MemoryState::CodeOut) == 0x00402015); + +enum class MemoryPermission : u8 { + None = 0, + Mask = static_cast<u8>(~None), + + Read = 1 << 0, + Write = 1 << 1, + Execute = 1 << 2, + + ReadAndWrite = Read | Write, + ReadAndExecute = Read | Execute, + + UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write | + Svc::MemoryPermission::Execute), +}; +DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission); + +enum class MemoryAttribute : u8 { + None = 0x00, + Mask = 0x7F, + All = Mask, + DontCareMask = 0x80, + + Locked = static_cast<u8>(Svc::MemoryAttribute::Locked), + IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked), + DeviceShared = static_cast<u8>(Svc::MemoryAttribute::DeviceShared), + Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached), + + IpcAndDeviceMapped = IpcLocked | DeviceShared, + LockedAndIpcLocked = Locked | IpcLocked, + DeviceSharedAndUncached = DeviceShared | Uncached +}; +DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute); + +static_assert((static_cast<u8>(MemoryAttribute::Mask) & + static_cast<u8>(MemoryAttribute::DontCareMask)) == 0); + +struct MemoryInfo { + VAddr addr{}; + std::size_t size{}; + MemoryState state{}; + MemoryPermission perm{}; + MemoryAttribute attribute{}; + MemoryPermission original_perm{}; + u16 ipc_lock_count{}; + u16 device_use_count{}; + + constexpr Svc::MemoryInfo GetSvcMemoryInfo() const { + return { + addr, + size, + static_cast<Svc::MemoryState>(state & MemoryState::Mask), + static_cast<Svc::MemoryAttribute>(attribute & MemoryAttribute::Mask), + static_cast<Svc::MemoryPermission>(perm & MemoryPermission::UserMask), + ipc_lock_count, + device_use_count, 
+ }; + } + + constexpr VAddr GetAddress() const { + return addr; + } + constexpr std::size_t GetSize() const { + return size; + } + constexpr std::size_t GetNumPages() const { + return GetSize() / PageSize; + } + constexpr VAddr GetEndAddress() const { + return GetAddress() + GetSize(); + } + constexpr VAddr GetLastAddress() const { + return GetEndAddress() - 1; + } +}; + +class MemoryBlock final { + friend class MemoryBlockManager; + +private: + VAddr addr{}; + std::size_t num_pages{}; + MemoryState state{MemoryState::None}; + u16 ipc_lock_count{}; + u16 device_use_count{}; + MemoryPermission perm{MemoryPermission::None}; + MemoryPermission original_perm{MemoryPermission::None}; + MemoryAttribute attribute{MemoryAttribute::None}; + +public: + static constexpr int Compare(const MemoryBlock& lhs, const MemoryBlock& rhs) { + if (lhs.GetAddress() < rhs.GetAddress()) { + return -1; + } else if (lhs.GetAddress() <= rhs.GetLastAddress()) { + return 0; + } else { + return 1; + } + } + +public: + constexpr MemoryBlock() = default; + constexpr MemoryBlock(VAddr addr, std::size_t num_pages, MemoryState state, + MemoryPermission perm, MemoryAttribute attribute) + : addr{addr}, num_pages(num_pages), state{state}, perm{perm}, attribute{attribute} {} + + constexpr VAddr GetAddress() const { + return addr; + } + + constexpr std::size_t GetNumPages() const { + return num_pages; + } + + constexpr std::size_t GetSize() const { + return GetNumPages() * PageSize; + } + + constexpr VAddr GetEndAddress() const { + return GetAddress() + GetSize(); + } + + constexpr VAddr GetLastAddress() const { + return GetEndAddress() - 1; + } + + constexpr MemoryInfo GetMemoryInfo() const { + return { + GetAddress(), GetSize(), state, perm, + attribute, original_perm, ipc_lock_count, device_use_count, + }; + } + +private: + constexpr bool HasProperties(MemoryState s, MemoryPermission p, MemoryAttribute a) const { + constexpr MemoryAttribute AttributeIgnoreMask{MemoryAttribute::DontCareMask | + MemoryAttribute::IpcLocked | + MemoryAttribute::DeviceShared}; + return state == s && perm == p && + (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask); + } + + constexpr bool HasSameProperties(const MemoryBlock& rhs) const { + return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm && + attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count && + device_use_count == rhs.device_use_count; + } + + constexpr bool Contains(VAddr start) const { + return GetAddress() <= start && start <= GetEndAddress(); + } + + constexpr void Add(std::size_t count) { + ASSERT(count > 0); + ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1); + + num_pages += count; + } + + constexpr void Update(MemoryState new_state, MemoryPermission new_perm, + MemoryAttribute new_attribute) { + ASSERT(original_perm == MemoryPermission::None); + ASSERT((attribute & MemoryAttribute::IpcLocked) == MemoryAttribute::None); + + state = new_state; + perm = new_perm; + + // TODO(bunnei): Is this right? 
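+        // Note: the commented-out mask below would carry the IpcLocked/DeviceShared
+        // bits over from the old attribute; as written, new_attribute replaces the
+        // attribute wholesale, which is what the TODO above is questioning.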
+ attribute = static_cast<MemoryAttribute>( + new_attribute /*| (attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared))*/); + } + + constexpr MemoryBlock Split(VAddr split_addr) { + ASSERT(GetAddress() < split_addr); + ASSERT(Contains(split_addr)); + ASSERT(Common::IsAligned(split_addr, PageSize)); + + MemoryBlock block; + block.addr = addr; + block.num_pages = (split_addr - GetAddress()) / PageSize; + block.state = state; + block.ipc_lock_count = ipc_lock_count; + block.device_use_count = device_use_count; + block.perm = perm; + block.original_perm = original_perm; + block.attribute = attribute; + + addr = split_addr; + num_pages -= block.num_pages; + + return block; + } +}; +static_assert(std::is_trivially_destructible<MemoryBlock>::value); + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_block_manager.cpp b/src/core/hle/kernel/memory/memory_block_manager.cpp new file mode 100644 index 000000000..1ebc126c0 --- /dev/null +++ b/src/core/hle/kernel/memory/memory_block_manager.cpp @@ -0,0 +1,190 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/hle/kernel/memory/memory_block_manager.h" +#include "core/hle/kernel/memory/memory_types.h" + +namespace Kernel::Memory { + +MemoryBlockManager::MemoryBlockManager(VAddr start_addr, VAddr end_addr) + : start_addr{start_addr}, end_addr{end_addr} { + const u64 num_pages{(end_addr - start_addr) / PageSize}; + memory_block_tree.emplace_back(start_addr, num_pages, MemoryState::Free, MemoryPermission::None, + MemoryAttribute::None); +} + +MemoryBlockManager::iterator MemoryBlockManager::FindIterator(VAddr addr) { + auto node{memory_block_tree.begin()}; + while (node != end()) { + const VAddr end_addr{node->GetNumPages() * PageSize + node->GetAddress()}; + if (node->GetAddress() <= addr && end_addr - 1 >= addr) { + return node; + } + node = std::next(node); + } + return end(); +} + +VAddr MemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages, + std::size_t num_pages, std::size_t align, std::size_t offset, + std::size_t guard_pages) { + if (num_pages == 0) { + return {}; + } + + const VAddr region_end{region_start + region_num_pages * PageSize}; + const VAddr region_last{region_end - 1}; + for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) { + const auto info{it->GetMemoryInfo()}; + if (region_last < info.GetAddress()) { + break; + } + + if (info.state != MemoryState::Free) { + continue; + } + + VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()}; + area += guard_pages * PageSize; + + const VAddr offset_area{Common::AlignDown(area, align) + offset}; + area = (area <= offset_area) ? 
offset_area : offset_area + align;
+
+        const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
+        const VAddr area_last{area_end - 1};
+
+        if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
+            area_last <= info.GetLastAddress()) {
+            return area;
+        }
+    }
+
+    return {};
+}
+
+void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState prev_state,
+                                MemoryPermission prev_perm, MemoryAttribute prev_attribute,
+                                MemoryState state, MemoryPermission perm,
+                                MemoryAttribute attribute) {
+    const std::size_t prev_count{memory_block_tree.size()};
+    const VAddr end_addr{addr + num_pages * PageSize};
+    iterator node{memory_block_tree.begin()};
+
+    prev_attribute |= MemoryAttribute::IpcAndDeviceMapped;
+
+    while (node != memory_block_tree.end()) {
+        MemoryBlock* block{&(*node)};
+        iterator next_node{std::next(node)};
+        const VAddr cur_addr{block->GetAddress()};
+        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
+
+        if (addr < cur_end_addr && cur_addr < end_addr) {
+            if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
+                node = next_node;
+                continue;
+            }
+
+            iterator new_node{node};
+            if (addr > cur_addr) {
+                memory_block_tree.insert(node, block->Split(addr));
+            }
+
+            if (end_addr < cur_end_addr) {
+                new_node = memory_block_tree.insert(node, block->Split(end_addr));
+            }
+
+            new_node->Update(state, perm, attribute);
+
+            MergeAdjacent(new_node, next_node);
+        }
+
+        if (cur_end_addr - 1 >= end_addr - 1) {
+            break;
+        }
+
+        node = next_node;
+    }
+}
+
+void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState state,
+                                MemoryPermission perm, MemoryAttribute attribute) {
+    const std::size_t prev_count{memory_block_tree.size()};
+    const VAddr end_addr{addr + num_pages * PageSize};
+    iterator node{memory_block_tree.begin()};
+
+    while (node != memory_block_tree.end()) {
+        MemoryBlock* block{&(*node)};
+        iterator next_node{std::next(node)};
+        const VAddr cur_addr{block->GetAddress()};
+        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
+
+        if (addr < cur_end_addr && cur_addr < end_addr) {
+            iterator new_node{node};
+
+            if (addr > cur_addr) {
+                memory_block_tree.insert(node, block->Split(addr));
+            }
+
+            if (end_addr < cur_end_addr) {
+                new_node = memory_block_tree.insert(node, block->Split(end_addr));
+            }
+
+            new_node->Update(state, perm, attribute);
+
+            MergeAdjacent(new_node, next_node);
+        }
+
+        if (cur_end_addr - 1 >= end_addr - 1) {
+            break;
+        }
+
+        node = next_node;
+    }
+}
+
+void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
+    const_iterator it{FindIterator(start)};
+    MemoryInfo info{};
+    do {
+        info = it->GetMemoryInfo();
+        func(info);
+        it = std::next(it);
+    } while (info.addr + info.size - 1 < end - 1 && it != cend());
+}
+
+void MemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
+    MemoryBlock* block{&(*it)};
+
+    auto EraseIt = [&](const iterator it_to_erase) {
+        if (next_it == it_to_erase) {
+            next_it = std::next(next_it);
+        }
+        memory_block_tree.erase(it_to_erase);
+    };
+
+    if (it != memory_block_tree.begin()) {
+        MemoryBlock* prev{&(*std::prev(it))};
+
+        if (block->HasSameProperties(*prev)) {
+            const iterator prev_it{std::prev(it)};
+
+            prev->Add(block->GetNumPages());
+            EraseIt(it);
+
+            it = prev_it;
+            block = prev;
+        }
+    }
+
+    if (std::next(it) != cend()) {
+        const MemoryBlock* const next{&(*std::next(it))};
+
+        if (block->HasSameProperties(*next)) {
+            block->Add(next->GetNumPages());
+            EraseIt(std::next(it));
+        }
+    }
+}
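+
+// Illustrative sketch (not part of the upstream change; the addresses below are
+// hypothetical): updating a sub-range splits the initial Free block around it, and
+// updating it back to Free lets MergeAdjacent() coalesce the pieces again.
+//
+//     MemoryBlockManager manager{0x10000000, 0x18000000};
+//     manager.Update(0x10100000, 16, MemoryState::Normal,
+//                    MemoryPermission::ReadAndWrite, MemoryAttribute::None);
+//     // Blocks are now [Free][Normal][Free]
+//     manager.Update(0x10100000, 16, MemoryState::Free);
+//     // Blocks merge back into a single [Free] block spanning the whole range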
+ +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_block_manager.h b/src/core/hle/kernel/memory/memory_block_manager.h new file mode 100644 index 000000000..0f2270f0f --- /dev/null +++ b/src/core/hle/kernel/memory/memory_block_manager.h @@ -0,0 +1,64 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <functional> +#include <list> +#include <memory> + +#include "common/common_types.h" +#include "core/hle/kernel/memory/memory_block.h" + +namespace Kernel::Memory { + +class MemoryBlockManager final { +public: + using MemoryBlockTree = std::list<MemoryBlock>; + using iterator = MemoryBlockTree::iterator; + using const_iterator = MemoryBlockTree::const_iterator; + +public: + MemoryBlockManager(VAddr start_addr, VAddr end_addr); + + iterator end() { + return memory_block_tree.end(); + } + const_iterator end() const { + return memory_block_tree.end(); + } + const_iterator cend() const { + return memory_block_tree.cend(); + } + + iterator FindIterator(VAddr addr); + + VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages, + std::size_t align, std::size_t offset, std::size_t guard_pages); + + void Update(VAddr addr, std::size_t num_pages, MemoryState prev_state, + MemoryPermission prev_perm, MemoryAttribute prev_attribute, MemoryState state, + MemoryPermission perm, MemoryAttribute attribute); + + void Update(VAddr addr, std::size_t num_pages, MemoryState state, + MemoryPermission perm = MemoryPermission::None, + MemoryAttribute attribute = MemoryAttribute::None); + + using IterateFunc = std::function<void(const MemoryInfo&)>; + void IterateForRange(VAddr start, VAddr end, IterateFunc&& func); + + MemoryBlock& FindBlock(VAddr addr) { + return *FindIterator(addr); + } + +private: + void MergeAdjacent(iterator it, iterator& next_it); + + const VAddr start_addr; + const VAddr end_addr; + + MemoryBlockTree memory_block_tree; +}; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_layout.h b/src/core/hle/kernel/memory/memory_layout.h new file mode 100644 index 000000000..830c6f0d7 --- /dev/null +++ b/src/core/hle/kernel/memory/memory_layout.h @@ -0,0 +1,73 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
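+
+// MemoryLayout splits DRAM into the application, applet and system regions:
+// GetDefaultLayout() places the application region at the top of DRAM, the applet
+// region directly below it, and the system region from the end of the slab heap up
+// to the applet region's base.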
+
+#pragma once
+
+#include "common/common_types.h"
+#include "core/device_memory.h"
+
+namespace Kernel::Memory {
+
+class MemoryRegion final {
+    friend class MemoryLayout;
+
+public:
+    constexpr PAddr StartAddress() const {
+        return start_address;
+    }
+
+    constexpr PAddr EndAddress() const {
+        return end_address;
+    }
+
+private:
+    constexpr MemoryRegion() = default;
+    constexpr MemoryRegion(PAddr start_address, PAddr end_address)
+        : start_address{start_address}, end_address{end_address} {}
+
+    const PAddr start_address{};
+    const PAddr end_address{};
+};
+
+class MemoryLayout final {
+public:
+    constexpr const MemoryRegion& Application() const {
+        return application;
+    }
+
+    constexpr const MemoryRegion& Applet() const {
+        return applet;
+    }
+
+    constexpr const MemoryRegion& System() const {
+        return system;
+    }
+
+    static constexpr MemoryLayout GetDefaultLayout() {
+        constexpr std::size_t application_size{0xcd500000};
+        constexpr std::size_t applet_size{0x1fb00000};
+        constexpr PAddr application_start_address{Core::DramMemoryMap::End - application_size};
+        constexpr PAddr application_end_address{Core::DramMemoryMap::End};
+        constexpr PAddr applet_start_address{application_start_address - applet_size};
+        constexpr PAddr applet_end_address{applet_start_address + applet_size};
+        constexpr PAddr system_start_address{Core::DramMemoryMap::SlabHeapEnd};
+        constexpr PAddr system_end_address{applet_start_address};
+        return {application_start_address, application_end_address, applet_start_address,
+                applet_end_address, system_start_address, system_end_address};
+    }
+
+private:
+    constexpr MemoryLayout(PAddr application_start_address, PAddr application_end_address,
+                           PAddr applet_start_address, PAddr applet_end_address,
+                           PAddr system_start_address, PAddr system_end_address)
+        : application{application_start_address, application_end_address},
+          applet{applet_start_address, applet_end_address},
+          system{system_start_address, system_end_address} {}
+
+    const MemoryRegion application;
+    const MemoryRegion applet;
+    const MemoryRegion system;
+
+    const PAddr start_address{};
+};
+
+} // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/memory_manager.cpp b/src/core/hle/kernel/memory/memory_manager.cpp
new file mode 100644
index 000000000..3cd4f9e85
--- /dev/null
+++ b/src/core/hle/kernel/memory/memory_manager.cpp
@@ -0,0 +1,176 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
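+
+// Each pool is backed by an Impl, which couples a PageHeap with per-pool metadata.
+// Impl::Initialize() below sizes that metadata: a u16 reference count per page, an
+// optimization bitmap (one bit per page, stored as u64 words), and the PageHeap's
+// own bitmap storage, all rounded up to whole pages.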
+ +#include <algorithm> + +#include "common/alignment.h" +#include "common/assert.h" +#include "common/common_types.h" +#include "common/scope_exit.h" +#include "core/hle/kernel/errors.h" +#include "core/hle/kernel/memory/memory_manager.h" +#include "core/hle/kernel/memory/page_linked_list.h" + +namespace Kernel::Memory { + +std::size_t MemoryManager::Impl::Initialize(Pool new_pool, u64 start_address, u64 end_address) { + const auto size{end_address - start_address}; + + // Calculate metadata sizes + const auto ref_count_size{(size / PageSize) * sizeof(u16)}; + const auto optimize_map_size{(Common::AlignUp((size / PageSize), 64) / 64) * sizeof(u64)}; + const auto manager_size{Common::AlignUp(optimize_map_size + ref_count_size, PageSize)}; + const auto page_heap_size{PageHeap::CalculateMetadataOverheadSize(size)}; + const auto total_metadata_size{manager_size + page_heap_size}; + ASSERT(manager_size <= total_metadata_size); + ASSERT(Common::IsAligned(total_metadata_size, PageSize)); + + // Setup region + pool = new_pool; + + // Initialize the manager's KPageHeap + heap.Initialize(start_address, size, page_heap_size); + + // Free the memory to the heap + heap.Free(start_address, size / PageSize); + + // Update the heap's used size + heap.UpdateUsedSize(); + + return total_metadata_size; +} + +void MemoryManager::InitializeManager(Pool pool, u64 start_address, u64 end_address) { + ASSERT(pool < Pool::Count); + managers[static_cast<std::size_t>(pool)].Initialize(pool, start_address, end_address); +} + +VAddr MemoryManager::AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool, + Direction dir) { + // Early return if we're allocating no pages + if (num_pages == 0) { + return {}; + } + + // Lock the pool that we're allocating from + const auto pool_index{static_cast<std::size_t>(pool)}; + std::lock_guard lock{pool_locks[pool_index]}; + + // Choose a heap based on our page size request + const s32 heap_index{PageHeap::GetAlignedBlockIndex(num_pages, align_pages)}; + + // Loop, trying to iterate from each block + // TODO (bunnei): Support multiple managers + Impl& chosen_manager{managers[pool_index]}; + VAddr allocated_block{chosen_manager.AllocateBlock(heap_index)}; + + // If we failed to allocate, quit now + if (!allocated_block) { + return {}; + } + + // If we allocated more than we need, free some + const auto allocated_pages{PageHeap::GetBlockNumPages(heap_index)}; + if (allocated_pages > num_pages) { + chosen_manager.Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages); + } + + return allocated_block; +} + +ResultCode MemoryManager::Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool, + Direction dir) { + ASSERT(page_list.GetNumPages() == 0); + + // Early return if we're allocating no pages + if (num_pages == 0) { + return RESULT_SUCCESS; + } + + // Lock the pool that we're allocating from + const auto pool_index{static_cast<std::size_t>(pool)}; + std::lock_guard lock{pool_locks[pool_index]}; + + // Choose a heap based on our page size request + const s32 heap_index{PageHeap::GetBlockIndex(num_pages)}; + if (heap_index < 0) { + return ERR_OUT_OF_MEMORY; + } + + // TODO (bunnei): Support multiple managers + Impl& chosen_manager{managers[pool_index]}; + + // Ensure that we don't leave anything un-freed + auto group_guard = detail::ScopeExit([&] { + for (const auto& it : page_list.Nodes()) { + const auto num_pages{std::min( + it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)}; + 
chosen_manager.Free(it.GetAddress(), num_pages); + } + }); + + // Keep allocating until we've allocated all our pages + for (s32 index{heap_index}; index >= 0 && num_pages > 0; index--) { + const auto pages_per_alloc{PageHeap::GetBlockNumPages(index)}; + + while (num_pages >= pages_per_alloc) { + // Allocate a block + VAddr allocated_block{chosen_manager.AllocateBlock(index)}; + if (!allocated_block) { + break; + } + + // Safely add it to our group + { + auto block_guard = detail::ScopeExit( + [&] { chosen_manager.Free(allocated_block, pages_per_alloc); }); + + if (const ResultCode result{page_list.AddBlock(allocated_block, pages_per_alloc)}; + result.IsError()) { + return result; + } + + block_guard.Cancel(); + } + + num_pages -= pages_per_alloc; + } + } + + // Only succeed if we allocated as many pages as we wanted + ASSERT(num_pages >= 0); + if (num_pages) { + return ERR_OUT_OF_MEMORY; + } + + // We succeeded! + group_guard.Cancel(); + return RESULT_SUCCESS; +} + +ResultCode MemoryManager::Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool, + Direction dir) { + // Early return if we're freeing no pages + if (!num_pages) { + return RESULT_SUCCESS; + } + + // Lock the pool that we're freeing from + const auto pool_index{static_cast<std::size_t>(pool)}; + std::lock_guard lock{pool_locks[pool_index]}; + + // TODO (bunnei): Support multiple managers + Impl& chosen_manager{managers[pool_index]}; + + // Free all of the pages + for (const auto& it : page_list.Nodes()) { + const auto num_pages{std::min( + it.GetNumPages(), (chosen_manager.GetEndAddress() - it.GetAddress()) / PageSize)}; + chosen_manager.Free(it.GetAddress(), num_pages); + } + + return RESULT_SUCCESS; +} + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_manager.h b/src/core/hle/kernel/memory/memory_manager.h new file mode 100644 index 000000000..b078d7a5e --- /dev/null +++ b/src/core/hle/kernel/memory/memory_manager.h @@ -0,0 +1,97 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
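+
+// MemoryManager tracks the physical memory pools (Application, Applet, System,
+// SystemNonSecure). Each pool gets its own mutex and its own Impl/PageHeap, so
+// allocations in different pools do not contend with each other.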
+ +#pragma once + +#include <array> +#include <mutex> + +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "core/hle/kernel/memory/page_heap.h" +#include "core/hle/result.h" + +namespace Kernel::Memory { + +class PageLinkedList; + +class MemoryManager final : NonCopyable { +public: + enum class Pool : u32 { + Application = 0, + Applet = 1, + System = 2, + SystemNonSecure = 3, + + Count, + + Shift = 4, + Mask = (0xF << Shift), + }; + + enum class Direction : u32 { + FromFront = 0, + FromBack = 1, + + Shift = 0, + Mask = (0xF << Shift), + }; + + MemoryManager() = default; + + constexpr std::size_t GetSize(Pool pool) const { + return managers[static_cast<std::size_t>(pool)].GetSize(); + } + + void InitializeManager(Pool pool, u64 start_address, u64 end_address); + VAddr AllocateContinuous(std::size_t num_pages, std::size_t align_pages, Pool pool, + Direction dir = Direction::FromFront); + ResultCode Allocate(PageLinkedList& page_list, std::size_t num_pages, Pool pool, + Direction dir = Direction::FromFront); + ResultCode Free(PageLinkedList& page_list, std::size_t num_pages, Pool pool, + Direction dir = Direction::FromFront); + + static constexpr std::size_t MaxManagerCount = 10; + +private: + class Impl final : NonCopyable { + private: + using RefCount = u16; + + private: + PageHeap heap; + Pool pool{}; + + public: + Impl() = default; + + std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address); + + VAddr AllocateBlock(s32 index) { + return heap.AllocateBlock(index); + } + + void Free(VAddr addr, std::size_t num_pages) { + heap.Free(addr, num_pages); + } + + constexpr std::size_t GetSize() const { + return heap.GetSize(); + } + + constexpr VAddr GetAddress() const { + return heap.GetAddress(); + } + + constexpr VAddr GetEndAddress() const { + return heap.GetEndAddress(); + } + }; + +private: + std::array<std::mutex, static_cast<std::size_t>(Pool::Count)> pool_locks; + std::array<Impl, MaxManagerCount> managers; +}; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/memory_types.h b/src/core/hle/kernel/memory/memory_types.h new file mode 100644 index 000000000..a75bf77c0 --- /dev/null +++ b/src/core/hle/kernel/memory/memory_types.h @@ -0,0 +1,18 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <array> + +#include "common/common_types.h" + +namespace Kernel::Memory { + +constexpr std::size_t PageBits{12}; +constexpr std::size_t PageSize{1 << PageBits}; + +using Page = std::array<u8, PageSize>; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/page_heap.cpp b/src/core/hle/kernel/memory/page_heap.cpp new file mode 100644 index 000000000..efcbb3cad --- /dev/null +++ b/src/core/hle/kernel/memory/page_heap.cpp @@ -0,0 +1,119 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphère, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX. 
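+
+// PageHeap is a buddy-style allocator over a fixed range: one Block per supported
+// block size (4 KiB up to 1 GiB), each tracking free blocks of that size in a
+// hierarchical bitmap. Free() below carves a freed range into the largest aligned
+// blocks it can, then hands the unaligned head and tail to progressively smaller
+// block sizes.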
+ +#include "core/core.h" +#include "core/hle/kernel/memory/page_heap.h" +#include "core/memory.h" + +namespace Kernel::Memory { + +void PageHeap::Initialize(VAddr address, std::size_t size, std::size_t metadata_size) { + // Check our assumptions + ASSERT(Common::IsAligned((address), PageSize)); + ASSERT(Common::IsAligned(size, PageSize)); + + // Set our members + heap_address = address; + heap_size = size; + + // Setup bitmaps + metadata.resize(metadata_size / sizeof(u64)); + u64* cur_bitmap_storage{metadata.data()}; + for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) { + const std::size_t cur_block_shift{MemoryBlockPageShifts[i]}; + const std::size_t next_block_shift{ + (i != MemoryBlockPageShifts.size() - 1) ? MemoryBlockPageShifts[i + 1] : 0}; + cur_bitmap_storage = blocks[i].Initialize(heap_address, heap_size, cur_block_shift, + next_block_shift, cur_bitmap_storage); + } +} + +VAddr PageHeap::AllocateBlock(s32 index) { + const std::size_t needed_size{blocks[index].GetSize()}; + + for (s32 i{index}; i < static_cast<s32>(MemoryBlockPageShifts.size()); i++) { + if (const VAddr addr{blocks[i].PopBlock()}; addr) { + if (const std::size_t allocated_size{blocks[i].GetSize()}; + allocated_size > needed_size) { + Free(addr + needed_size, (allocated_size - needed_size) / PageSize); + } + return addr; + } + } + + return 0; +} + +void PageHeap::FreeBlock(VAddr block, s32 index) { + do { + block = blocks[index++].PushBlock(block); + } while (block != 0); +} + +void PageHeap::Free(VAddr addr, std::size_t num_pages) { + // Freeing no pages is a no-op + if (num_pages == 0) { + return; + } + + // Find the largest block size that we can free, and free as many as possible + s32 big_index{static_cast<s32>(MemoryBlockPageShifts.size()) - 1}; + const VAddr start{addr}; + const VAddr end{(num_pages * PageSize) + addr}; + VAddr before_start{start}; + VAddr before_end{start}; + VAddr after_start{end}; + VAddr after_end{end}; + while (big_index >= 0) { + const std::size_t block_size{blocks[big_index].GetSize()}; + const VAddr big_start{Common::AlignUp((start), block_size)}; + const VAddr big_end{Common::AlignDown((end), block_size)}; + if (big_start < big_end) { + // Free as many big blocks as we can + for (auto block{big_start}; block < big_end; block += block_size) { + FreeBlock(block, big_index); + } + before_end = big_start; + after_start = big_end; + break; + } + big_index--; + } + ASSERT(big_index >= 0); + + // Free space before the big blocks + for (s32 i{big_index - 1}; i >= 0; i--) { + const std::size_t block_size{blocks[i].GetSize()}; + while (before_start + block_size <= before_end) { + before_end -= block_size; + FreeBlock(before_end, i); + } + } + + // Free space after the big blocks + for (s32 i{big_index - 1}; i >= 0; i--) { + const std::size_t block_size{blocks[i].GetSize()}; + while (after_start + block_size <= after_end) { + FreeBlock(after_start, i); + after_start += block_size; + } + } +} + +std::size_t PageHeap::CalculateMetadataOverheadSize(std::size_t region_size) { + std::size_t overhead_size = 0; + for (std::size_t i = 0; i < MemoryBlockPageShifts.size(); i++) { + const std::size_t cur_block_shift{MemoryBlockPageShifts[i]}; + const std::size_t next_block_shift{ + (i != MemoryBlockPageShifts.size() - 1) ? 
MemoryBlockPageShifts[i + 1] : 0};
+        overhead_size += PageHeap::Block::CalculateMetadataOverheadSize(
+            region_size, cur_block_shift, next_block_shift);
+    }
+    return Common::AlignUp(overhead_size, PageSize);
+}
+
+} // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h
new file mode 100644
index 000000000..380c3f5a1
--- /dev/null
+++ b/src/core/hle/kernel/memory/page_heap.h
@@ -0,0 +1,370 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphère, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX.
+
+#pragma once
+
+#include <array>
+#include <vector>
+
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/bit_util.h"
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "core/hle/kernel/memory/memory_types.h"
+
+namespace Kernel::Memory {
+
+class PageHeap final : NonCopyable {
+public:
+    static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
+        const auto target_pages{std::max(num_pages, align_pages)};
+        for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
+            if (target_pages <=
+                (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
+                return static_cast<s32>(i);
+            }
+        }
+        return -1;
+    }
+
+    static constexpr s32 GetBlockIndex(std::size_t num_pages) {
+        for (s32 i{static_cast<s32>(NumMemoryBlockPageShifts) - 1}; i >= 0; i--) {
+            if (num_pages >= (static_cast<std::size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
+                return i;
+            }
+        }
+        return -1;
+    }
+
+    static constexpr std::size_t GetBlockSize(std::size_t index) {
+        return static_cast<std::size_t>(1) << MemoryBlockPageShifts[index];
+    }
+
+    static constexpr std::size_t GetBlockNumPages(std::size_t index) {
+        return GetBlockSize(index) / PageSize;
+    }
+
+private:
+    static constexpr std::size_t NumMemoryBlockPageShifts{7};
+    static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
+        0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
+    };
+
+    class Block final : NonCopyable {
+    private:
+        class Bitmap final : NonCopyable {
+        public:
+            static constexpr std::size_t MaxDepth{4};
+
+        private:
+            std::array<u64*, MaxDepth> bit_storages{};
+            std::size_t num_bits{};
+            std::size_t used_depths{};
+
+        public:
+            constexpr Bitmap() = default;
+
+            constexpr std::size_t GetNumBits() const {
+                return num_bits;
+            }
+            constexpr s32 GetHighestDepthIndex() const {
+                return static_cast<s32>(used_depths) - 1;
+            }
+
+            constexpr u64* Initialize(u64* storage, std::size_t size) {
+                // Initially, everything is unset
+                num_bits = 0;
+
+                // Calculate the needed bitmap depth
+                used_depths = static_cast<std::size_t>(GetRequiredDepth(size));
+                ASSERT(used_depths <= MaxDepth);
+
+                // Set the bitmap pointers
+                for (s32 depth{GetHighestDepthIndex()}; depth >= 0; depth--) {
+                    bit_storages[depth] = storage;
+                    size = Common::AlignUp(size, 64) / 64;
+                    storage += size;
+                }
+
+                return storage;
+            }
+
+            s64 FindFreeBlock() const {
+                uintptr_t offset{};
+                s32 depth{};
+
+                do {
+                    const u64 v{bit_storages[depth][offset]};
+                    if (v == 0) {
+                        // Non-zero depth indicates that a previous level had a free block
+                        ASSERT(depth == 0);
+                        return -1;
+                    }
+                    offset = offset * 64 + Common::CountTrailingZeroes64(v);
+                    ++depth;
+                } while (depth < static_cast<s32>(used_depths));
+
+                return static_cast<s64>(offset);
+            }
+
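+            // Each u64 at depth N-1 summarizes 64 words at depth N, so the
+            // SetBit/ClearBit helpers below only propagate toward depth 0 when a
+            // word transitions between zero and non-zero. That is what keeps
+            // FindFreeBlock() above a walk of at most MaxDepth words.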
+ constexpr void SetBit(std::size_t offset) { + SetBit(GetHighestDepthIndex(), offset); + num_bits++; + } + + constexpr void ClearBit(std::size_t offset) { + ClearBit(GetHighestDepthIndex(), offset); + num_bits--; + } + + constexpr bool ClearRange(std::size_t offset, std::size_t count) { + const s32 depth{GetHighestDepthIndex()}; + const auto bit_ind{offset / 64}; + u64* bits{bit_storages[depth]}; + if (count < 64) { + const auto shift{offset % 64}; + ASSERT(shift + count <= 64); + // Check that all the bits are set + const u64 mask{((1ULL << count) - 1) << shift}; + u64 v{bits[bit_ind]}; + if ((v & mask) != mask) { + return false; + } + + // Clear the bits + v &= ~mask; + bits[bit_ind] = v; + if (v == 0) { + ClearBit(depth - 1, bit_ind); + } + } else { + ASSERT(offset % 64 == 0); + ASSERT(count % 64 == 0); + // Check that all the bits are set + std::size_t remaining{count}; + std::size_t i = 0; + do { + if (bits[bit_ind + i++] != ~u64(0)) { + return false; + } + remaining -= 64; + } while (remaining > 0); + + // Clear the bits + remaining = count; + i = 0; + do { + bits[bit_ind + i] = 0; + ClearBit(depth - 1, bit_ind + i); + i++; + remaining -= 64; + } while (remaining > 0); + } + + num_bits -= count; + return true; + } + + private: + constexpr void SetBit(s32 depth, std::size_t offset) { + while (depth >= 0) { + const auto ind{offset / 64}; + const auto which{offset % 64}; + const u64 mask{1ULL << which}; + + u64* bit{std::addressof(bit_storages[depth][ind])}; + const u64 v{*bit}; + ASSERT((v & mask) == 0); + *bit = v | mask; + if (v) { + break; + } + offset = ind; + depth--; + } + } + + constexpr void ClearBit(s32 depth, std::size_t offset) { + while (depth >= 0) { + const auto ind{offset / 64}; + const auto which{offset % 64}; + const u64 mask{1ULL << which}; + + u64* bit{std::addressof(bit_storages[depth][ind])}; + u64 v{*bit}; + ASSERT((v & mask) != 0); + v &= ~mask; + *bit = v; + if (v) { + break; + } + offset = ind; + depth--; + } + } + + private: + static constexpr s32 GetRequiredDepth(std::size_t region_size) { + s32 depth = 0; + while (true) { + region_size /= 64; + depth++; + if (region_size == 0) { + return depth; + } + } + } + + public: + static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size) { + std::size_t overhead_bits = 0; + for (s32 depth{GetRequiredDepth(region_size) - 1}; depth >= 0; depth--) { + region_size = Common::AlignUp(region_size, 64) / 64; + overhead_bits += region_size; + } + return overhead_bits * sizeof(u64); + } + }; + + private: + Bitmap bitmap; + VAddr heap_address{}; + uintptr_t end_offset{}; + std::size_t block_shift{}; + std::size_t next_block_shift{}; + + public: + constexpr Block() = default; + + constexpr std::size_t GetShift() const { + return block_shift; + } + constexpr std::size_t GetNextShift() const { + return next_block_shift; + } + constexpr std::size_t GetSize() const { + return static_cast<std::size_t>(1) << GetShift(); + } + constexpr std::size_t GetNumPages() const { + return GetSize() / PageSize; + } + constexpr std::size_t GetNumFreeBlocks() const { + return bitmap.GetNumBits(); + } + constexpr std::size_t GetNumFreePages() const { + return GetNumFreeBlocks() * GetNumPages(); + } + + constexpr u64* Initialize(VAddr addr, std::size_t size, std::size_t bs, std::size_t nbs, + u64* bit_storage) { + // Set shifts + block_shift = bs; + next_block_shift = nbs; + + // Align up the address + VAddr end{addr + size}; + const auto align{(next_block_shift != 0) ? 
(1ULL << next_block_shift) + : (1ULL << block_shift)}; + addr = Common::AlignDown((addr), align); + end = Common::AlignUp((end), align); + + heap_address = addr; + end_offset = (end - addr) / (1ULL << block_shift); + return bitmap.Initialize(bit_storage, end_offset); + } + + constexpr VAddr PushBlock(VAddr address) { + // Set the bit for the free block + std::size_t offset{(address - heap_address) >> GetShift()}; + bitmap.SetBit(offset); + + // If we have a next shift, try to clear the blocks below and return the address + if (GetNextShift()) { + const auto diff{1ULL << (GetNextShift() - GetShift())}; + offset = Common::AlignDown(offset, diff); + if (bitmap.ClearRange(offset, diff)) { + return heap_address + (offset << GetShift()); + } + } + + // We couldn't coalesce, or we're already as big as possible + return 0; + } + + VAddr PopBlock() { + // Find a free block + const s64 soffset{bitmap.FindFreeBlock()}; + if (soffset < 0) { + return 0; + } + const auto offset{static_cast<std::size_t>(soffset)}; + + // Update our tracking and return it + bitmap.ClearBit(offset); + return heap_address + (offset << GetShift()); + } + + public: + static constexpr std::size_t CalculateMetadataOverheadSize(std::size_t region_size, + std::size_t cur_block_shift, + std::size_t next_block_shift) { + const auto cur_block_size{(1ULL << cur_block_shift)}; + const auto next_block_size{(1ULL << next_block_shift)}; + const auto align{(next_block_shift != 0) ? next_block_size : cur_block_size}; + return Bitmap::CalculateMetadataOverheadSize( + (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size); + } + }; + +public: + PageHeap() = default; + + constexpr VAddr GetAddress() const { + return heap_address; + } + constexpr std::size_t GetSize() const { + return heap_size; + } + constexpr VAddr GetEndAddress() const { + return GetAddress() + GetSize(); + } + constexpr std::size_t GetPageOffset(VAddr block) const { + return (block - GetAddress()) / PageSize; + } + + void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size); + VAddr AllocateBlock(s32 index); + void Free(VAddr addr, std::size_t num_pages); + + void UpdateUsedSize() { + used_size = heap_size - (GetNumFreePages() * PageSize); + } + + static std::size_t CalculateMetadataOverheadSize(std::size_t region_size); + +private: + constexpr std::size_t GetNumFreePages() const { + std::size_t num_free{}; + + for (const auto& block : blocks) { + num_free += block.GetNumFreePages(); + } + + return num_free; + } + + void FreeBlock(VAddr block, s32 index); + + VAddr heap_address{}; + std::size_t heap_size{}; + std::size_t used_size{}; + std::array<Block, NumMemoryBlockPageShifts> blocks{}; + std::vector<u64> metadata; +}; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/page_linked_list.h b/src/core/hle/kernel/memory/page_linked_list.h new file mode 100644 index 000000000..0668d00c6 --- /dev/null +++ b/src/core/hle/kernel/memory/page_linked_list.h @@ -0,0 +1,93 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
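+
+// PageLinkedList records a sequence of physical page ranges as (address, count)
+// nodes. AddBlock() merges a new range into the previous node when the two are
+// physically contiguous, e.g. adding {0x1000, 2 pages} and then {0x3000, 1 page}
+// collapses into a single {0x1000, 3 pages} node (illustrative addresses).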
+ +#pragma once + +#include <list> + +#include "common/assert.h" +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "core/hle/kernel/memory/memory_types.h" +#include "core/hle/result.h" + +namespace Kernel::Memory { + +class PageLinkedList final { +public: + class Node final { + public: + constexpr Node(u64 addr, std::size_t num_pages) : addr{addr}, num_pages{num_pages} {} + + constexpr u64 GetAddress() const { + return addr; + } + + constexpr std::size_t GetNumPages() const { + return num_pages; + } + + private: + u64 addr{}; + std::size_t num_pages{}; + }; + +public: + PageLinkedList() = default; + PageLinkedList(u64 address, u64 num_pages) { + ASSERT(AddBlock(address, num_pages).IsSuccess()); + } + + constexpr std::list<Node>& Nodes() { + return nodes; + } + + constexpr const std::list<Node>& Nodes() const { + return nodes; + } + + std::size_t GetNumPages() const { + std::size_t num_pages = 0; + for (const Node& node : nodes) { + num_pages += node.GetNumPages(); + } + return num_pages; + } + + bool IsEqual(PageLinkedList& other) const { + auto this_node = nodes.begin(); + auto other_node = other.nodes.begin(); + while (this_node != nodes.end() && other_node != other.nodes.end()) { + if (this_node->GetAddress() != other_node->GetAddress() || + this_node->GetNumPages() != other_node->GetNumPages()) { + return false; + } + this_node = std::next(this_node); + other_node = std::next(other_node); + } + + return this_node == nodes.end() && other_node == other.nodes.end(); + } + + ResultCode AddBlock(u64 address, u64 num_pages) { + if (!num_pages) { + return RESULT_SUCCESS; + } + if (!nodes.empty()) { + const auto node = nodes.back(); + if (node.GetAddress() + node.GetNumPages() * PageSize == address) { + address = node.GetAddress(); + num_pages += node.GetNumPages(); + nodes.pop_back(); + } + } + nodes.push_back({address, num_pages}); + return RESULT_SUCCESS; + } + +private: + std::list<Node> nodes; +}; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp new file mode 100644 index 000000000..091e52ca4 --- /dev/null +++ b/src/core/hle/kernel/memory/page_table.cpp @@ -0,0 +1,1130 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
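+
+// PageTable models a process address space: InitializeForProcess() derives the
+// code/alias/heap/stack/kernel-map regions from the 32/36/39-bit layout tables in
+// address_space_info.cpp, optionally sliding each region by a random multiple of
+// 2 MiB when ASLR is enabled, and the Map*/Unmap* operations below keep the
+// MemoryBlockManager's state in sync with the actual mappings.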
+ +#include "common/alignment.h" +#include "common/assert.h" +#include "common/scope_exit.h" +#include "core/core.h" +#include "core/device_memory.h" +#include "core/hle/kernel/errors.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/memory/address_space_info.h" +#include "core/hle/kernel/memory/memory_block.h" +#include "core/hle/kernel/memory/memory_block_manager.h" +#include "core/hle/kernel/memory/page_linked_list.h" +#include "core/hle/kernel/memory/page_table.h" +#include "core/hle/kernel/memory/system_control.h" +#include "core/hle/kernel/process.h" +#include "core/hle/kernel/resource_limit.h" +#include "core/memory.h" + +namespace Kernel::Memory { + +namespace { + +constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) { + switch (as_type) { + case FileSys::ProgramAddressSpaceType::Is32Bit: + case FileSys::ProgramAddressSpaceType::Is32BitNoMap: + return 32; + case FileSys::ProgramAddressSpaceType::Is36Bit: + return 36; + case FileSys::ProgramAddressSpaceType::Is39Bit: + return 39; + default: + UNREACHABLE(); + return {}; + } +} + +constexpr u64 GetAddressInRange(const MemoryInfo& info, VAddr addr) { + if (info.GetAddress() < addr) { + return addr; + } + return info.GetAddress(); +} + +constexpr std::size_t GetSizeInRange(const MemoryInfo& info, VAddr start, VAddr end) { + std::size_t size{info.GetSize()}; + if (info.GetAddress() < start) { + size -= start - info.GetAddress(); + } + if (info.GetEndAddress() > end) { + size -= info.GetEndAddress() - end; + } + return size; +} + +} // namespace + +PageTable::PageTable(Core::System& system) : system{system} {} + +ResultCode PageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, + bool enable_aslr, VAddr code_addr, std::size_t code_size, + Memory::MemoryManager::Pool pool) { + + const auto GetSpaceStart = [this](AddressSpaceInfo::Type type) { + return AddressSpaceInfo::GetAddressSpaceStart(address_space_width, type); + }; + const auto GetSpaceSize = [this](AddressSpaceInfo::Type type) { + return AddressSpaceInfo::GetAddressSpaceSize(address_space_width, type); + }; + + // Set our width and heap/alias sizes + address_space_width = GetAddressSpaceWidthFromType(as_type); + const VAddr start = 0; + const VAddr end{1ULL << address_space_width}; + std::size_t alias_region_size{GetSpaceSize(AddressSpaceInfo::Type::Alias)}; + std::size_t heap_region_size{GetSpaceSize(AddressSpaceInfo::Type::Heap)}; + + ASSERT(start <= code_addr); + ASSERT(code_addr < code_addr + code_size); + ASSERT(code_addr + code_size - 1 <= end - 1); + + // Adjust heap/alias size if we don't have an alias region + if (as_type == FileSys::ProgramAddressSpaceType::Is32BitNoMap) { + heap_region_size += alias_region_size; + alias_region_size = 0; + } + + // Set code regions and determine remaining + constexpr std::size_t RegionAlignment{2 * 1024 * 1024}; + VAddr process_code_start{}; + VAddr process_code_end{}; + std::size_t stack_region_size{}; + std::size_t kernel_map_region_size{}; + + if (address_space_width == 39) { + alias_region_size = GetSpaceSize(AddressSpaceInfo::Type::Alias); + heap_region_size = GetSpaceSize(AddressSpaceInfo::Type::Heap); + stack_region_size = GetSpaceSize(AddressSpaceInfo::Type::Stack); + kernel_map_region_size = GetSpaceSize(AddressSpaceInfo::Type::Is32Bit); + code_region_start = GetSpaceStart(AddressSpaceInfo::Type::Large64Bit); + code_region_end = code_region_start + GetSpaceSize(AddressSpaceInfo::Type::Large64Bit); + alias_code_region_start = code_region_start; + 
alias_code_region_end = code_region_end;
+        process_code_start = Common::AlignDown(code_addr, RegionAlignment);
+        process_code_end = Common::AlignUp(code_addr + code_size, RegionAlignment);
+    } else {
+        stack_region_size = 0;
+        kernel_map_region_size = 0;
+        code_region_start = GetSpaceStart(AddressSpaceInfo::Type::Is32Bit);
+        code_region_end = code_region_start + GetSpaceSize(AddressSpaceInfo::Type::Is32Bit);
+        stack_region_start = code_region_start;
+        alias_code_region_start = code_region_start;
+        alias_code_region_end = GetSpaceStart(AddressSpaceInfo::Type::Small64Bit) +
+                                GetSpaceSize(AddressSpaceInfo::Type::Small64Bit);
+        stack_region_end = code_region_end;
+        kernel_map_region_start = code_region_start;
+        kernel_map_region_end = code_region_end;
+        process_code_start = code_region_start;
+        process_code_end = code_region_end;
+    }
+
+    // Set other basic fields
+    is_aslr_enabled = enable_aslr;
+    address_space_start = start;
+    address_space_end = end;
+    is_kernel = false;
+
+    // Determine the region in which we can place the regions whose base addresses
+    // have not yet been determined
+    VAddr alloc_start{};
+    std::size_t alloc_size{};
+    if ((process_code_start - code_region_start) >= (end - process_code_end)) {
+        alloc_start = code_region_start;
+        alloc_size = process_code_start - code_region_start;
+    } else {
+        alloc_start = process_code_end;
+        alloc_size = end - process_code_end;
+    }
+    const std::size_t needed_size{
+        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
+    if (alloc_size < needed_size) {
+        UNREACHABLE();
+        return ERR_OUT_OF_MEMORY;
+    }
+
+    const std::size_t remaining_size{alloc_size - needed_size};
+
+    // Determine random placements for each region
+    std::size_t alias_rnd{}, heap_rnd{}, stack_rnd{}, kmap_rnd{};
+    if (enable_aslr) {
+        alias_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+                    RegionAlignment;
+        heap_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+                   RegionAlignment;
+        stack_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+                    RegionAlignment;
+        kmap_rnd = SystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) *
+                   RegionAlignment;
+    }
+
+    // Setup heap and alias regions
+    alias_region_start = alloc_start + alias_rnd;
+    alias_region_end = alias_region_start + alias_region_size;
+    heap_region_start = alloc_start + heap_rnd;
+    heap_region_end = heap_region_start + heap_region_size;
+
+    if (alias_rnd <= heap_rnd) {
+        heap_region_start += alias_region_size;
+        heap_region_end += alias_region_size;
+    } else {
+        alias_region_start += heap_region_size;
+        alias_region_end += heap_region_size;
+    }
+
+    // Setup stack region
+    if (stack_region_size) {
+        stack_region_start = alloc_start + stack_rnd;
+        stack_region_end = stack_region_start + stack_region_size;
+
+        if (alias_rnd < stack_rnd) {
+            stack_region_start += alias_region_size;
+            stack_region_end += alias_region_size;
+        } else {
+            alias_region_start += stack_region_size;
+            alias_region_end += stack_region_size;
+        }
+
+        if (heap_rnd < stack_rnd) {
+            stack_region_start += heap_region_size;
+            stack_region_end += heap_region_size;
+        } else {
+            heap_region_start += stack_region_size;
+            heap_region_end += stack_region_size;
+        }
+    }
+
+    // Setup kernel map region
+    if (kernel_map_region_size) {
+        kernel_map_region_start = alloc_start + kmap_rnd;
+        kernel_map_region_end = kernel_map_region_start + kernel_map_region_size;
+
+        if (alias_rnd < kmap_rnd) {
+            kernel_map_region_start += alias_region_size;
+            kernel_map_region_end += alias_region_size;
+        } else {
+            alias_region_start += kernel_map_region_size;
+            alias_region_end += kernel_map_region_size;
+        }
+
+        if (heap_rnd < kmap_rnd) {
+            kernel_map_region_start += heap_region_size;
+            kernel_map_region_end += heap_region_size;
+        } else {
+            heap_region_start += kernel_map_region_size;
+            heap_region_end += kernel_map_region_size;
+        }
+
+        if (stack_region_size) {
+            if (stack_rnd < kmap_rnd) {
+                kernel_map_region_start += stack_region_size;
+                kernel_map_region_end += stack_region_size;
+            } else {
+                stack_region_start += kernel_map_region_size;
+                stack_region_end += kernel_map_region_size;
+            }
+        }
+    }
+
+    // Set heap members
+    current_heap_end = heap_region_start;
+    max_heap_size = 0;
+    max_physical_memory_size = 0;
+
+    // Ensure that the regions we chose are inside our address space
+    auto IsInAddressSpace = [&](VAddr addr) {
+        return address_space_start <= addr && addr <= address_space_end;
+    };
+    ASSERT(IsInAddressSpace(alias_region_start));
+    ASSERT(IsInAddressSpace(alias_region_end));
+    ASSERT(IsInAddressSpace(heap_region_start));
+    ASSERT(IsInAddressSpace(heap_region_end));
+    ASSERT(IsInAddressSpace(stack_region_start));
+    ASSERT(IsInAddressSpace(stack_region_end));
+    ASSERT(IsInAddressSpace(kernel_map_region_start));
+    ASSERT(IsInAddressSpace(kernel_map_region_end));
+
+    // Ensure that we selected regions that don't overlap
+    const VAddr alias_start{alias_region_start};
+    const VAddr alias_last{alias_region_end - 1};
+    const VAddr heap_start{heap_region_start};
+    const VAddr heap_last{heap_region_end - 1};
+    const VAddr stack_start{stack_region_start};
+    const VAddr stack_last{stack_region_end - 1};
+    const VAddr kmap_start{kernel_map_region_start};
+    const VAddr kmap_last{kernel_map_region_end - 1};
+    ASSERT(alias_last < heap_start || heap_last < alias_start);
+    ASSERT(alias_last < stack_start || stack_last < alias_start);
+    ASSERT(alias_last < kmap_start || kmap_last < alias_start);
+    ASSERT(heap_last < stack_start || stack_last < heap_start);
+    ASSERT(heap_last < kmap_start || kmap_last < heap_start);
+
+    current_heap_addr = heap_region_start;
+    heap_capacity = 0;
+    physical_memory_usage = 0;
+    memory_pool = pool;
+
+    page_table_impl.Resize(address_space_width, PageBits, true);
+
+    return InitializeMemoryLayout(start, end);
+}
+
+ResultCode PageTable::MapProcessCode(VAddr addr, std::size_t num_pages, MemoryState state,
+                                     MemoryPermission perm) {
+    std::lock_guard lock{page_table_lock};
+
+    const u64 size{num_pages * PageSize};
+
+    if (!CanContain(addr, size, state)) {
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    if (IsRegionMapped(addr, size)) {
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
+    PageLinkedList page_linked_list;
+    CASCADE_CODE(
+        system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool));
+    CASCADE_CODE(Operate(addr, num_pages, page_linked_list, OperationType::MapGroup));
+
+    block_manager->Update(addr, num_pages, state, perm);
+
+    return RESULT_SUCCESS;
+}
+
+ResultCode PageTable::MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
+    std::lock_guard lock{page_table_lock};
+
+    const std::size_t num_pages{size / PageSize};
+
+    MemoryState state{};
+    MemoryPermission perm{};
+    CASCADE_CODE(CheckMemoryState(&state, &perm, nullptr, src_addr, size, MemoryState::All,
+                                  MemoryState::Normal, MemoryPermission::Mask,
+                                  MemoryPermission::ReadAndWrite, MemoryAttribute::Mask,
+                                  MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped));
+
+    if (IsRegionMapped(dst_addr, size)) {
+        return ERR_INVALID_ADDRESS_STATE;
+    }
+
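+    // The same physical pages backing src_addr are gathered and mapped again at
+    // dst_addr; the source is reprotected to None and, if the remap fails, the
+    // ScopeExit guard below restores the original permissions.
+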
PageLinkedList page_linked_list; + AddRegionToPages(src_addr, num_pages, page_linked_list); + + { + auto block_guard = detail::ScopeExit( + [&] { Operate(src_addr, num_pages, perm, OperationType::ChangePermissions); }); + + CASCADE_CODE( + Operate(src_addr, num_pages, MemoryPermission::None, OperationType::ChangePermissions)); + CASCADE_CODE(MapPages(dst_addr, page_linked_list, MemoryPermission::None)); + + block_guard.Cancel(); + } + + block_manager->Update(src_addr, num_pages, state, MemoryPermission::None, + MemoryAttribute::Locked); + block_manager->Update(dst_addr, num_pages, MemoryState::AliasCode); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + if (!size) { + return RESULT_SUCCESS; + } + + const std::size_t num_pages{size / PageSize}; + + CASCADE_CODE(CheckMemoryState(nullptr, nullptr, nullptr, src_addr, size, MemoryState::All, + MemoryState::Normal, MemoryPermission::None, + MemoryPermission::None, MemoryAttribute::Mask, + MemoryAttribute::Locked, MemoryAttribute::IpcAndDeviceMapped)); + + MemoryState state{}; + CASCADE_CODE(CheckMemoryState( + &state, nullptr, nullptr, dst_addr, PageSize, MemoryState::FlagCanCodeAlias, + MemoryState::FlagCanCodeAlias, MemoryPermission::None, MemoryPermission::None, + MemoryAttribute::Mask, MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)); + CASCADE_CODE(CheckMemoryState(dst_addr, size, MemoryState::All, state, MemoryPermission::None, + MemoryPermission::None, MemoryAttribute::Mask, + MemoryAttribute::None)); + CASCADE_CODE(Operate(dst_addr, num_pages, MemoryPermission::None, OperationType::Unmap)); + + block_manager->Update(dst_addr, num_pages, MemoryState::Free); + block_manager->Update(src_addr, num_pages, MemoryState::Normal, MemoryPermission::ReadAndWrite); + + return RESULT_SUCCESS; +} + +void PageTable::MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start, VAddr end) { + auto node{page_linked_list.Nodes().begin()}; + PAddr map_addr{node->GetAddress()}; + std::size_t src_num_pages{node->GetNumPages()}; + + block_manager->IterateForRange(start, end, [&](const MemoryInfo& info) { + if (info.state != MemoryState::Free) { + return; + } + + std::size_t dst_num_pages{GetSizeInRange(info, start, end) / PageSize}; + VAddr dst_addr{GetAddressInRange(info, start)}; + + while (dst_num_pages) { + if (!src_num_pages) { + node = std::next(node); + map_addr = node->GetAddress(); + src_num_pages = node->GetNumPages(); + } + + const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)}; + Operate(dst_addr, num_pages, MemoryPermission::ReadAndWrite, OperationType::Map, + map_addr); + + dst_addr += num_pages * PageSize; + map_addr += num_pages * PageSize; + src_num_pages -= num_pages; + dst_num_pages -= num_pages; + } + }); +} + +ResultCode PageTable::MapPhysicalMemory(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + std::size_t mapped_size{}; + const VAddr end_addr{addr + size}; + + block_manager->IterateForRange(addr, end_addr, [&](const MemoryInfo& info) { + if (info.state != MemoryState::Free) { + mapped_size += GetSizeInRange(info, addr, end_addr); + } + }); + + if (mapped_size == size) { + return RESULT_SUCCESS; + } + + auto process{system.Kernel().CurrentProcess()}; + const std::size_t remaining_size{size - mapped_size}; + const std::size_t remaining_pages{remaining_size / PageSize}; + + if (process->GetResourceLimit() && + 
!process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, remaining_size)) { + return ERR_RESOURCE_LIMIT_EXCEEDED; + } + + PageLinkedList page_linked_list; + { + auto block_guard = detail::ScopeExit([&] { + system.Kernel().MemoryManager().Free(page_linked_list, remaining_pages, memory_pool); + process->GetResourceLimit()->Release(ResourceType::PhysicalMemory, remaining_size); + }); + + CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages, + memory_pool)); + + block_guard.Cancel(); + } + + MapPhysicalMemory(page_linked_list, addr, end_addr); + + physical_memory_usage += remaining_size; + + const std::size_t num_pages{size / PageSize}; + block_manager->Update(addr, num_pages, MemoryState::Free, MemoryPermission::None, + MemoryAttribute::None, MemoryState::Normal, + MemoryPermission::ReadAndWrite, MemoryAttribute::None); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + const VAddr end_addr{addr + size}; + ResultCode result{RESULT_SUCCESS}; + std::size_t mapped_size{}; + + // Verify that the region can be unmapped + block_manager->IterateForRange(addr, end_addr, [&](const MemoryInfo& info) { + if (info.state == MemoryState::Normal) { + if (info.attribute != MemoryAttribute::None) { + result = ERR_INVALID_ADDRESS_STATE; + return; + } + mapped_size += GetSizeInRange(info, addr, end_addr); + } else if (info.state != MemoryState::Free) { + result = ERR_INVALID_ADDRESS_STATE; + } + }); + + if (result.IsError()) { + return result; + } + + if (!mapped_size) { + return RESULT_SUCCESS; + } + + CASCADE_CODE(UnmapMemory(addr, size)); + + auto process{system.Kernel().CurrentProcess()}; + process->GetResourceLimit()->Release(ResourceType::PhysicalMemory, mapped_size); + physical_memory_usage -= mapped_size; + + return RESULT_SUCCESS; +} + +ResultCode PageTable::UnmapMemory(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + const VAddr end_addr{addr + size}; + ResultCode result{RESULT_SUCCESS}; + PageLinkedList page_linked_list; + + // Unmap each region within the range + block_manager->IterateForRange(addr, end_addr, [&](const MemoryInfo& info) { + if (info.state == MemoryState::Normal) { + const std::size_t block_size{GetSizeInRange(info, addr, end_addr)}; + const std::size_t block_num_pages{block_size / PageSize}; + const VAddr block_addr{GetAddressInRange(info, addr)}; + + AddRegionToPages(block_addr, block_size / PageSize, page_linked_list); + + if (result = Operate(block_addr, block_num_pages, MemoryPermission::None, + OperationType::Unmap); + result.IsError()) { + return; + } + } + }); + + if (result.IsError()) { + return result; + } + + const std::size_t num_pages{size / PageSize}; + system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool); + + block_manager->Update(addr, num_pages, MemoryState::Free); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::Map(VAddr dst_addr, VAddr src_addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + MemoryState src_state{}; + CASCADE_CODE(CheckMemoryState( + &src_state, nullptr, nullptr, src_addr, size, MemoryState::FlagCanAlias, + MemoryState::FlagCanAlias, MemoryPermission::Mask, MemoryPermission::ReadAndWrite, + MemoryAttribute::Mask, MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)); + + if (IsRegionMapped(dst_addr, size)) { + return ERR_INVALID_ADDRESS_STATE; + } + + PageLinkedList page_linked_list; + const std::size_t num_pages{size / 
PageSize}; + + AddRegionToPages(src_addr, num_pages, page_linked_list); + + { + auto block_guard = detail::ScopeExit([&] { + Operate(src_addr, num_pages, MemoryPermission::ReadAndWrite, + OperationType::ChangePermissions); + }); + + CASCADE_CODE( + Operate(src_addr, num_pages, MemoryPermission::None, OperationType::ChangePermissions)); + CASCADE_CODE(MapPages(dst_addr, page_linked_list, MemoryPermission::ReadAndWrite)); + + block_guard.Cancel(); + } + + block_manager->Update(src_addr, num_pages, src_state, MemoryPermission::None, + MemoryAttribute::Locked); + block_manager->Update(dst_addr, num_pages, MemoryState::Stack, MemoryPermission::ReadAndWrite); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + MemoryState src_state{}; + CASCADE_CODE(CheckMemoryState( + &src_state, nullptr, nullptr, src_addr, size, MemoryState::FlagCanAlias, + MemoryState::FlagCanAlias, MemoryPermission::Mask, MemoryPermission::None, + MemoryAttribute::Mask, MemoryAttribute::Locked, MemoryAttribute::IpcAndDeviceMapped)); + + MemoryPermission dst_perm{}; + CASCADE_CODE(CheckMemoryState(nullptr, &dst_perm, nullptr, dst_addr, size, MemoryState::All, + MemoryState::Stack, MemoryPermission::None, + MemoryPermission::None, MemoryAttribute::Mask, + MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)); + + PageLinkedList src_pages; + PageLinkedList dst_pages; + const std::size_t num_pages{size / PageSize}; + + AddRegionToPages(src_addr, num_pages, src_pages); + AddRegionToPages(dst_addr, num_pages, dst_pages); + + if (!dst_pages.IsEqual(src_pages)) { + return ERR_INVALID_MEMORY_RANGE; + } + + { + auto block_guard = detail::ScopeExit([&] { MapPages(dst_addr, dst_pages, dst_perm); }); + + CASCADE_CODE(Operate(dst_addr, num_pages, MemoryPermission::None, OperationType::Unmap)); + CASCADE_CODE(Operate(src_addr, num_pages, MemoryPermission::ReadAndWrite, + OperationType::ChangePermissions)); + + block_guard.Cancel(); + } + + block_manager->Update(src_addr, num_pages, src_state, MemoryPermission::ReadAndWrite); + block_manager->Update(dst_addr, num_pages, MemoryState::Free); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::MapPages(VAddr addr, const PageLinkedList& page_linked_list, + MemoryPermission perm) { + VAddr cur_addr{addr}; + + for (const auto& node : page_linked_list.Nodes()) { + if (const auto result{ + Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())}; + result.IsError()) { + const MemoryInfo info{block_manager->FindBlock(cur_addr).GetMemoryInfo()}; + const std::size_t num_pages{(addr - cur_addr) / PageSize}; + + ASSERT( + Operate(addr, num_pages, MemoryPermission::None, OperationType::Unmap).IsSuccess()); + + return result; + } + + cur_addr += node.GetNumPages() * PageSize; + } + + return RESULT_SUCCESS; +} + +ResultCode PageTable::MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state, + MemoryPermission perm) { + std::lock_guard lock{page_table_lock}; + + const std::size_t num_pages{page_linked_list.GetNumPages()}; + const std::size_t size{num_pages * PageSize}; + + if (!CanContain(addr, size, state)) { + return ERR_INVALID_ADDRESS_STATE; + } + + if (IsRegionMapped(addr, num_pages * PageSize)) { + return ERR_INVALID_ADDRESS_STATE; + } + + CASCADE_CODE(MapPages(addr, page_linked_list, perm)); + + block_manager->Update(addr, num_pages, state, perm); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::SetCodeMemoryPermission(VAddr addr, 
std::size_t size, MemoryPermission perm) { + + std::lock_guard lock{page_table_lock}; + + MemoryState prev_state{}; + MemoryPermission prev_perm{}; + + CASCADE_CODE(CheckMemoryState( + &prev_state, &prev_perm, nullptr, addr, size, MemoryState::FlagCode, MemoryState::FlagCode, + MemoryPermission::None, MemoryPermission::None, MemoryAttribute::Mask, + MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped)); + + MemoryState state{prev_state}; + + // Ensure state is mutable if permission allows write + if ((perm & MemoryPermission::Write) != MemoryPermission::None) { + if (prev_state == MemoryState::Code) { + state = MemoryState::CodeData; + } else if (prev_state == MemoryState::AliasCode) { + state = MemoryState::AliasCodeData; + } else { + UNREACHABLE(); + } + } + + // Return early if there is nothing to change + if (state == prev_state && perm == prev_perm) { + return RESULT_SUCCESS; + } + + const std::size_t num_pages{size / PageSize}; + const OperationType operation{(perm & MemoryPermission::Execute) != MemoryPermission::None + ? OperationType::ChangePermissionsAndRefresh + : OperationType::ChangePermissions}; + + CASCADE_CODE(Operate(addr, num_pages, perm, operation)); + + block_manager->Update(addr, num_pages, state, perm); + + return RESULT_SUCCESS; +} + +MemoryInfo PageTable::QueryInfoImpl(VAddr addr) { + std::lock_guard lock{page_table_lock}; + + return block_manager->FindBlock(addr).GetMemoryInfo(); +} + +MemoryInfo PageTable::QueryInfo(VAddr addr) { + if (!Contains(addr, 1)) { + return {address_space_end, 0 - address_space_end, MemoryState::Inaccessible, + MemoryPermission::None, MemoryAttribute::None, MemoryPermission::None}; + } + + return QueryInfoImpl(addr); +} + +ResultCode PageTable::ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm) { + std::lock_guard lock{page_table_lock}; + + MemoryState state{}; + MemoryAttribute attribute{}; + + CASCADE_CODE(CheckMemoryState(&state, nullptr, &attribute, addr, size, + MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted, + MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted, + MemoryPermission::Mask, MemoryPermission::ReadAndWrite, + MemoryAttribute::Mask, MemoryAttribute::None, + MemoryAttribute::IpcAndDeviceMapped)); + + block_manager->Update(addr, size / PageSize, state, perm, attribute | MemoryAttribute::Locked); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::ResetTransferMemory(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + MemoryState state{}; + + CASCADE_CODE(CheckMemoryState(&state, nullptr, nullptr, addr, size, + MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted, + MemoryState::FlagCanTransfer | MemoryState::FlagReferenceCounted, + MemoryPermission::None, MemoryPermission::None, + MemoryAttribute::Mask, MemoryAttribute::Locked, + MemoryAttribute::IpcAndDeviceMapped)); + + block_manager->Update(addr, size / PageSize, state, MemoryPermission::ReadAndWrite); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask, + MemoryAttribute value) { + std::lock_guard lock{page_table_lock}; + + MemoryState state{}; + MemoryPermission perm{}; + MemoryAttribute attribute{}; + + CASCADE_CODE(CheckMemoryState(&state, &perm, &attribute, addr, size, + MemoryState::FlagCanChangeAttribute, + MemoryState::FlagCanChangeAttribute, MemoryPermission::None, + MemoryPermission::None, MemoryAttribute::LockedAndIpcLocked, + MemoryAttribute::None, 
MemoryAttribute::DeviceSharedAndUncached)); + + attribute = attribute & ~mask; + attribute = attribute | (mask & value); + + block_manager->Update(addr, size / PageSize, state, perm, attribute); + + return RESULT_SUCCESS; +} + +ResultCode PageTable::SetHeapCapacity(std::size_t new_heap_capacity) { + std::lock_guard lock{page_table_lock}; + heap_capacity = new_heap_capacity; + return RESULT_SUCCESS; +} + +ResultVal<VAddr> PageTable::SetHeapSize(std::size_t size) { + + if (size > heap_region_end - heap_region_start) { + return ERR_OUT_OF_MEMORY; + } + + const u64 previous_heap_size{GetHeapSize()}; + + UNIMPLEMENTED_IF_MSG(previous_heap_size > size, "Heap shrink is unimplemented"); + + // Increase the heap size + { + std::lock_guard lock{page_table_lock}; + + const u64 delta{size - previous_heap_size}; + + auto process{system.Kernel().CurrentProcess()}; + if (process->GetResourceLimit() && delta != 0 && + !process->GetResourceLimit()->Reserve(ResourceType::PhysicalMemory, delta)) { + return ERR_RESOURCE_LIMIT_EXCEEDED; + } + + PageLinkedList page_linked_list; + const std::size_t num_pages{delta / PageSize}; + + CASCADE_CODE( + system.Kernel().MemoryManager().Allocate(page_linked_list, num_pages, memory_pool)); + + if (IsRegionMapped(current_heap_addr, delta)) { + return ERR_INVALID_ADDRESS_STATE; + } + + CASCADE_CODE( + Operate(current_heap_addr, num_pages, page_linked_list, OperationType::MapGroup)); + + block_manager->Update(current_heap_addr, num_pages, MemoryState::Normal, + MemoryPermission::ReadAndWrite); + + current_heap_addr = heap_region_start + size; + } + + return MakeResult<VAddr>(heap_region_start); +} + +ResultVal<VAddr> PageTable::AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, + bool is_map_only, VAddr region_start, + std::size_t region_num_pages, MemoryState state, + MemoryPermission perm, PAddr map_addr) { + std::lock_guard lock{page_table_lock}; + + if (!CanContain(region_start, region_num_pages * PageSize, state)) { + return ERR_INVALID_ADDRESS_STATE; + } + + if (region_num_pages <= needed_num_pages) { + return ERR_OUT_OF_MEMORY; + } + + const VAddr addr{ + AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)}; + if (!addr) { + return ERR_OUT_OF_MEMORY; + } + + if (is_map_only) { + CASCADE_CODE(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr)); + } else { + PageLinkedList page_group; + CASCADE_CODE( + system.Kernel().MemoryManager().Allocate(page_group, needed_num_pages, memory_pool)); + CASCADE_CODE(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup)); + } + + block_manager->Update(addr, needed_num_pages, state, perm); + + return MakeResult<VAddr>(addr); +} + +ResultCode PageTable::InitializeMemoryLayout(VAddr start, VAddr end) { + block_manager = std::make_unique<MemoryBlockManager>(start, end); + + return RESULT_SUCCESS; +} + +bool PageTable::IsRegionMapped(VAddr address, u64 size) { + return CheckMemoryState(address, size, MemoryState::All, MemoryState::Free, + MemoryPermission::Mask, MemoryPermission::None, MemoryAttribute::Mask, + MemoryAttribute::None, MemoryAttribute::IpcAndDeviceMapped) + .IsError(); +} + +bool PageTable::IsRegionContiguous(VAddr addr, u64 size) const { + auto start_ptr = system.Memory().GetPointer(addr); + for (u64 offset{}; offset < size; offset += PageSize) { + if (start_ptr != system.Memory().GetPointer(addr + offset)) { + return false; + } + start_ptr += PageSize; + } + return true; +} + +void PageTable::AddRegionToPages(VAddr start, std::size_t num_pages, + 
PageLinkedList& page_linked_list) { + VAddr addr{start}; + while (addr < start + (num_pages * PageSize)) { + const PAddr paddr{GetPhysicalAddr(addr)}; + if (!paddr) { + UNREACHABLE(); + } + page_linked_list.AddBlock(paddr, 1); + addr += PageSize; + } +} + +VAddr PageTable::AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, + u64 needed_num_pages, std::size_t align) { + if (is_aslr_enabled) { + UNIMPLEMENTED(); + } + return block_manager->FindFreeArea(start, region_num_pages, needed_num_pages, align, 0, + IsKernel() ? 1 : 4); +} + +ResultCode PageTable::Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group, + OperationType operation) { + std::lock_guard lock{page_table_lock}; + + ASSERT(Common::IsAligned(addr, PageSize)); + ASSERT(num_pages > 0); + ASSERT(num_pages == page_group.GetNumPages()); + + for (const auto& node : page_group.Nodes()) { + const std::size_t size{node.GetNumPages() * PageSize}; + + switch (operation) { + case OperationType::MapGroup: + system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress()); + break; + default: + UNREACHABLE(); + } + + addr += size; + } + + return RESULT_SUCCESS; +} + +ResultCode PageTable::Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm, + OperationType operation, PAddr map_addr) { + std::lock_guard lock{page_table_lock}; + + ASSERT(num_pages > 0); + ASSERT(Common::IsAligned(addr, PageSize)); + ASSERT(ContainsPages(addr, num_pages)); + + switch (operation) { + case OperationType::Unmap: + system.Memory().UnmapRegion(page_table_impl, addr, num_pages * PageSize); + break; + case OperationType::Map: { + ASSERT(map_addr); + ASSERT(Common::IsAligned(map_addr, PageSize)); + system.Memory().MapMemoryRegion(page_table_impl, addr, num_pages * PageSize, map_addr); + break; + } + case OperationType::ChangePermissions: + case OperationType::ChangePermissionsAndRefresh: + break; + default: + UNREACHABLE(); + } + return RESULT_SUCCESS; +} + +constexpr VAddr PageTable::GetRegionAddress(MemoryState state) const { + switch (state) { + case MemoryState::Free: + case MemoryState::Kernel: + return address_space_start; + case MemoryState::Normal: + return heap_region_start; + case MemoryState::Ipc: + case MemoryState::NonSecureIpc: + case MemoryState::NonDeviceIpc: + return alias_region_start; + case MemoryState::Stack: + return stack_region_start; + case MemoryState::Io: + case MemoryState::Static: + case MemoryState::ThreadLocal: + return kernel_map_region_start; + case MemoryState::Shared: + case MemoryState::AliasCode: + case MemoryState::AliasCodeData: + case MemoryState::Transfered: + case MemoryState::SharedTransfered: + case MemoryState::SharedCode: + case MemoryState::GeneratedCode: + case MemoryState::CodeOut: + return alias_code_region_start; + case MemoryState::Code: + case MemoryState::CodeData: + return code_region_start; + default: + UNREACHABLE(); + return {}; + } +} + +constexpr std::size_t PageTable::GetRegionSize(MemoryState state) const { + switch (state) { + case MemoryState::Free: + case MemoryState::Kernel: + return address_space_end - address_space_start; + case MemoryState::Normal: + return heap_region_end - heap_region_start; + case MemoryState::Ipc: + case MemoryState::NonSecureIpc: + case MemoryState::NonDeviceIpc: + return alias_region_end - alias_region_start; + case MemoryState::Stack: + return stack_region_end - stack_region_start; + case MemoryState::Io: + case MemoryState::Static: + case MemoryState::ThreadLocal: + return kernel_map_region_end - 
kernel_map_region_start; + case MemoryState::Shared: + case MemoryState::AliasCode: + case MemoryState::AliasCodeData: + case MemoryState::Transfered: + case MemoryState::SharedTransfered: + case MemoryState::SharedCode: + case MemoryState::GeneratedCode: + case MemoryState::CodeOut: + return alias_code_region_end - alias_code_region_start; + case MemoryState::Code: + case MemoryState::CodeData: + return code_region_end - code_region_start; + default: + UNREACHABLE(); + return {}; + } +} + +constexpr bool PageTable::CanContain(VAddr addr, std::size_t size, MemoryState state) const { + const VAddr end{addr + size}; + const VAddr last{end - 1}; + const VAddr region_start{GetRegionAddress(state)}; + const std::size_t region_size{GetRegionSize(state)}; + const bool is_in_region{region_start <= addr && addr < end && + last <= region_start + region_size - 1}; + const bool is_in_heap{!(end <= heap_region_start || heap_region_end <= addr)}; + const bool is_in_alias{!(end <= alias_region_start || alias_region_end <= addr)}; + + switch (state) { + case MemoryState::Free: + case MemoryState::Kernel: + return is_in_region; + case MemoryState::Io: + case MemoryState::Static: + case MemoryState::Code: + case MemoryState::CodeData: + case MemoryState::Shared: + case MemoryState::AliasCode: + case MemoryState::AliasCodeData: + case MemoryState::Stack: + case MemoryState::ThreadLocal: + case MemoryState::Transfered: + case MemoryState::SharedTransfered: + case MemoryState::SharedCode: + case MemoryState::GeneratedCode: + case MemoryState::CodeOut: + return is_in_region && !is_in_heap && !is_in_alias; + case MemoryState::Normal: + ASSERT(is_in_heap); + return is_in_region && !is_in_alias; + case MemoryState::Ipc: + case MemoryState::NonSecureIpc: + case MemoryState::NonDeviceIpc: + ASSERT(is_in_alias); + return is_in_region && !is_in_heap; + default: + return false; + } +} + +constexpr ResultCode PageTable::CheckMemoryState(const MemoryInfo& info, MemoryState state_mask, + MemoryState state, MemoryPermission perm_mask, + MemoryPermission perm, MemoryAttribute attr_mask, + MemoryAttribute attr) const { + // Validate the states match expectation + if ((info.state & state_mask) != state) { + return ERR_INVALID_ADDRESS_STATE; + } + if ((info.perm & perm_mask) != perm) { + return ERR_INVALID_ADDRESS_STATE; + } + if ((info.attribute & attr_mask) != attr) { + return ERR_INVALID_ADDRESS_STATE; + } + + return RESULT_SUCCESS; +} + +ResultCode PageTable::CheckMemoryState(MemoryState* out_state, MemoryPermission* out_perm, + MemoryAttribute* out_attr, VAddr addr, std::size_t size, + MemoryState state_mask, MemoryState state, + MemoryPermission perm_mask, MemoryPermission perm, + MemoryAttribute attr_mask, MemoryAttribute attr, + MemoryAttribute ignore_attr) { + std::lock_guard lock{page_table_lock}; + + // Get information about the first block + const VAddr last_addr{addr + size - 1}; + MemoryBlockManager::const_iterator it{block_manager->FindIterator(addr)}; + MemoryInfo info{it->GetMemoryInfo()}; + + // Validate all blocks in the range have correct state + const MemoryState first_state{info.state}; + const MemoryPermission first_perm{info.perm}; + const MemoryAttribute first_attr{info.attribute}; + + while (true) { + // Validate the current block + if (!(info.state == first_state)) { + return ERR_INVALID_ADDRESS_STATE; + } + if (!(info.perm == first_perm)) { + return ERR_INVALID_ADDRESS_STATE; + } + if (!((info.attribute | static_cast<MemoryAttribute>(ignore_attr)) == + (first_attr | 
static_cast<MemoryAttribute>(ignore_attr)))) { + return ERR_INVALID_ADDRESS_STATE; + } + + // Validate against the provided masks + CASCADE_CODE(CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); + + // Break once we're done + if (last_addr <= info.GetLastAddress()) { + break; + } + + // Advance our iterator + it++; + ASSERT(it != block_manager->cend()); + info = it->GetMemoryInfo(); + } + + // Write output state + if (out_state) { + *out_state = first_state; + } + if (out_perm) { + *out_perm = first_perm; + } + if (out_attr) { + *out_attr = first_attr & static_cast<MemoryAttribute>(~ignore_attr); + } + + return RESULT_SUCCESS; +} + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/page_table.h b/src/core/hle/kernel/memory/page_table.h new file mode 100644 index 000000000..80384ab0f --- /dev/null +++ b/src/core/hle/kernel/memory/page_table.h @@ -0,0 +1,276 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include <list> +#include <memory> +#include <mutex> + +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "common/page_table.h" +#include "core/file_sys/program_metadata.h" +#include "core/hle/kernel/memory/memory_block.h" +#include "core/hle/kernel/memory/memory_manager.h" + +namespace Core { +class System; +} + +namespace Kernel::Memory { + +class MemoryBlockManager; + +class PageTable final : NonCopyable { +public: + explicit PageTable(Core::System& system); + + ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr, + VAddr code_addr, std::size_t code_size, + Memory::MemoryManager::Pool pool); + ResultCode MapProcessCode(VAddr addr, std::size_t pages_count, MemoryState state, + MemoryPermission perm); + ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); + ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); + ResultCode MapPhysicalMemory(VAddr addr, std::size_t size); + ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size); + ResultCode UnmapMemory(VAddr addr, std::size_t size); + ResultCode Map(VAddr dst_addr, VAddr src_addr, std::size_t size); + ResultCode Unmap(VAddr dst_addr, VAddr src_addr, std::size_t size); + ResultCode MapPages(VAddr addr, PageLinkedList& page_linked_list, MemoryState state, + MemoryPermission perm); + ResultCode SetCodeMemoryPermission(VAddr addr, std::size_t size, MemoryPermission perm); + MemoryInfo QueryInfo(VAddr addr); + ResultCode ReserveTransferMemory(VAddr addr, std::size_t size, MemoryPermission perm); + ResultCode ResetTransferMemory(VAddr addr, std::size_t size); + ResultCode SetMemoryAttribute(VAddr addr, std::size_t size, MemoryAttribute mask, + MemoryAttribute value); + ResultCode SetHeapCapacity(std::size_t new_heap_capacity); + ResultVal<VAddr> SetHeapSize(std::size_t size); + ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align, + bool is_map_only, VAddr region_start, + std::size_t region_num_pages, MemoryState state, + MemoryPermission perm, PAddr map_addr = 0); + + Common::PageTable& PageTableImpl() { + return page_table_impl; + } + + const Common::PageTable& PageTableImpl() const { + return page_table_impl; + } + +private: + enum class OperationType : u32 { + Map, + MapGroup, + Unmap, + ChangePermissions, + ChangePermissionsAndRefresh, + }; + + static constexpr MemoryAttribute DefaultMemoryIgnoreAttr = + 
MemoryAttribute::DontCareMask | MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared; + + ResultCode InitializeMemoryLayout(VAddr start, VAddr end); + ResultCode MapPages(VAddr addr, const PageLinkedList& page_linked_list, MemoryPermission perm); + void MapPhysicalMemory(PageLinkedList& page_linked_list, VAddr start, VAddr end); + bool IsRegionMapped(VAddr address, u64 size); + bool IsRegionContiguous(VAddr addr, u64 size) const; + void AddRegionToPages(VAddr start, std::size_t num_pages, PageLinkedList& page_linked_list); + MemoryInfo QueryInfoImpl(VAddr addr); + VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages, + std::size_t align); + ResultCode Operate(VAddr addr, std::size_t num_pages, const PageLinkedList& page_group, + OperationType operation); + ResultCode Operate(VAddr addr, std::size_t num_pages, MemoryPermission perm, + OperationType operation, PAddr map_addr = 0); + constexpr VAddr GetRegionAddress(MemoryState state) const; + constexpr std::size_t GetRegionSize(MemoryState state) const; + constexpr bool CanContain(VAddr addr, std::size_t size, MemoryState state) const; + + constexpr ResultCode CheckMemoryState(const MemoryInfo& info, MemoryState state_mask, + MemoryState state, MemoryPermission perm_mask, + MemoryPermission perm, MemoryAttribute attr_mask, + MemoryAttribute attr) const; + ResultCode CheckMemoryState(MemoryState* out_state, MemoryPermission* out_perm, + MemoryAttribute* out_attr, VAddr addr, std::size_t size, + MemoryState state_mask, MemoryState state, + MemoryPermission perm_mask, MemoryPermission perm, + MemoryAttribute attr_mask, MemoryAttribute attr, + MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr); + ResultCode CheckMemoryState(VAddr addr, std::size_t size, MemoryState state_mask, + MemoryState state, MemoryPermission perm_mask, + MemoryPermission perm, MemoryAttribute attr_mask, + MemoryAttribute attr, + MemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) { + return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask, + perm, attr_mask, attr, ignore_attr); + } + + std::recursive_mutex page_table_lock; + std::unique_ptr<MemoryBlockManager> block_manager; + +public: + constexpr VAddr GetAddressSpaceStart() const { + return address_space_start; + } + constexpr VAddr GetAddressSpaceEnd() const { + return address_space_end; + } + constexpr std::size_t GetAddressSpaceSize() const { + return address_space_end - address_space_start; + } + constexpr VAddr GetHeapRegionStart() const { + return heap_region_start; + } + constexpr VAddr GetHeapRegionEnd() const { + return heap_region_end; + } + constexpr std::size_t GetHeapRegionSize() const { + return heap_region_end - heap_region_start; + } + constexpr VAddr GetAliasRegionStart() const { + return alias_region_start; + } + constexpr VAddr GetAliasRegionEnd() const { + return alias_region_end; + } + constexpr std::size_t GetAliasRegionSize() const { + return alias_region_end - alias_region_start; + } + constexpr VAddr GetStackRegionStart() const { + return stack_region_start; + } + constexpr VAddr GetStackRegionEnd() const { + return stack_region_end; + } + constexpr std::size_t GetStackRegionSize() const { + return stack_region_end - stack_region_start; + } + constexpr VAddr GetKernelMapRegionStart() const { + return kernel_map_region_start; + } + constexpr VAddr GetKernelMapRegionEnd() const { + return kernel_map_region_end; + } + constexpr VAddr GetCodeRegionStart() const { + return code_region_start; + } + constexpr VAddr 
GetCodeRegionEnd() const { + return code_region_end; + } + constexpr VAddr GetAliasCodeRegionStart() const { + return alias_code_region_start; + } + constexpr VAddr GetAliasCodeRegionSize() const { + return alias_code_region_end - alias_code_region_start; + } + constexpr std::size_t GetAddressSpaceWidth() const { + return address_space_width; + } + constexpr std::size_t GetHeapSize() { + return current_heap_addr - heap_region_start; + } + constexpr std::size_t GetTotalHeapSize() { + return GetHeapSize() + physical_memory_usage; + } + constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const { + return address_space_start <= address && address + size - 1 <= address_space_end - 1; + } + constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const { + return alias_region_start > address || address + size - 1 > alias_region_end - 1; + } + constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const { + return stack_region_start > address || address + size - 1 > stack_region_end - 1; + } + constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const { + return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1; + } + constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const { + return address + size > heap_region_start && heap_region_end > address; + } + constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const { + return address + size > alias_region_start && alias_region_end > address; + } + constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const { + if (IsInvalidRegion(address, size)) { + return true; + } + if (IsInsideHeapRegion(address, size)) { + return true; + } + if (IsInsideAliasRegion(address, size)) { + return true; + } + return {}; + } + constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const { + return !IsOutsideASLRRegion(address, size); + } + constexpr PAddr GetPhysicalAddr(VAddr addr) { + return page_table_impl.backing_addr[addr >> Memory::PageBits] + addr; + } + +private: + constexpr bool Contains(VAddr addr) const { + return address_space_start <= addr && addr <= address_space_end - 1; + } + constexpr bool Contains(VAddr addr, std::size_t size) const { + return address_space_start <= addr && addr < addr + size && + addr + size - 1 <= address_space_end - 1; + } + constexpr bool IsKernel() const { + return is_kernel; + } + constexpr bool IsAslrEnabled() const { + return is_aslr_enabled; + } + + constexpr std::size_t GetNumGuardPages() const { + return IsKernel() ? 
1 : 4; + } + + constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const { + return (address_space_start <= addr) && + (num_pages <= (address_space_end - address_space_start) / PageSize) && + (addr + num_pages * PageSize - 1 <= address_space_end - 1); + } + +private: + VAddr address_space_start{}; + VAddr address_space_end{}; + VAddr heap_region_start{}; + VAddr heap_region_end{}; + VAddr current_heap_end{}; + VAddr alias_region_start{}; + VAddr alias_region_end{}; + VAddr stack_region_start{}; + VAddr stack_region_end{}; + VAddr kernel_map_region_start{}; + VAddr kernel_map_region_end{}; + VAddr code_region_start{}; + VAddr code_region_end{}; + VAddr alias_code_region_start{}; + VAddr alias_code_region_end{}; + VAddr current_heap_addr{}; + + std::size_t heap_capacity{}; + std::size_t physical_memory_usage{}; + std::size_t max_heap_size{}; + std::size_t max_physical_memory_size{}; + std::size_t address_space_width{}; + + bool is_kernel{}; + bool is_aslr_enabled{}; + + MemoryManager::Pool memory_pool{MemoryManager::Pool::Application}; + + Common::PageTable page_table_impl; + + Core::System& system; +}; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/slab_heap.h b/src/core/hle/kernel/memory/slab_heap.h new file mode 100644 index 000000000..be95fc3f7 --- /dev/null +++ b/src/core/hle/kernel/memory/slab_heap.h @@ -0,0 +1,164 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +// This file references various implementation details from Atmosphère, an open-source firmware for +// the Nintendo Switch. Copyright 2018-2020 Atmosphère-NX. + +#pragma once + +#include <atomic> + +#include "common/assert.h" +#include "common/common_funcs.h" +#include "common/common_types.h" + +namespace Kernel::Memory { + +namespace impl { + +class SlabHeapImpl final : NonCopyable { +public: + struct Node { + Node* next{}; + }; + + constexpr SlabHeapImpl() = default; + + void Initialize(std::size_t size) { + ASSERT(head == nullptr); + obj_size = size; + } + + constexpr std::size_t GetObjectSize() const { + return obj_size; + } + + Node* GetHead() const { + return head; + } + + void* Allocate() { + Node* ret = head.load(); + + do { + if (ret == nullptr) { + break; + } + } while (!head.compare_exchange_weak(ret, ret->next)); + + return ret; + } + + void Free(void* obj) { + Node* node = static_cast<Node*>(obj); + + Node* cur_head = head.load(); + do { + node->next = cur_head; + } while (!head.compare_exchange_weak(cur_head, node)); + } + +private: + std::atomic<Node*> head{}; + std::size_t obj_size{}; +}; + +} // namespace impl + +class SlabHeapBase : NonCopyable { +public: + constexpr SlabHeapBase() = default; + + constexpr bool Contains(uintptr_t addr) const { + return start <= addr && addr < end; + } + + constexpr std::size_t GetSlabHeapSize() const { + return (end - start) / GetObjectSize(); + } + + constexpr std::size_t GetObjectSize() const { + return impl.GetObjectSize(); + } + + constexpr uintptr_t GetSlabHeapAddress() const { + return start; + } + + std::size_t GetObjectIndexImpl(const void* obj) const { + return (reinterpret_cast<uintptr_t>(obj) - start) / GetObjectSize(); + } + + std::size_t GetPeakIndex() const { + return GetObjectIndexImpl(reinterpret_cast<const void*>(peak)); + } + + void* AllocateImpl() { + return impl.Allocate(); + } + + void FreeImpl(void* obj) { + // Don't allow freeing an object that wasn't allocated from this heap + ASSERT(Contains(reinterpret_cast<uintptr_t>(obj))); 
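An aside on the allocator internals above, before the remainder of FreeImpl: the Allocate and Free loops in SlabHeapImpl form a lock-free intrusive free list (a Treiber stack) in which every free object is reused as a list node, so the allocator needs no metadata beyond the head pointer. A self-contained sketch of the same pop/push technique, with illustrative names rather than the class above:

    #include <atomic>

    // Minimal standalone model of SlabHeapImpl's free list.
    struct FreeList {
        struct Node {
            Node* next{};
        };

        std::atomic<Node*> head{nullptr};

        // Pop: retry until head swings from `ret` to `ret->next`.
        // compare_exchange_weak reloads `ret` whenever it fails.
        void* Pop() {
            Node* ret = head.load();
            while (ret != nullptr && !head.compare_exchange_weak(ret, ret->next)) {
            }
            return ret;
        }

        // Push: link the node in front of the current head, retrying on contention.
        void Push(void* obj) {
            Node* node = static_cast<Node*>(obj);
            Node* cur = head.load();
            do {
                node->next = cur;
            } while (!head.compare_exchange_weak(cur, node));
        }
    };

    int main() {
        FreeList list;
        static FreeList::Node storage[2];
        list.Push(&storage[0]);
        list.Push(&storage[1]);
        void* first = list.Pop();  // &storage[1]: the list is LIFO
        void* second = list.Pop(); // &storage[0]
        return (first != nullptr && second != nullptr) ? 0 : 1;
    }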
+ impl.Free(obj); + } + + void InitializeImpl(std::size_t obj_size, void* memory, std::size_t memory_size) { + // Ensure we don't initialize a slab using null memory + ASSERT(memory != nullptr); + + // Initialize the base allocator + impl.Initialize(obj_size); + + // Set our tracking variables + const std::size_t num_obj = (memory_size / obj_size); + start = reinterpret_cast<uintptr_t>(memory); + end = start + num_obj * obj_size; + peak = start; + + // Free the objects + u8* cur = reinterpret_cast<u8*>(end); + + for (std::size_t i{}; i < num_obj; i++) { + cur -= obj_size; + impl.Free(cur); + } + } + +private: + using Impl = impl::SlabHeapImpl; + + Impl impl; + uintptr_t peak{}; + uintptr_t start{}; + uintptr_t end{}; +}; + +template <typename T> +class SlabHeap final : public SlabHeapBase { +public: + constexpr SlabHeap() : SlabHeapBase() {} + + void Initialize(void* memory, std::size_t memory_size) { + InitializeImpl(sizeof(T), memory, memory_size); + } + + T* Allocate() { + T* obj = static_cast<T*>(AllocateImpl()); + if (obj != nullptr) { + new (obj) T(); + } + return obj; + } + + void Free(T* obj) { + FreeImpl(obj); + } + + constexpr std::size_t GetObjectIndex(const T* obj) const { + return GetObjectIndexImpl(obj); + } +}; + +} // namespace Kernel::Memory diff --git a/src/core/hle/kernel/memory/system_control.cpp b/src/core/hle/kernel/memory/system_control.cpp new file mode 100644 index 000000000..9cae3c6cb --- /dev/null +++ b/src/core/hle/kernel/memory/system_control.cpp @@ -0,0 +1,40 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include <limits> +#include <random> + +#include "core/hle/kernel/memory/system_control.h" + +namespace Kernel::Memory::SystemControl { + +u64 GenerateRandomU64ForInit() { + static std::random_device device; + static std::mt19937 gen(device()); + static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max()); + return distribution(gen); +} + +template <typename F> +u64 GenerateUniformRange(u64 min, u64 max, F f) { + /* Handle the case where the difference is too large to represent. */ + if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) { + return f(); + } + + /* Iterate until we get a value in range. */ + const u64 range_size = ((max + 1) - min); + const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size; + while (true) { + if (const u64 rnd = f(); rnd < effective_max) { + return min + (rnd % range_size); + } + } +} + +u64 GenerateRandomRange(u64 min, u64 max) { + return GenerateUniformRange(min, max, GenerateRandomU64ForInit); +} + +} // namespace Kernel::Memory::SystemControl diff --git a/src/core/hle/kernel/memory/system_control.h b/src/core/hle/kernel/memory/system_control.h new file mode 100644 index 000000000..3fa93111d --- /dev/null +++ b/src/core/hle/kernel/memory/system_control.h @@ -0,0 +1,18 @@ +// Copyright 2020 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included.
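The GenerateUniformRange template above is rejection sampling to avoid modulo bias: with range_size = 10, for instance, raw 64-bit values at or above effective_max (the largest multiple of 10 representable in a u64) are discarded, so the accepted domain's size is an exact multiple of 10 and every residue 0 through 9 is equally likely; folding the leftover partial cycle in with a bare modulo would slightly favor the small residues. A standalone demonstration of the same technique, using our own names and std::mt19937_64 standing in for the raw generator:

    #include <cstdint>
    #include <iostream>
    #include <limits>
    #include <random>

    std::uint64_t RandomRange(std::uint64_t min, std::uint64_t max, std::mt19937_64& gen) {
        // Full domain: every raw output is already in range.
        if (min == 0 && max == std::numeric_limits<std::uint64_t>::max()) {
            return gen();
        }
        const std::uint64_t range_size = (max + 1) - min;
        // Largest multiple of range_size that fits in 64 bits; rejecting raw
        // values at or above it keeps `% range_size` unbiased.
        const std::uint64_t effective_max =
            (std::numeric_limits<std::uint64_t>::max() / range_size) * range_size;
        while (true) {
            if (const std::uint64_t rnd = gen(); rnd < effective_max) {
                return min + (rnd % range_size);
            }
        }
    }

    int main() {
        std::mt19937_64 gen{std::random_device{}()};
        std::cout << RandomRange(1, 6, gen) << '\n'; // fair six-sided roll
    }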
+ +#pragma once + +#include "common/common_types.h" + +namespace Kernel::Memory::SystemControl { + +u64 GenerateRandomU64ForInit(); + +template <typename F> +u64 GenerateUniformRange(u64 min, u64 max, F f); + +u64 GenerateRandomRange(u64 min, u64 max); + +} // namespace Kernel::Memory::SystemControl diff --git a/src/core/hle/kernel/physical_memory.h b/src/core/hle/kernel/physical_memory.h index b689e8e8b..7a0266780 100644 --- a/src/core/hle/kernel/physical_memory.h +++ b/src/core/hle/kernel/physical_memory.h @@ -4,6 +4,8 @@ #pragma once +#include <vector> + #include "common/alignment.h" namespace Kernel { diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp index edc414d69..36724569f 100644 --- a/src/core/hle/kernel/process.cpp +++ b/src/core/hle/kernel/process.cpp @@ -10,15 +10,18 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/device_memory.h" #include "core/file_sys/program_metadata.h" #include "core/hle/kernel/code_set.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/memory/memory_block_manager.h" +#include "core/hle/kernel/memory/page_table.h" +#include "core/hle/kernel/memory/slab_heap.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/resource_limit.h" #include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/thread.h" -#include "core/hle/kernel/vm_manager.h" #include "core/memory.h" #include "core/settings.h" @@ -31,10 +34,8 @@ namespace { * @param kernel The kernel instance to create the main thread under. * @param priority The priority to give the main thread */ -void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) { - const auto& vm_manager = owner_process.VMManager(); - const VAddr entry_point = vm_manager.GetCodeRegionBaseAddress(); - const VAddr stack_top = vm_manager.GetTLSIORegionEndAddress(); +void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority, VAddr stack_top) { + const VAddr entry_point = owner_process.PageTable().GetCodeRegionStart(); auto thread_res = Thread::Create(kernel, "main", entry_point, priority, 0, owner_process.GetIdealCore(), stack_top, owner_process); @@ -42,6 +43,8 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) { // Register 1 must be a handle to the main thread const Handle thread_handle = owner_process.GetHandleTable().Create(thread).Unwrap(); + thread->GetContext32().cpu_registers[0] = 0; + thread->GetContext64().cpu_registers[0] = 0; thread->GetContext32().cpu_registers[1] = thread_handle; thread->GetContext64().cpu_registers[1] = thread_handle; @@ -57,7 +60,8 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) { // (whichever page happens to have an available slot). class TLSPage { public: - static constexpr std::size_t num_slot_entries = Memory::PAGE_SIZE / Memory::TLS_ENTRY_SIZE; + static constexpr std::size_t num_slot_entries = + Core::Memory::PAGE_SIZE / Core::Memory::TLS_ENTRY_SIZE; explicit TLSPage(VAddr address) : base_address{address} {} @@ -76,7 +80,7 @@ public: } is_slot_used[i] = true; - return base_address + (i * Memory::TLS_ENTRY_SIZE); + return base_address + (i * Core::Memory::TLS_ENTRY_SIZE); } return std::nullopt; @@ -86,15 +90,15 @@ public: // Ensure that all given addresses are consistent with how TLS pages // are intended to be used when releasing slots. 
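For concreteness on the slot arithmetic that ReleaseSlot validates just below: assuming the constants in core/memory.h at the time of this change (Core::Memory::PAGE_SIZE = 0x1000 and Core::Memory::TLS_ENTRY_SIZE = 0x200), every TLSPage carries eight slots, and a released address maps back to its slot index by plain division:

    // Illustrative numbers only, matching the core/memory.h constants.
    static_assert(0x1000 / 0x200 == 8); // num_slot_entries per TLS page

    // For base_address = 0x71000000 and a released address of 0x71000600:
    //   index = (0x71000600 - 0x71000000) / 0x200 == 3, so is_slot_used[3] is cleared.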
ASSERT(IsWithinPage(address)); - ASSERT((address % Memory::TLS_ENTRY_SIZE) == 0); + ASSERT((address % Core::Memory::TLS_ENTRY_SIZE) == 0); - const std::size_t index = (address - base_address) / Memory::TLS_ENTRY_SIZE; + const std::size_t index = (address - base_address) / Core::Memory::TLS_ENTRY_SIZE; is_slot_used[index] = false; } private: bool IsWithinPage(VAddr address) const { - return base_address <= address && address < base_address + Memory::PAGE_SIZE; + return base_address <= address && address < base_address + Core::Memory::PAGE_SIZE; } VAddr base_address; @@ -106,7 +110,7 @@ std::shared_ptr<Process> Process::Create(Core::System& system, std::string name, std::shared_ptr<Process> process = std::make_shared<Process>(system); process->name = std::move(name); - process->resource_limit = kernel.GetSystemResourceLimit(); + process->resource_limit = ResourceLimit::Create(kernel); process->status = ProcessStatus::Created; process->program_id = 0; process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID() @@ -127,7 +131,14 @@ std::shared_ptr<ResourceLimit> Process::GetResourceLimit() const { } u64 Process::GetTotalPhysicalMemoryAvailable() const { - return vm_manager.GetTotalPhysicalMemoryAvailable(); + const u64 capacity{resource_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory) + + page_table->GetTotalHeapSize() + image_size + main_thread_stack_size}; + + if (capacity < memory_usage_capacity) { + return capacity; + } + + return memory_usage_capacity; } u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const { @@ -135,8 +146,7 @@ u64 Process::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const { } u64 Process::GetTotalPhysicalMemoryUsed() const { - return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size + - GetSystemResourceUsage(); + return image_size + main_thread_stack_size + page_table->GetTotalHeapSize(); } u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const { @@ -209,33 +219,82 @@ ResultCode Process::ClearSignalState() { return RESULT_SUCCESS; } -ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) { +ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, + std::size_t code_size) { program_id = metadata.GetTitleID(); ideal_core = metadata.GetMainThreadCore(); is_64bit_process = metadata.Is64BitProgram(); system_resource_size = metadata.GetSystemResourceSize(); + image_size = code_size; + + // Initialize process address space + if (const ResultCode result{ + page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false, 0x8000000, + code_size, Memory::MemoryManager::Pool::Application)}; + result.IsError()) { + return result; + } - vm_manager.Reset(metadata.GetAddressSpaceType()); + // Map process code region + if (const ResultCode result{page_table->MapProcessCode( + page_table->GetCodeRegionStart(), code_size / Memory::PageSize, + Memory::MemoryState::Code, Memory::MemoryPermission::None)}; + result.IsError()) { + return result; + } - const auto& caps = metadata.GetKernelCapabilities(); - const auto capability_init_result = - capabilities.InitializeForUserProcess(caps.data(), caps.size(), vm_manager); - if (capability_init_result.IsError()) { - return capability_init_result; + // Initialize process capabilities + const auto& caps{metadata.GetKernelCapabilities()}; + if (const ResultCode result{ + capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)}; + result.IsError()) { + return 
result; } + // Set memory usage capacity + switch (metadata.GetAddressSpaceType()) { + case FileSys::ProgramAddressSpaceType::Is32Bit: + case FileSys::ProgramAddressSpaceType::Is36Bit: + case FileSys::ProgramAddressSpaceType::Is39Bit: + memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart(); + break; + + case FileSys::ProgramAddressSpaceType::Is32BitNoMap: + memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() + + page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart(); + break; + + default: + UNREACHABLE(); + } + + // Set initial resource limits + resource_limit->SetLimitValue( + ResourceType::PhysicalMemory, + kernel.MemoryManager().GetSize(Memory::MemoryManager::Pool::Application)); + resource_limit->SetLimitValue(ResourceType::Threads, 608); + resource_limit->SetLimitValue(ResourceType::Events, 700); + resource_limit->SetLimitValue(ResourceType::TransferMemory, 128); + resource_limit->SetLimitValue(ResourceType::Sessions, 894); + ASSERT(resource_limit->Reserve(ResourceType::PhysicalMemory, code_size)); + + // Create TLS region + tls_region_address = CreateTLSRegion(); + return handle_table.SetSize(capabilities.GetHandleTableSize()); } void Process::Run(s32 main_thread_priority, u64 stack_size) { AllocateMainThreadStack(stack_size); - tls_region_address = CreateTLSRegion(); - vm_manager.LogLayout(); + const std::size_t heap_capacity{memory_usage_capacity - main_thread_stack_size - image_size}; + ASSERT(!page_table->SetHeapCapacity(heap_capacity).IsError()); ChangeStatus(ProcessStatus::Running); - SetupMainThread(*this, kernel, main_thread_priority); + SetupMainThread(*this, kernel, main_thread_priority, main_thread_stack_top); + resource_limit->Reserve(ResourceType::Threads, 1); + resource_limit->Reserve(ResourceType::PhysicalMemory, main_thread_stack_size); } void Process::PrepareForTermination() { @@ -279,32 +338,37 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) { } VAddr Process::CreateTLSRegion() { - auto tls_page_iter = FindTLSPageWithAvailableSlots(tls_pages); + if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)}; + tls_page_iter != tls_pages.cend()) { + return *tls_page_iter->ReserveSlot(); + } - if (tls_page_iter == tls_pages.cend()) { - const auto region_address = - vm_manager.FindFreeRegion(vm_manager.GetTLSIORegionBaseAddress(), - vm_manager.GetTLSIORegionEndAddress(), Memory::PAGE_SIZE); - ASSERT(region_address.Succeeded()); + Memory::Page* const tls_page_ptr{kernel.GetUserSlabHeapPages().Allocate()}; + ASSERT(tls_page_ptr); - const auto map_result = vm_manager.MapMemoryBlock( - *region_address, std::make_shared<PhysicalMemory>(Memory::PAGE_SIZE), 0, - Memory::PAGE_SIZE, MemoryState::ThreadLocal); - ASSERT(map_result.Succeeded()); + const VAddr start{page_table->GetKernelMapRegionStart()}; + const VAddr size{page_table->GetKernelMapRegionEnd() - start}; + const PAddr tls_map_addr{system.DeviceMemory().GetPhysicalAddr(tls_page_ptr)}; + const VAddr tls_page_addr{ + page_table + ->AllocateAndMapMemory(1, Memory::PageSize, true, start, size / Memory::PageSize, + Memory::MemoryState::ThreadLocal, + Memory::MemoryPermission::ReadAndWrite, tls_map_addr) + .ValueOr(0)}; - tls_pages.emplace_back(*region_address); + ASSERT(tls_page_addr); - const auto reserve_result = tls_pages.back().ReserveSlot(); - ASSERT(reserve_result.has_value()); + std::memset(tls_page_ptr, 0, Memory::PageSize); + tls_pages.emplace_back(tls_page_addr); - return *reserve_result; - } + 
const auto reserve_result{tls_pages.back().ReserveSlot()}; + ASSERT(reserve_result.has_value()); - return *tls_page_iter->ReserveSlot(); + return *reserve_result; } void Process::FreeTLSRegion(VAddr tls_address) { - const VAddr aligned_address = Common::AlignDown(tls_address, Memory::PAGE_SIZE); + const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE); auto iter = std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) { return page.GetBaseAddress() == aligned_address; @@ -317,28 +381,22 @@ void Process::FreeTLSRegion(VAddr tls_address) { iter->ReleaseSlot(tls_address); } -void Process::LoadModule(CodeSet module_, VAddr base_addr) { - code_memory_size += module_.memory.size(); - - const auto memory = std::make_shared<PhysicalMemory>(std::move(module_.memory)); - - const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions, - MemoryState memory_state) { - const auto vma = vm_manager - .MapMemoryBlock(segment.addr + base_addr, memory, segment.offset, - segment.size, memory_state) - .Unwrap(); - vm_manager.Reprotect(vma, permissions); +void Process::LoadModule(CodeSet code_set, VAddr base_addr) { + const auto ReprotectSegment = [&](const CodeSet::Segment& segment, + Memory::MemoryPermission permission) { + page_table->SetCodeMemoryPermission(segment.addr + base_addr, segment.size, permission); }; - // Map CodeSet segments - MapSegment(module_.CodeSegment(), VMAPermission::ReadExecute, MemoryState::Code); - MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData); - MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData); + system.Memory().WriteBlock(*this, base_addr, code_set.memory.data(), code_set.memory.size()); + + ReprotectSegment(code_set.CodeSegment(), Memory::MemoryPermission::ReadAndExecute); + ReprotectSegment(code_set.RODataSegment(), Memory::MemoryPermission::Read); + ReprotectSegment(code_set.DataSegment(), Memory::MemoryPermission::ReadAndWrite); } Process::Process(Core::System& system) - : SynchronizationObject{system.Kernel()}, vm_manager{system}, + : SynchronizationObject{system.Kernel()}, page_table{std::make_unique<Memory::PageTable>( + system)}, address_arbiter{system}, mutex{system}, system{system} {} Process::~Process() = default; @@ -361,16 +419,24 @@ void Process::ChangeStatus(ProcessStatus new_status) { Signal(); } -void Process::AllocateMainThreadStack(u64 stack_size) { +ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) { + ASSERT(stack_size); + // The kernel always ensures that the given stack size is page aligned. 
- main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE); - - // Allocate and map the main thread stack - const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size; - vm_manager - .MapMemoryBlock(mapping_address, std::make_shared<PhysicalMemory>(main_thread_stack_size), - 0, main_thread_stack_size, MemoryState::Stack) - .Unwrap(); + main_thread_stack_size = Common::AlignUp(stack_size, Memory::PageSize); + + const VAddr start{page_table->GetStackRegionStart()}; + const std::size_t size{page_table->GetStackRegionEnd() - start}; + + CASCADE_RESULT(main_thread_stack_top, + page_table->AllocateAndMapMemory( + main_thread_stack_size / Memory::PageSize, Memory::PageSize, false, start, + size / Memory::PageSize, Memory::MemoryState::Stack, + Memory::MemoryPermission::ReadAndWrite)); + + main_thread_stack_top += main_thread_stack_size; + + return RESULT_SUCCESS; } } // namespace Kernel diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h index 4887132a7..9dabe3568 100644 --- a/src/core/hle/kernel/process.h +++ b/src/core/hle/kernel/process.h @@ -16,7 +16,6 @@ #include "core/hle/kernel/mutex.h" #include "core/hle/kernel/process_capability.h" #include "core/hle/kernel/synchronization_object.h" -#include "core/hle/kernel/vm_manager.h" #include "core/hle/result.h" namespace Core { @@ -36,6 +35,10 @@ class TLSPage; struct CodeSet; +namespace Memory { +class PageTable; +} + enum class MemoryRegion : u16 { APPLICATION = 1, SYSTEM = 2, @@ -100,14 +103,14 @@ public: return HANDLE_TYPE; } - /// Gets a reference to the process' memory manager. - Kernel::VMManager& VMManager() { - return vm_manager; + /// Gets a reference to the process' page table. + Memory::PageTable& PageTable() { + return *page_table; } - /// Gets a const reference to the process' memory manager. - const Kernel::VMManager& VMManager() const { - return vm_manager; + /// Gets a const reference to the process' page table. + const Memory::PageTable& PageTable() const { + return *page_table; } /// Gets a reference to the process' handle table. @@ -273,7 +276,7 @@ public: * @returns RESULT_SUCCESS if all relevant metadata was able to be * loaded and parsed. Otherwise, an error code is returned. */ - ResultCode LoadFromMetadata(const FileSys::ProgramMetadata& metadata); + ResultCode LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size); /** * Starts the main application thread for this process. @@ -289,7 +292,7 @@ public: */ void PrepareForTermination(); - void LoadModule(CodeSet module_, VAddr base_addr); + void LoadModule(CodeSet code_set, VAddr base_addr); /////////////////////////////////////////////////////////////////////////////////////////////// // Thread-local storage management @@ -313,16 +316,10 @@ private: void ChangeStatus(ProcessStatus new_status); /// Allocates the main thread stack for the process, given the stack size in bytes. - void AllocateMainThreadStack(u64 stack_size); - - /// Memory manager for this process. - Kernel::VMManager vm_manager; - - /// Size of the main thread's stack in bytes. - u64 main_thread_stack_size = 0; + ResultCode AllocateMainThreadStack(std::size_t stack_size); - /// Size of the loaded code memory in bytes. 
- u64 code_memory_size = 0; + /// Memory manager for this process + std::unique_ptr<Memory::PageTable> page_table; /// Current status of the process ProcessStatus status{}; @@ -390,6 +387,18 @@ private: /// Name of this process std::string name; + + /// Address of the top of the main thread's stack + VAddr main_thread_stack_top{}; + + /// Size of the main thread's stack + std::size_t main_thread_stack_size{}; + + /// Memory usage capacity for the process + std::size_t memory_usage_capacity{}; + + /// Process total image size + std::size_t image_size{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp index 583e35b79..48e5ae682 100644 --- a/src/core/hle/kernel/process_capability.cpp +++ b/src/core/hle/kernel/process_capability.cpp @@ -5,8 +5,8 @@ #include "common/bit_util.h" #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process_capability.h" -#include "core/hle/kernel/vm_manager.h" namespace Kernel { namespace { @@ -66,7 +66,7 @@ u32 GetFlagBitOffset(CapabilityType type) { ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities, - VMManager& vm_manager) { + Memory::PageTable& page_table) { Clear(); // Allow all cores and priorities. @@ -74,15 +74,15 @@ ResultCode ProcessCapabilities::InitializeForKernelProcess(const u32* capabiliti priority_mask = 0xFFFFFFFFFFFFFFFF; kernel_version = PackedKernelVersion; - return ParseCapabilities(capabilities, num_capabilities, vm_manager); + return ParseCapabilities(capabilities, num_capabilities, page_table); } ResultCode ProcessCapabilities::InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities, - VMManager& vm_manager) { + Memory::PageTable& page_table) { Clear(); - return ParseCapabilities(capabilities, num_capabilities, vm_manager); + return ParseCapabilities(capabilities, num_capabilities, page_table); } void ProcessCapabilities::InitializeForMetadatalessProcess() { @@ -105,7 +105,7 @@ void ProcessCapabilities::InitializeForMetadatalessProcess() { ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities, std::size_t num_capabilities, - VMManager& vm_manager) { + Memory::PageTable& page_table) { u32 set_flags = 0; u32 set_svc_bits = 0; @@ -127,13 +127,13 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities, return ERR_INVALID_COMBINATION; } - const auto result = HandleMapPhysicalFlags(descriptor, size_flags, vm_manager); + const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table); if (result.IsError()) { return result; } } else { const auto result = - ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, vm_manager); + ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table); if (result.IsError()) { return result; } @@ -144,7 +144,7 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities, } ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, - u32 flag, VMManager& vm_manager) { + u32 flag, Memory::PageTable& page_table) { const auto type = GetCapabilityType(flag); if (type == CapabilityType::Unset) { @@ -172,7 +172,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s case CapabilityType::Syscall: return HandleSyscallFlags(set_svc_bits, flag); case CapabilityType::MapIO: - return HandleMapIOFlags(flag, 
vm_manager); + return HandleMapIOFlags(flag, page_table); case CapabilityType::Interrupt: return HandleInterruptFlags(flag); case CapabilityType::ProgramType: @@ -269,12 +269,12 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags) } ResultCode ProcessCapabilities::HandleMapPhysicalFlags(u32 flags, u32 size_flags, - VMManager& vm_manager) { + Memory::PageTable& page_table) { // TODO(Lioncache): Implement once the memory manager can handle this. return RESULT_SUCCESS; } -ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, VMManager& vm_manager) { +ResultCode ProcessCapabilities::HandleMapIOFlags(u32 flags, Memory::PageTable& page_table) { // TODO(Lioncache): Implement once the memory manager can handle this. return RESULT_SUCCESS; } diff --git a/src/core/hle/kernel/process_capability.h b/src/core/hle/kernel/process_capability.h index 5cdd80747..ea9d12c16 100644 --- a/src/core/hle/kernel/process_capability.h +++ b/src/core/hle/kernel/process_capability.h @@ -12,7 +12,9 @@ union ResultCode; namespace Kernel { -class VMManager; +namespace Memory { +class PageTable; +} /// The possible types of programs that may be indicated /// by the program type capability descriptor. @@ -81,27 +83,27 @@ public: /// /// @param capabilities The capabilities to parse /// @param num_capabilities The number of capabilities to parse. - /// @param vm_manager The memory manager to use for handling any mapping-related + /// @param page_table The memory manager to use for handling any mapping-related /// operations (such as mapping IO memory, etc). /// /// @returns RESULT_SUCCESS if this capabilities instance was able to be initialized, /// otherwise, an error code upon failure. /// ResultCode InitializeForKernelProcess(const u32* capabilities, std::size_t num_capabilities, - VMManager& vm_manager); + Memory::PageTable& page_table); /// Initializes this process capabilities instance for a userland process. /// /// @param capabilities The capabilities to parse. /// @param num_capabilities The total number of capabilities to parse. - /// @param vm_manager The memory manager to use for handling any mapping-related + /// @param page_table The memory manager to use for handling any mapping-related /// operations (such as mapping IO memory, etc). /// /// @returns RESULT_SUCCESS if this capabilities instance was able to be initialized, /// otherwise, an error code upon failure. /// ResultCode InitializeForUserProcess(const u32* capabilities, std::size_t num_capabilities, - VMManager& vm_manager); + Memory::PageTable& page_table); /// Initializes this process capabilities instance for a process that does not /// have any metadata to parse. @@ -181,13 +183,13 @@ private: /// /// @param capabilities The sequence of capability descriptors to parse. /// @param num_capabilities The number of descriptors within the given sequence. - /// @param vm_manager The memory manager that will perform any memory + /// @param page_table The memory manager that will perform any memory /// mapping if necessary. /// /// @return RESULT_SUCCESS if no errors occur, otherwise an error code. /// ResultCode ParseCapabilities(const u32* capabilities, std::size_t num_capabilities, - VMManager& vm_manager); + Memory::PageTable& page_table); /// Attempts to parse a capability descriptor that is only represented by a /// single flag set. @@ -196,13 +198,13 @@ private: /// flags being initialized more than once when they shouldn't be. 
    /// @param set_svc_bits Running set of bits representing the allowed supervisor calls mask.
    /// @param flag The flag to attempt to parse.
-   /// @param vm_manager The memory manager that will perform any memory
+   /// @param page_table The memory manager that will perform any memory
    ///                   mapping if necessary.
    ///
    /// @return RESULT_SUCCESS if no errors occurred, otherwise an error code.
    ///
    ResultCode ParseSingleFlagCapability(u32& set_flags, u32& set_svc_bits, u32 flag,
-                                        VMManager& vm_manager);
+                                        Memory::PageTable& page_table);
 
    /// Clears the internal state of this process capability instance. Necessary
    /// to have a sane starting point due to us allowing running executables without
@@ -226,10 +228,10 @@ private:
    ResultCode HandleSyscallFlags(u32& set_svc_bits, u32 flags);
 
    /// Handles flags related to mapping physical memory pages.
-   ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, VMManager& vm_manager);
+   ResultCode HandleMapPhysicalFlags(u32 flags, u32 size_flags, Memory::PageTable& page_table);
 
    /// Handles flags related to mapping IO pages.
-   ResultCode HandleMapIOFlags(u32 flags, VMManager& vm_manager);
+   ResultCode HandleMapIOFlags(u32 flags, Memory::PageTable& page_table);
 
    /// Handles flags related to the interrupt capability flags.
    ResultCode HandleInterruptFlags(u32 flags);
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index b53423462..96e5b9892 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -16,26 +16,60 @@ constexpr std::size_t ResourceTypeToIndex(ResourceType type) {
 ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {}
 ResourceLimit::~ResourceLimit() = default;
 
+bool ResourceLimit::Reserve(ResourceType resource, s64 amount) {
+    return Reserve(resource, amount, 10000000000);
+}
+
+bool ResourceLimit::Reserve(ResourceType resource, s64 amount, u64 timeout) {
+    const std::size_t index{ResourceTypeToIndex(resource)};
+
+    s64 new_value = current[index] + amount;
+    while (new_value > limit[index] && available[index] + amount <= limit[index]) {
+        // TODO(bunnei): This is wrong for multicore; we should make the calling thread wait
+        // for the timeout instead of spinning.
+        new_value = current[index] + amount;
+
+        if (timeout >= 0) {
+            break;
+        }
+    }
+
+    if (new_value <= limit[index]) {
+        current[index] = new_value;
+        return true;
+    }
+    return false;
+}
+
+void ResourceLimit::Release(ResourceType resource, u64 amount) {
+    Release(resource, amount, amount);
+}
+
+void ResourceLimit::Release(ResourceType resource, u64 used_amount, u64 available_amount) {
+    const std::size_t index{ResourceTypeToIndex(resource)};
+
+    current[index] -= used_amount;
+    available[index] -= available_amount;
+}
+
 std::shared_ptr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
     return std::make_shared<ResourceLimit>(kernel);
 }
 
 s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const {
-    return values.at(ResourceTypeToIndex(resource));
+    return limit.at(ResourceTypeToIndex(resource)) - current.at(ResourceTypeToIndex(resource));
 }
 
 s64 ResourceLimit::GetMaxResourceValue(ResourceType resource) const {
-    return limits.at(ResourceTypeToIndex(resource));
+    return limit.at(ResourceTypeToIndex(resource));
 }
 
 ResultCode ResourceLimit::SetLimitValue(ResourceType resource, s64 value) {
-    const auto index = ResourceTypeToIndex(resource);
-
-    if (value < values[index]) {
+    const std::size_t index{ResourceTypeToIndex(resource)};
+    if (current[index] <= value) {
+        limit[index] = value;
+        return RESULT_SUCCESS;
+
} else { return ERR_INVALID_STATE; } - - values[index] = value; - return RESULT_SUCCESS; } } // namespace Kernel diff --git a/src/core/hle/kernel/resource_limit.h b/src/core/hle/kernel/resource_limit.h index 53b89e621..936cc4d0f 100644 --- a/src/core/hle/kernel/resource_limit.h +++ b/src/core/hle/kernel/resource_limit.h @@ -51,6 +51,11 @@ public: return HANDLE_TYPE; } + bool Reserve(ResourceType resource, s64 amount); + bool Reserve(ResourceType resource, s64 amount, u64 timeout); + void Release(ResourceType resource, u64 amount); + void Release(ResourceType resource, u64 used_amount, u64 available_amount); + /** * Gets the current value for the specified resource. * @param resource Requested resource type @@ -91,10 +96,9 @@ private: using ResourceArray = std::array<s64, static_cast<std::size_t>(ResourceType::ResourceTypeCount)>; - /// Maximum values a resource type may reach. - ResourceArray limits{}; - /// Current resource limit values. - ResourceArray values{}; + ResourceArray limit{}; + ResourceArray current{}; + ResourceArray available{}; }; } // namespace Kernel diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp index 4604e35c5..0f102ca44 100644 --- a/src/core/hle/kernel/server_session.cpp +++ b/src/core/hle/kernel/server_session.cpp @@ -134,7 +134,8 @@ ResultCode ServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& con return RESULT_SUCCESS; } -ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory) { +ResultCode ServerSession::QueueSyncRequest(std::shared_ptr<Thread> thread, + Core::Memory::Memory& memory) { u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(thread->GetTLSAddress()))}; std::shared_ptr<Kernel::HLERequestContext> context{ std::make_shared<Kernel::HLERequestContext>(SharedFrom(this), std::move(thread))}; @@ -178,7 +179,7 @@ ResultCode ServerSession::CompleteSyncRequest() { } ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread, - Memory::Memory& memory) { + Core::Memory::Memory& memory) { Core::System::GetInstance().CoreTiming().ScheduleEvent(20000, request_event, {}); return QueueSyncRequest(std::move(thread), memory); } diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h index 77e4f6721..403aaf10b 100644 --- a/src/core/hle/kernel/server_session.h +++ b/src/core/hle/kernel/server_session.h @@ -13,7 +13,7 @@ #include "core/hle/kernel/synchronization_object.h" #include "core/hle/result.h" -namespace Memory { +namespace Core::Memory { class Memory; } @@ -92,7 +92,7 @@ public: * * @returns ResultCode from the operation. */ - ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory); + ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory); bool ShouldWait(const Thread* thread) const override; @@ -126,7 +126,7 @@ public: private: /// Queues a sync request from the emulated application. - ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Memory::Memory& memory); + ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory); /// Completes a sync request from the emulated application. 
ResultCode CompleteSyncRequest(); diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp index afb2e3fc2..c67696757 100644 --- a/src/core/hle/kernel/shared_memory.cpp +++ b/src/core/hle/kernel/shared_memory.cpp @@ -2,149 +2,56 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -#include <utility> - #include "common/assert.h" -#include "common/logging/log.h" -#include "core/hle/kernel/errors.h" +#include "core/core.h" #include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/shared_memory.h" namespace Kernel { -SharedMemory::SharedMemory(KernelCore& kernel) : Object{kernel} {} -SharedMemory::~SharedMemory() = default; - -std::shared_ptr<SharedMemory> SharedMemory::Create(KernelCore& kernel, Process* owner_process, - u64 size, MemoryPermission permissions, - MemoryPermission other_permissions, - VAddr address, MemoryRegion region, - std::string name) { - std::shared_ptr<SharedMemory> shared_memory = std::make_shared<SharedMemory>(kernel); - - shared_memory->owner_process = owner_process; - shared_memory->name = std::move(name); - shared_memory->size = size; - shared_memory->permissions = permissions; - shared_memory->other_permissions = other_permissions; - - if (address == 0) { - shared_memory->backing_block = std::make_shared<Kernel::PhysicalMemory>(size); - shared_memory->backing_block_offset = 0; - - // Refresh the address mappings for the current process. - if (kernel.CurrentProcess() != nullptr) { - kernel.CurrentProcess()->VMManager().RefreshMemoryBlockMappings( - shared_memory->backing_block.get()); - } - } else { - const auto& vm_manager = shared_memory->owner_process->VMManager(); +SharedMemory::SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory) + : Object{kernel}, device_memory{device_memory} {} - // The memory is already available and mapped in the owner process. - const auto vma = vm_manager.FindVMA(address); - ASSERT_MSG(vm_manager.IsValidHandle(vma), "Invalid memory address"); - ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address"); - - // The returned VMA might be a bigger one encompassing the desired address. 
- const auto vma_offset = address - vma->first; - ASSERT_MSG(vma_offset + size <= vma->second.size, - "Shared memory exceeds bounds of mapped block"); - - shared_memory->backing_block = vma->second.backing_block; - shared_memory->backing_block_offset = vma->second.offset + vma_offset; - } - - shared_memory->base_address = address; +SharedMemory::~SharedMemory() = default; - return shared_memory; -} +std::shared_ptr<SharedMemory> SharedMemory::Create( + KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process, + Memory::PageLinkedList&& page_list, Memory::MemoryPermission owner_permission, + Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size, + std::string name) { -std::shared_ptr<SharedMemory> SharedMemory::CreateForApplet( - KernelCore& kernel, std::shared_ptr<Kernel::PhysicalMemory> heap_block, std::size_t offset, - u64 size, MemoryPermission permissions, MemoryPermission other_permissions, std::string name) { - std::shared_ptr<SharedMemory> shared_memory = std::make_shared<SharedMemory>(kernel); + std::shared_ptr<SharedMemory> shared_memory{ + std::make_shared<SharedMemory>(kernel, device_memory)}; - shared_memory->owner_process = nullptr; - shared_memory->name = std::move(name); + shared_memory->owner_process = owner_process; + shared_memory->page_list = std::move(page_list); + shared_memory->owner_permission = owner_permission; + shared_memory->user_permission = user_permission; + shared_memory->physical_address = physical_address; shared_memory->size = size; - shared_memory->permissions = permissions; - shared_memory->other_permissions = other_permissions; - shared_memory->backing_block = std::move(heap_block); - shared_memory->backing_block_offset = offset; - shared_memory->base_address = - kernel.CurrentProcess()->VMManager().GetHeapRegionBaseAddress() + offset; + shared_memory->name = name; return shared_memory; } -ResultCode SharedMemory::Map(Process& target_process, VAddr address, MemoryPermission permissions, - MemoryPermission other_permissions) { - const MemoryPermission own_other_permissions = - &target_process == owner_process ? this->permissions : this->other_permissions; - - // Automatically allocated memory blocks can only be mapped with other_permissions = DontCare - if (base_address == 0 && other_permissions != MemoryPermission::DontCare) { - return ERR_INVALID_MEMORY_PERMISSIONS; - } - - // Error out if the requested permissions don't match what the creator process allows. - if (static_cast<u32>(permissions) & ~static_cast<u32>(own_other_permissions)) { - LOG_ERROR(Kernel, "cannot map id={}, address=0x{:X} name={}, permissions don't match", - GetObjectId(), address, name); - return ERR_INVALID_MEMORY_PERMISSIONS; - } +ResultCode SharedMemory::Map(Process& target_process, VAddr address, std::size_t size, + Memory::MemoryPermission permission) { + const u64 page_count{(size + Memory::PageSize - 1) / Memory::PageSize}; - // Error out if the provided permissions are not compatible with what the creator process needs. - if (other_permissions != MemoryPermission::DontCare && - static_cast<u32>(this->permissions) & ~static_cast<u32>(other_permissions)) { - LOG_ERROR(Kernel, "cannot map id={}, address=0x{:X} name={}, permissions don't match", - GetObjectId(), address, name); - return ERR_INVALID_MEMORY_PERMISSIONS; + if (page_list.GetNumPages() != page_count) { + UNIMPLEMENTED_MSG("Page count does not match"); } - VAddr target_address = address; + Memory::MemoryPermission expected = + &target_process == owner_process ? 
owner_permission : user_permission; - // Map the memory block into the target process - auto result = target_process.VMManager().MapMemoryBlock( - target_address, backing_block, backing_block_offset, size, MemoryState::Shared); - if (result.Failed()) { - LOG_ERROR( - Kernel, - "cannot map id={}, target_address=0x{:X} name={}, error mapping to virtual memory", - GetObjectId(), target_address, name); - return result.Code(); + if (permission != expected) { + UNIMPLEMENTED_MSG("Permission does not match"); } - return target_process.VMManager().ReprotectRange(target_address, size, - ConvertPermissions(permissions)); -} - -ResultCode SharedMemory::Unmap(Process& target_process, VAddr address, u64 unmap_size) { - if (unmap_size != size) { - LOG_ERROR(Kernel, - "Invalid size passed to Unmap. Size must be equal to the size of the " - "memory managed. Shared memory size=0x{:016X}, Unmap size=0x{:016X}", - size, unmap_size); - return ERR_INVALID_SIZE; - } - - // TODO(Subv): Verify what happens if the application tries to unmap an address that is not - // mapped to a SharedMemory. - return target_process.VMManager().UnmapRange(address, size); -} - -VMAPermission SharedMemory::ConvertPermissions(MemoryPermission permission) { - u32 masked_permissions = - static_cast<u32>(permission) & static_cast<u32>(MemoryPermission::ReadWriteExecute); - return static_cast<VMAPermission>(masked_permissions); -} - -u8* SharedMemory::GetPointer(std::size_t offset) { - return backing_block->data() + backing_block_offset + offset; -} - -const u8* SharedMemory::GetPointer(std::size_t offset) const { - return backing_block->data() + backing_block_offset + offset; + return target_process.PageTable().MapPages(address, page_list, Memory::MemoryState::Shared, + permission); } } // namespace Kernel diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h index 014951d82..cd16d6412 100644 --- a/src/core/hle/kernel/shared_memory.h +++ b/src/core/hle/kernel/shared_memory.h @@ -8,8 +8,10 @@ #include <string> #include "common/common_types.h" +#include "core/device_memory.h" +#include "core/hle/kernel/memory/memory_block.h" +#include "core/hle/kernel/memory/page_linked_list.h" #include "core/hle/kernel/object.h" -#include "core/hle/kernel/physical_memory.h" #include "core/hle/kernel/process.h" #include "core/hle/result.h" @@ -17,63 +19,21 @@ namespace Kernel { class KernelCore; -/// Permissions for mapped shared memory blocks -enum class MemoryPermission : u32 { - None = 0, - Read = (1u << 0), - Write = (1u << 1), - ReadWrite = (Read | Write), - Execute = (1u << 2), - ReadExecute = (Read | Execute), - WriteExecute = (Write | Execute), - ReadWriteExecute = (Read | Write | Execute), - DontCare = (1u << 28) -}; - class SharedMemory final : public Object { public: - explicit SharedMemory(KernelCore& kernel); + explicit SharedMemory(KernelCore& kernel, Core::DeviceMemory& device_memory); ~SharedMemory() override; - /** - * Creates a shared memory object. - * @param kernel The kernel instance to create a shared memory instance under. - * @param owner_process Process that created this shared memory object. - * @param size Size of the memory block. Must be page-aligned. - * @param permissions Permission restrictions applied to the process which created the block. - * @param other_permissions Permission restrictions applied to other processes mapping the - * block. - * @param address The address from which to map the Shared Memory. 
- * @param region If the address is 0, the shared memory will be allocated in this region of the - * linear heap. - * @param name Optional object name, used for debugging purposes. - */ - static std::shared_ptr<SharedMemory> Create(KernelCore& kernel, Process* owner_process, - u64 size, MemoryPermission permissions, - MemoryPermission other_permissions, - VAddr address = 0, - MemoryRegion region = MemoryRegion::BASE, - std::string name = "Unknown"); - - /** - * Creates a shared memory object from a block of memory managed by an HLE applet. - * @param kernel The kernel instance to create a shared memory instance under. - * @param heap_block Heap block of the HLE applet. - * @param offset The offset into the heap block that the SharedMemory will map. - * @param size Size of the memory block. Must be page-aligned. - * @param permissions Permission restrictions applied to the process which created the block. - * @param other_permissions Permission restrictions applied to other processes mapping the - * block. - * @param name Optional object name, used for debugging purposes. - */ - static std::shared_ptr<SharedMemory> CreateForApplet( - KernelCore& kernel, std::shared_ptr<Kernel::PhysicalMemory> heap_block, std::size_t offset, - u64 size, MemoryPermission permissions, MemoryPermission other_permissions, - std::string name = "Unknown Applet"); + static std::shared_ptr<SharedMemory> Create( + KernelCore& kernel, Core::DeviceMemory& device_memory, Process* owner_process, + Memory::PageLinkedList&& page_list, Memory::MemoryPermission owner_permission, + Memory::MemoryPermission user_permission, PAddr physical_address, std::size_t size, + std::string name); std::string GetTypeName() const override { return "SharedMemory"; } + std::string GetName() const override { return name; } @@ -83,71 +43,42 @@ public: return HANDLE_TYPE; } - /// Gets the size of the underlying memory block in bytes. - u64 GetSize() const { - return size; - } - - /** - * Converts the specified MemoryPermission into the equivalent VMAPermission. - * @param permission The MemoryPermission to convert. - */ - static VMAPermission ConvertPermissions(MemoryPermission permission); - /** * Maps a shared memory block to an address in the target process' address space - * @param target_process Process on which to map the memory block. + * @param target_process Process on which to map the memory block * @param address Address in system memory to map shared memory block to + * @param size Size of the shared memory block to map * @param permissions Memory block map permissions (specified by SVC field) - * @param other_permissions Memory block map other permissions (specified by SVC field) - */ - ResultCode Map(Process& target_process, VAddr address, MemoryPermission permissions, - MemoryPermission other_permissions); - - /** - * Unmaps a shared memory block from the specified address in system memory - * - * @param target_process Process from which to unmap the memory block. - * @param address Address in system memory where the shared memory block is mapped. - * @param unmap_size The amount of bytes to unmap from this shared memory instance. - * - * @return Result code of the unmap operation - * - * @pre The given size to unmap must be the same size as the amount of memory managed by - * the SharedMemory instance itself, otherwise ERR_INVALID_SIZE will be returned. 
*/ - ResultCode Unmap(Process& target_process, VAddr address, u64 unmap_size); + ResultCode Map(Process& target_process, VAddr address, std::size_t size, + Memory::MemoryPermission permission); /** * Gets a pointer to the shared memory block * @param offset Offset from the start of the shared memory block to get pointer * @return A pointer to the shared memory block from the specified offset */ - u8* GetPointer(std::size_t offset = 0); + u8* GetPointer(std::size_t offset = 0) { + return device_memory.GetPointer(physical_address + offset); + } /** - * Gets a constant pointer to the shared memory block + * Gets a pointer to the shared memory block * @param offset Offset from the start of the shared memory block to get pointer - * @return A constant pointer to the shared memory block from the specified offset + * @return A pointer to the shared memory block from the specified offset */ - const u8* GetPointer(std::size_t offset = 0) const; + const u8* GetPointer(std::size_t offset = 0) const { + return device_memory.GetPointer(physical_address + offset); + } private: - /// Backing memory for this shared memory block. - std::shared_ptr<PhysicalMemory> backing_block; - /// Offset into the backing block for this shared memory. - std::size_t backing_block_offset = 0; - /// Size of the memory block. Page-aligned. - u64 size = 0; - /// Permission restrictions applied to the process which created the block. - MemoryPermission permissions{}; - /// Permission restrictions applied to other processes mapping the block. - MemoryPermission other_permissions{}; - /// Process that created this shared memory block. - Process* owner_process; - /// Address of shared memory block in the owner process if specified. - VAddr base_address = 0; - /// Name of shared memory object. + Core::DeviceMemory& device_memory; + Process* owner_process{}; + Memory::PageLinkedList page_list; + Memory::MemoryPermission owner_permission{}; + Memory::MemoryPermission user_permission{}; + PAddr physical_address{}; + std::size_t size{}; std::string name; }; diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 4ffc113c2..4134acf65 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -24,6 +24,8 @@ #include "core/hle/kernel/errors.h" #include "core/hle/kernel/handle_table.h" #include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/memory/memory_block.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/mutex.h" #include "core/hle/kernel/process.h" #include "core/hle/kernel/readable_event.h" @@ -31,6 +33,7 @@ #include "core/hle/kernel/scheduler.h" #include "core/hle/kernel/shared_memory.h" #include "core/hle/kernel/svc.h" +#include "core/hle/kernel/svc_types.h" #include "core/hle/kernel/svc_wrap.h" #include "core/hle/kernel/synchronization.h" #include "core/hle/kernel/thread.h" @@ -42,7 +45,7 @@ #include "core/memory.h" #include "core/reporter.h" -namespace Kernel { +namespace Kernel::Svc { namespace { // Checks if address + size is greater than the given address @@ -58,8 +61,8 @@ constexpr u64 MAIN_MEMORY_SIZE = 0x200000000; // Helper function that performs the common sanity checks for svcMapMemory // and svcUnmapMemory. This is doable, as both functions perform their sanitizing // in the same order. 
-ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_addr, VAddr src_addr, - u64 size) { +ResultCode MapUnmapMemorySanityChecks(const Memory::PageTable& manager, VAddr dst_addr, + VAddr src_addr, u64 size) { if (!Common::Is4KBAligned(dst_addr)) { LOG_ERROR(Kernel_SVC, "Destination address is not aligned to 4KB, 0x{:016X}", dst_addr); return ERR_INVALID_ADDRESS; @@ -93,36 +96,33 @@ ResultCode MapUnmapMemorySanityChecks(const VMManager& vm_manager, VAddr dst_add return ERR_INVALID_ADDRESS_STATE; } - if (!vm_manager.IsWithinAddressSpace(src_addr, size)) { + if (!manager.IsInsideAddressSpace(src_addr, size)) { LOG_ERROR(Kernel_SVC, "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", src_addr, size); return ERR_INVALID_ADDRESS_STATE; } - if (!vm_manager.IsWithinStackRegion(dst_addr, size)) { + if (manager.IsOutsideStackRegion(dst_addr, size)) { LOG_ERROR(Kernel_SVC, "Destination is not within the stack region, addr=0x{:016X}, size=0x{:016X}", dst_addr, size); return ERR_INVALID_MEMORY_RANGE; } - const VAddr dst_end_address = dst_addr + size; - if (dst_end_address > vm_manager.GetHeapRegionBaseAddress() && - vm_manager.GetHeapRegionEndAddress() > dst_addr) { + if (manager.IsInsideHeapRegion(dst_addr, size)) { LOG_ERROR(Kernel_SVC, "Destination does not fit within the heap region, addr=0x{:016X}, " - "size=0x{:016X}, end_addr=0x{:016X}", - dst_addr, size, dst_end_address); + "size=0x{:016X}", + dst_addr, size); return ERR_INVALID_MEMORY_RANGE; } - if (dst_end_address > vm_manager.GetMapRegionBaseAddress() && - vm_manager.GetMapRegionEndAddress() > dst_addr) { + if (manager.IsInsideAliasRegion(dst_addr, size)) { LOG_ERROR(Kernel_SVC, "Destination does not fit within the map region, addr=0x{:016X}, " - "size=0x{:016X}, end_addr=0x{:016X}", - dst_addr, size, dst_end_address); + "size=0x{:016X}", + dst_addr, size); return ERR_INVALID_MEMORY_RANGE; } @@ -177,13 +177,10 @@ static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_s return ERR_INVALID_SIZE; } - auto& vm_manager = system.Kernel().CurrentProcess()->VMManager(); - const auto alloc_result = vm_manager.SetHeapSize(heap_size); - if (alloc_result.Failed()) { - return alloc_result.Code(); - } + auto& page_table{system.Kernel().CurrentProcess()->PageTable()}; + + CASCADE_RESULT(*heap_addr, page_table.SetHeapSize(heap_size)); - *heap_addr = *alloc_result; return RESULT_SUCCESS; } @@ -194,63 +191,6 @@ static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_s return result; } -static ResultCode SetMemoryPermission(Core::System& system, VAddr addr, u64 size, u32 prot) { - LOG_TRACE(Kernel_SVC, "called, addr=0x{:X}, size=0x{:X}, prot=0x{:X}", addr, size, prot); - - if (!Common::Is4KBAligned(addr)) { - LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, addr=0x{:016X}", addr); - return ERR_INVALID_ADDRESS; - } - - if (size == 0) { - LOG_ERROR(Kernel_SVC, "Size is 0"); - return ERR_INVALID_SIZE; - } - - if (!Common::Is4KBAligned(size)) { - LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, size=0x{:016X}", size); - return ERR_INVALID_SIZE; - } - - if (!IsValidAddressRange(addr, size)) { - LOG_ERROR(Kernel_SVC, "Region is not a valid address range, addr=0x{:016X}, size=0x{:016X}", - addr, size); - return ERR_INVALID_ADDRESS_STATE; - } - - const auto permission = static_cast<MemoryPermission>(prot); - if (permission != MemoryPermission::None && permission != MemoryPermission::Read && - permission != MemoryPermission::ReadWrite) { - LOG_ERROR(Kernel_SVC, "Invalid 
memory permission specified, Got memory permission=0x{:08X}", - static_cast<u32>(permission)); - return ERR_INVALID_MEMORY_PERMISSIONS; - } - - auto* const current_process = system.Kernel().CurrentProcess(); - auto& vm_manager = current_process->VMManager(); - - if (!vm_manager.IsWithinAddressSpace(addr, size)) { - LOG_ERROR(Kernel_SVC, - "Source is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, - size); - return ERR_INVALID_ADDRESS_STATE; - } - - const VMManager::VMAHandle iter = vm_manager.FindVMA(addr); - if (!vm_manager.IsValidHandle(iter)) { - LOG_ERROR(Kernel_SVC, "Unable to find VMA for address=0x{:016X}", addr); - return ERR_INVALID_ADDRESS_STATE; - } - - LOG_WARNING(Kernel_SVC, "Uniformity check on protected memory is not implemented."); - // TODO: Performs a uniformity check to make sure only protected memory is changed (it doesn't - // make sense to allow changing permissions on kernel memory itself, etc). - - const auto converted_permissions = SharedMemory::ConvertPermissions(permission); - - return vm_manager.ReprotectRange(addr, size, converted_permissions); -} - static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, u32 attribute) { LOG_DEBUG(Kernel_SVC, @@ -274,30 +214,19 @@ static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 si return ERR_INVALID_ADDRESS_STATE; } - const auto mem_attribute = static_cast<MemoryAttribute>(attribute); - const auto mem_mask = static_cast<MemoryAttribute>(mask); - const auto attribute_with_mask = mem_attribute | mem_mask; - - if (attribute_with_mask != mem_mask) { + const auto attributes{static_cast<Memory::MemoryAttribute>(mask | attribute)}; + if (attributes != static_cast<Memory::MemoryAttribute>(mask) || + (attributes | Memory::MemoryAttribute::Uncached) != Memory::MemoryAttribute::Uncached) { LOG_ERROR(Kernel_SVC, "Memory attribute doesn't match the given mask (Attribute: 0x{:X}, Mask: {:X}", attribute, mask); return ERR_INVALID_COMBINATION; } - if ((attribute_with_mask | MemoryAttribute::Uncached) != MemoryAttribute::Uncached) { - LOG_ERROR(Kernel_SVC, "Specified attribute isn't equal to MemoryAttributeUncached (8)."); - return ERR_INVALID_COMBINATION; - } - - auto& vm_manager = system.Kernel().CurrentProcess()->VMManager(); - if (!vm_manager.IsWithinAddressSpace(address, size)) { - LOG_ERROR(Kernel_SVC, - "Given address (0x{:016X}) is outside the bounds of the address space.", address); - return ERR_INVALID_ADDRESS_STATE; - } + auto& page_table{system.Kernel().CurrentProcess()->PageTable()}; - return vm_manager.SetMemoryAttribute(address, size, mem_mask, mem_attribute); + return page_table.SetMemoryAttribute(address, size, static_cast<Memory::MemoryAttribute>(mask), + static_cast<Memory::MemoryAttribute>(attribute)); } /// Maps a memory range into a different range. 
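A note on the SetMemoryAttribute rework above: the old multi-branch validation collapses into two bitwise comparisons before delegating to PageTable::SetMemoryAttribute. Below is a minimal standalone sketch of just that bit logic; the enum and function names are illustrative stand-ins for this example, not yuzu API, with only the Uncached bit (bit 3, as in svc_types.h) assumed.

    #include <cstdint>

    // Illustrative stand-in for Kernel::Memory::MemoryAttribute.
    enum MemoryAttribute : std::uint32_t {
        Attribute_Uncached = 1u << 3,
    };

    // Mirrors the two rejection tests in the handler: every attribute bit
    // must be covered by the mask, and only Uncached may be touched.
    bool IsValidSetMemoryAttributePair(std::uint32_t mask, std::uint32_t attribute) {
        const std::uint32_t attributes = mask | attribute;
        if (attributes != mask) {
            return false; // attribute sets a bit the mask does not cover
        }
        if ((attributes | Attribute_Uncached) != Attribute_Uncached) {
            return false; // a bit other than Uncached would be changed
        }
        return true; // handler proceeds to PageTable::SetMemoryAttribute
    }

With this, (mask=8, attribute=8) and (mask=8, attribute=0) pass, while (mask=1, attribute=1) fails the Uncached test; the failing cases correspond to the ERR_INVALID_COMBINATION path above.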
@@ -305,14 +234,14 @@ static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, src_addr, size); - auto& vm_manager = system.Kernel().CurrentProcess()->VMManager(); - const auto result = MapUnmapMemorySanityChecks(vm_manager, dst_addr, src_addr, size); + auto& page_table{system.Kernel().CurrentProcess()->PageTable()}; - if (result.IsError()) { + if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)}; + result.IsError()) { return result; } - return vm_manager.MirrorMemory(dst_addr, src_addr, size, MemoryState::Stack); + return page_table.Map(dst_addr, src_addr, size); } /// Unmaps a region that was previously mapped with svcMapMemory @@ -320,21 +249,14 @@ static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_ad LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, src_addr, size); - auto& vm_manager = system.Kernel().CurrentProcess()->VMManager(); - const auto result = MapUnmapMemorySanityChecks(vm_manager, dst_addr, src_addr, size); + auto& page_table{system.Kernel().CurrentProcess()->PageTable()}; - if (result.IsError()) { + if (const ResultCode result{MapUnmapMemorySanityChecks(page_table, dst_addr, src_addr, size)}; + result.IsError()) { return result; } - const auto unmap_res = vm_manager.UnmapRange(dst_addr, size); - - // Reprotect the source mapping on success - if (unmap_res.IsSuccess()) { - ASSERT(vm_manager.ReprotectRange(src_addr, size, VMAPermission::ReadWrite).IsSuccess()); - } - - return unmap_res; + return page_table.Unmap(dst_addr, src_addr, size); } /// Connect to an OS service given the port name, returns the handle to the port to out @@ -367,6 +289,8 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out_handle, return ERR_NOT_FOUND; } + ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Sessions, 1)); + auto client_port = it->second; std::shared_ptr<ClientSession> client_session; @@ -538,7 +462,7 @@ static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_hand "requesting_current_thread_handle=0x{:08X}", holding_thread_handle, mutex_addr, requesting_thread_handle); - if (Memory::IsKernelVirtualAddress(mutex_addr)) { + if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) { LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}", mutex_addr); return ERR_INVALID_ADDRESS_STATE; @@ -558,7 +482,7 @@ static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_hand static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) { LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr); - if (Memory::IsKernelVirtualAddress(mutex_addr)) { + if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) { LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}", mutex_addr); return ERR_INVALID_ADDRESS_STATE; @@ -683,7 +607,6 @@ static void Break(Core::System& system, u32 reason, u64 info1, u64 info2) { auto* const current_thread = system.CurrentScheduler().GetCurrentThread(); const auto thread_processor_id = current_thread->GetProcessorID(); system.ArmInterface(static_cast<std::size_t>(thread_processor_id)).LogBacktrace(); - ASSERT(false); system.Kernel().CurrentProcess()->PrepareForTermination(); @@ -785,35 +708,35 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha return RESULT_SUCCESS; case 
GetInfoType::MapRegionBaseAddr: - *result = process->VMManager().GetMapRegionBaseAddress(); + *result = process->PageTable().GetAliasRegionStart(); return RESULT_SUCCESS; case GetInfoType::MapRegionSize: - *result = process->VMManager().GetMapRegionSize(); + *result = process->PageTable().GetAliasRegionSize(); return RESULT_SUCCESS; case GetInfoType::HeapRegionBaseAddr: - *result = process->VMManager().GetHeapRegionBaseAddress(); + *result = process->PageTable().GetHeapRegionStart(); return RESULT_SUCCESS; case GetInfoType::HeapRegionSize: - *result = process->VMManager().GetHeapRegionSize(); + *result = process->PageTable().GetHeapRegionSize(); return RESULT_SUCCESS; case GetInfoType::ASLRRegionBaseAddr: - *result = process->VMManager().GetASLRRegionBaseAddress(); + *result = process->PageTable().GetAliasCodeRegionStart(); return RESULT_SUCCESS; case GetInfoType::ASLRRegionSize: - *result = process->VMManager().GetASLRRegionSize(); + *result = process->PageTable().GetAliasCodeRegionSize(); return RESULT_SUCCESS; case GetInfoType::StackRegionBaseAddr: - *result = process->VMManager().GetStackRegionBaseAddress(); + *result = process->PageTable().GetStackRegionStart(); return RESULT_SUCCESS; case GetInfoType::StackRegionSize: - *result = process->VMManager().GetStackRegionSize(); + *result = process->PageTable().GetStackRegionSize(); return RESULT_SUCCESS; case GetInfoType::TotalPhysicalMemoryAvailable: @@ -987,20 +910,29 @@ static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) return ERR_INVALID_MEMORY_RANGE; } - Process* const current_process = system.Kernel().CurrentProcess(); - auto& vm_manager = current_process->VMManager(); + Process* const current_process{system.Kernel().CurrentProcess()}; + auto& page_table{current_process->PageTable()}; if (current_process->GetSystemResourceSize() == 0) { LOG_ERROR(Kernel_SVC, "System Resource Size is zero"); return ERR_INVALID_STATE; } - if (!vm_manager.IsWithinMapRegion(addr, size)) { - LOG_ERROR(Kernel_SVC, "Range not within map region"); + if (!page_table.IsInsideAddressSpace(addr, size)) { + LOG_ERROR(Kernel_SVC, + "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, + size); + return ERR_INVALID_MEMORY_RANGE; + } + + if (page_table.IsOutsideAliasRegion(addr, size)) { + LOG_ERROR(Kernel_SVC, + "Address is not within the alias region, addr=0x{:016X}, size=0x{:016X}", addr, + size); return ERR_INVALID_MEMORY_RANGE; } - return vm_manager.MapPhysicalMemory(addr, size); + return page_table.MapPhysicalMemory(addr, size); } /// Unmaps memory previously mapped via MapPhysicalMemory @@ -1027,20 +959,29 @@ static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size return ERR_INVALID_MEMORY_RANGE; } - Process* const current_process = system.Kernel().CurrentProcess(); - auto& vm_manager = current_process->VMManager(); + Process* const current_process{system.Kernel().CurrentProcess()}; + auto& page_table{current_process->PageTable()}; if (current_process->GetSystemResourceSize() == 0) { LOG_ERROR(Kernel_SVC, "System Resource Size is zero"); return ERR_INVALID_STATE; } - if (!vm_manager.IsWithinMapRegion(addr, size)) { - LOG_ERROR(Kernel_SVC, "Range not within map region"); + if (!page_table.IsInsideAddressSpace(addr, size)) { + LOG_ERROR(Kernel_SVC, + "Address is not within the address space, addr=0x{:016X}, size=0x{:016X}", addr, + size); + return ERR_INVALID_MEMORY_RANGE; + } + + if (page_table.IsOutsideAliasRegion(addr, size)) { + LOG_ERROR(Kernel_SVC, + "Address is not within 
the alias region, addr=0x{:016X}, size=0x{:016X}", addr, + size); return ERR_INVALID_MEMORY_RANGE; } - return vm_manager.UnmapPhysicalMemory(addr, size); + return page_table.UnmapPhysicalMemory(addr, size); } /// Sets the thread activity @@ -1197,74 +1138,49 @@ static ResultCode MapSharedMemory(Core::System& system, Handle shared_memory_han return ERR_INVALID_ADDRESS_STATE; } - const auto permissions_type = static_cast<MemoryPermission>(permissions); - if (permissions_type != MemoryPermission::Read && - permissions_type != MemoryPermission::ReadWrite) { + const auto permission_type = static_cast<Memory::MemoryPermission>(permissions); + if ((permission_type | Memory::MemoryPermission::Write) != + Memory::MemoryPermission::ReadAndWrite) { LOG_ERROR(Kernel_SVC, "Expected Read or ReadWrite permission but got permissions=0x{:08X}", permissions); return ERR_INVALID_MEMORY_PERMISSIONS; } - auto* const current_process = system.Kernel().CurrentProcess(); - auto shared_memory = current_process->GetHandleTable().Get<SharedMemory>(shared_memory_handle); - if (!shared_memory) { - LOG_ERROR(Kernel_SVC, "Shared memory does not exist, shared_memory_handle=0x{:08X}", - shared_memory_handle); - return ERR_INVALID_HANDLE; - } + auto* const current_process{system.Kernel().CurrentProcess()}; + auto& page_table{current_process->PageTable()}; - const auto& vm_manager = current_process->VMManager(); - if (!vm_manager.IsWithinASLRRegion(addr, size)) { - LOG_ERROR(Kernel_SVC, "Region is not within the ASLR region. addr=0x{:016X}, size={:016X}", + if (page_table.IsInvalidRegion(addr, size)) { + LOG_ERROR(Kernel_SVC, + "Addr does not fit within the valid region, addr=0x{:016X}, " + "size=0x{:016X}", addr, size); return ERR_INVALID_MEMORY_RANGE; } - return shared_memory->Map(*current_process, addr, permissions_type, MemoryPermission::DontCare); -} - -static ResultCode UnmapSharedMemory(Core::System& system, Handle shared_memory_handle, VAddr addr, - u64 size) { - LOG_WARNING(Kernel_SVC, "called, shared_memory_handle=0x{:08X}, addr=0x{:X}, size=0x{:X}", - shared_memory_handle, addr, size); - - if (!Common::Is4KBAligned(addr)) { - LOG_ERROR(Kernel_SVC, "Address is not aligned to 4KB, addr=0x{:016X}", addr); - return ERR_INVALID_ADDRESS; - } - - if (size == 0) { - LOG_ERROR(Kernel_SVC, "Size is 0"); - return ERR_INVALID_SIZE; - } - - if (!Common::Is4KBAligned(size)) { - LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, size=0x{:016X}", size); - return ERR_INVALID_SIZE; + if (page_table.IsInsideHeapRegion(addr, size)) { + LOG_ERROR(Kernel_SVC, + "Addr does not fit within the heap region, addr=0x{:016X}, " + "size=0x{:016X}", + addr, size); + return ERR_INVALID_MEMORY_RANGE; } - if (!IsValidAddressRange(addr, size)) { - LOG_ERROR(Kernel_SVC, "Region is not a valid address range, addr=0x{:016X}, size=0x{:016X}", + if (page_table.IsInsideAliasRegion(addr, size)) { + LOG_ERROR(Kernel_SVC, + "Address does not fit within the map region, addr=0x{:016X}, " + "size=0x{:016X}", addr, size); - return ERR_INVALID_ADDRESS_STATE; + return ERR_INVALID_MEMORY_RANGE; } - auto* const current_process = system.Kernel().CurrentProcess(); - auto shared_memory = current_process->GetHandleTable().Get<SharedMemory>(shared_memory_handle); + auto shared_memory{current_process->GetHandleTable().Get<SharedMemory>(shared_memory_handle)}; if (!shared_memory) { LOG_ERROR(Kernel_SVC, "Shared memory does not exist, shared_memory_handle=0x{:08X}", shared_memory_handle); return ERR_INVALID_HANDLE; } - const auto& vm_manager = current_process->VMManager(); 
- if (!vm_manager.IsWithinASLRRegion(addr, size)) { - LOG_ERROR(Kernel_SVC, "Region is not within the ASLR region. addr=0x{:016X}, size={:016X}", - addr, size); - return ERR_INVALID_MEMORY_RANGE; - } - - return shared_memory->Unmap(*current_process, addr, size); + return shared_memory->Map(*current_process, addr, size, permission_type); } static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address, @@ -1279,18 +1195,17 @@ static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_add return ERR_INVALID_HANDLE; } - auto& memory = system.Memory(); - const auto& vm_manager = process->VMManager(); - const MemoryInfo memory_info = vm_manager.QueryMemory(address); - - memory.Write64(memory_info_address, memory_info.base_address); - memory.Write64(memory_info_address + 8, memory_info.size); - memory.Write32(memory_info_address + 16, memory_info.state); - memory.Write32(memory_info_address + 20, memory_info.attributes); - memory.Write32(memory_info_address + 24, memory_info.permission); - memory.Write32(memory_info_address + 32, memory_info.ipc_ref_count); - memory.Write32(memory_info_address + 28, memory_info.device_ref_count); - memory.Write32(memory_info_address + 36, 0); + auto& memory{system.Memory()}; + const auto memory_info{process->PageTable().QueryInfo(address).GetSvcMemoryInfo()}; + + memory.Write64(memory_info_address + 0x00, memory_info.addr); + memory.Write64(memory_info_address + 0x08, memory_info.size); + memory.Write32(memory_info_address + 0x10, static_cast<u32>(memory_info.state) & 0xff); + memory.Write32(memory_info_address + 0x14, static_cast<u32>(memory_info.attr)); + memory.Write32(memory_info_address + 0x18, static_cast<u32>(memory_info.perm)); + memory.Write32(memory_info_address + 0x1c, memory_info.ipc_refcount); + memory.Write32(memory_info_address + 0x20, memory_info.device_refcount); + memory.Write32(memory_info_address + 0x24, 0); // Page info appears to be currently unused by the kernel and is always set to zero. memory.Write32(page_info_address, 0); @@ -1314,142 +1229,6 @@ static ResultCode QueryMemory32(Core::System& system, u32 memory_info_address, return QueryMemory(system, memory_info_address, page_info_address, query_address); } -static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address, - u64 src_address, u64 size) { - LOG_DEBUG(Kernel_SVC, - "called. 
process_handle=0x{:08X}, dst_address=0x{:016X}, " - "src_address=0x{:016X}, size=0x{:016X}", - process_handle, dst_address, src_address, size); - - if (!Common::Is4KBAligned(src_address)) { - LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).", - src_address); - return ERR_INVALID_ADDRESS; - } - - if (!Common::Is4KBAligned(dst_address)) { - LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).", - dst_address); - return ERR_INVALID_ADDRESS; - } - - if (size == 0 || !Common::Is4KBAligned(size)) { - LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size); - return ERR_INVALID_SIZE; - } - - if (!IsValidAddressRange(dst_address, size)) { - LOG_ERROR(Kernel_SVC, - "Destination address range overflows the address space (dst_address=0x{:016X}, " - "size=0x{:016X}).", - dst_address, size); - return ERR_INVALID_ADDRESS_STATE; - } - - if (!IsValidAddressRange(src_address, size)) { - LOG_ERROR(Kernel_SVC, - "Source address range overflows the address space (src_address=0x{:016X}, " - "size=0x{:016X}).", - src_address, size); - return ERR_INVALID_ADDRESS_STATE; - } - - const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - auto process = handle_table.Get<Process>(process_handle); - if (!process) { - LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).", - process_handle); - return ERR_INVALID_HANDLE; - } - - auto& vm_manager = process->VMManager(); - if (!vm_manager.IsWithinAddressSpace(src_address, size)) { - LOG_ERROR(Kernel_SVC, - "Source address range is not within the address space (src_address=0x{:016X}, " - "size=0x{:016X}).", - src_address, size); - return ERR_INVALID_ADDRESS_STATE; - } - - if (!vm_manager.IsWithinASLRRegion(dst_address, size)) { - LOG_ERROR(Kernel_SVC, - "Destination address range is not within the ASLR region (dst_address=0x{:016X}, " - "size=0x{:016X}).", - dst_address, size); - return ERR_INVALID_MEMORY_RANGE; - } - - return vm_manager.MapCodeMemory(dst_address, src_address, size); -} - -static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_handle, - u64 dst_address, u64 src_address, u64 size) { - LOG_DEBUG(Kernel_SVC, - "called. 
process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, " - "size=0x{:016X}", - process_handle, dst_address, src_address, size); - - if (!Common::Is4KBAligned(dst_address)) { - LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).", - dst_address); - return ERR_INVALID_ADDRESS; - } - - if (!Common::Is4KBAligned(src_address)) { - LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).", - src_address); - return ERR_INVALID_ADDRESS; - } - - if (size == 0 || Common::Is4KBAligned(size)) { - LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size); - return ERR_INVALID_SIZE; - } - - if (!IsValidAddressRange(dst_address, size)) { - LOG_ERROR(Kernel_SVC, - "Destination address range overflows the address space (dst_address=0x{:016X}, " - "size=0x{:016X}).", - dst_address, size); - return ERR_INVALID_ADDRESS_STATE; - } - - if (!IsValidAddressRange(src_address, size)) { - LOG_ERROR(Kernel_SVC, - "Source address range overflows the address space (src_address=0x{:016X}, " - "size=0x{:016X}).", - src_address, size); - return ERR_INVALID_ADDRESS_STATE; - } - - const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); - auto process = handle_table.Get<Process>(process_handle); - if (!process) { - LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).", - process_handle); - return ERR_INVALID_HANDLE; - } - - auto& vm_manager = process->VMManager(); - if (!vm_manager.IsWithinAddressSpace(src_address, size)) { - LOG_ERROR(Kernel_SVC, - "Source address range is not within the address space (src_address=0x{:016X}, " - "size=0x{:016X}).", - src_address, size); - return ERR_INVALID_ADDRESS_STATE; - } - - if (!vm_manager.IsWithinASLRRegion(dst_address, size)) { - LOG_ERROR(Kernel_SVC, - "Destination address range is not within the ASLR region (dst_address=0x{:016X}, " - "size=0x{:016X}).", - dst_address, size); - return ERR_INVALID_MEMORY_RANGE; - } - - return vm_manager.UnmapCodeMemory(dst_address, src_address, size); -} - /// Exits the current process static void ExitProcess(Core::System& system) { auto* current_process = system.Kernel().CurrentProcess(); @@ -1506,6 +1285,9 @@ static ResultCode CreateThread(Core::System& system, Handle* out_handle, VAddr e } auto& kernel = system.Kernel(); + + ASSERT(kernel.CurrentProcess()->GetResourceLimit()->Reserve(ResourceType::Threads, 1)); + CASCADE_RESULT(std::shared_ptr<Thread> thread, Thread::Create(kernel, "", entry_point, priority, arg, processor_id, stack_top, *current_process)); @@ -1610,7 +1392,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}", mutex_addr, condition_variable_addr, thread_handle, nano_seconds); - if (Memory::IsKernelVirtualAddress(mutex_addr)) { + if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) { LOG_ERROR( Kernel_SVC, "Given mutex address must not be within the kernel address space. address=0x{:016X}", @@ -1741,7 +1523,7 @@ static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, type, value, timeout); // If the passed address is a kernel virtual address, return invalid memory state. 
- if (Memory::IsKernelVirtualAddress(address)) { + if (Core::Memory::IsKernelVirtualAddress(address)) { LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address); return ERR_INVALID_ADDRESS_STATE; } @@ -1769,7 +1551,7 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, address, type, value, num_to_wake); // If the passed address is a kernel virtual address, return invalid memory state. - if (Memory::IsKernelVirtualAddress(address)) { + if (Core::Memory::IsKernelVirtualAddress(address)) { LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address); return ERR_INVALID_ADDRESS_STATE; } @@ -1865,9 +1647,9 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd return ERR_INVALID_ADDRESS_STATE; } - const auto perms = static_cast<MemoryPermission>(permissions); - if (perms != MemoryPermission::None && perms != MemoryPermission::Read && - perms != MemoryPermission::ReadWrite) { + const auto perms{static_cast<Memory::MemoryPermission>(permissions)}; + if (perms > Memory::MemoryPermission::ReadAndWrite || + perms == Memory::MemoryPermission::Write) { LOG_ERROR(Kernel_SVC, "Invalid memory permissions for transfer memory! (perms={:08X})", permissions); return ERR_INVALID_MEMORY_PERMISSIONS; @@ -1890,111 +1672,6 @@ static ResultCode CreateTransferMemory(Core::System& system, Handle* handle, VAd return RESULT_SUCCESS; } -static ResultCode MapTransferMemory(Core::System& system, Handle handle, VAddr address, u64 size, - u32 permission_raw) { - LOG_DEBUG(Kernel_SVC, - "called. handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}, permissions=0x{:08X}", - handle, address, size, permission_raw); - - if (!Common::Is4KBAligned(address)) { - LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).", - address); - return ERR_INVALID_ADDRESS; - } - - if (size == 0 || !Common::Is4KBAligned(size)) { - LOG_ERROR(Kernel_SVC, - "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).", - size); - return ERR_INVALID_SIZE; - } - - if (!IsValidAddressRange(address, size)) { - LOG_ERROR(Kernel_SVC, - "Given address and size overflows the 64-bit range (address=0x{:016X}, " - "size=0x{:016X}).", - address, size); - return ERR_INVALID_ADDRESS_STATE; - } - - const auto permissions = static_cast<MemoryPermission>(permission_raw); - if (permissions != MemoryPermission::None && permissions != MemoryPermission::Read && - permissions != MemoryPermission::ReadWrite) { - LOG_ERROR(Kernel_SVC, "Invalid transfer memory permissions given (permissions=0x{:08X}).", - permission_raw); - return ERR_INVALID_STATE; - } - - const auto& kernel = system.Kernel(); - const auto* const current_process = kernel.CurrentProcess(); - const auto& handle_table = current_process->GetHandleTable(); - - auto transfer_memory = handle_table.Get<TransferMemory>(handle); - if (!transfer_memory) { - LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).", - handle); - return ERR_INVALID_HANDLE; - } - - if (!current_process->VMManager().IsWithinASLRRegion(address, size)) { - LOG_ERROR(Kernel_SVC, - "Given address and size don't fully fit within the ASLR region " - "(address=0x{:016X}, size=0x{:016X}).", - address, size); - return ERR_INVALID_MEMORY_RANGE; - } - - return transfer_memory->MapMemory(address, size, permissions); -} - -static ResultCode UnmapTransferMemory(Core::System& system, Handle handle, VAddr address, - u64 size) { - LOG_DEBUG(Kernel_SVC, "called. 
handle=0x{:08X}, address=0x{:016X}, size=0x{:016X}", handle, - address, size); - - if (!Common::Is4KBAligned(address)) { - LOG_ERROR(Kernel_SVC, "Transfer memory addresses must be 4KB aligned (size=0x{:016X}).", - address); - return ERR_INVALID_ADDRESS; - } - - if (size == 0 || !Common::Is4KBAligned(size)) { - LOG_ERROR(Kernel_SVC, - "Transfer memory sizes must be 4KB aligned and not be zero (size=0x{:016X}).", - size); - return ERR_INVALID_SIZE; - } - - if (!IsValidAddressRange(address, size)) { - LOG_ERROR(Kernel_SVC, - "Given address and size overflows the 64-bit range (address=0x{:016X}, " - "size=0x{:016X}).", - address, size); - return ERR_INVALID_ADDRESS_STATE; - } - - const auto& kernel = system.Kernel(); - const auto* const current_process = kernel.CurrentProcess(); - const auto& handle_table = current_process->GetHandleTable(); - - auto transfer_memory = handle_table.Get<TransferMemory>(handle); - if (!transfer_memory) { - LOG_ERROR(Kernel_SVC, "Nonexistent transfer memory handle given (handle=0x{:08X}).", - handle); - return ERR_INVALID_HANDLE; - } - - if (!current_process->VMManager().IsWithinASLRRegion(address, size)) { - LOG_ERROR(Kernel_SVC, - "Given address and size don't fully fit within the ASLR region " - "(address=0x{:016X}, size=0x{:016X}).", - address, size); - return ERR_INVALID_MEMORY_RANGE; - } - - return transfer_memory->UnmapMemory(address, size); -} - static ResultCode GetThreadCoreMask(Core::System& system, Handle thread_handle, u32* core, u64* mask) { LOG_TRACE(Kernel_SVC, "called, handle=0x{:08X}", thread_handle); @@ -2073,52 +1750,6 @@ static ResultCode SetThreadCoreMask(Core::System& system, Handle thread_handle, return RESULT_SUCCESS; } -static ResultCode CreateSharedMemory(Core::System& system, Handle* handle, u64 size, - u32 local_permissions, u32 remote_permissions) { - LOG_TRACE(Kernel_SVC, "called, size=0x{:X}, localPerms=0x{:08X}, remotePerms=0x{:08X}", size, - local_permissions, remote_permissions); - if (size == 0) { - LOG_ERROR(Kernel_SVC, "Size is 0"); - return ERR_INVALID_SIZE; - } - if (!Common::Is4KBAligned(size)) { - LOG_ERROR(Kernel_SVC, "Size is not aligned to 4KB, 0x{:016X}", size); - return ERR_INVALID_SIZE; - } - - if (size >= MAIN_MEMORY_SIZE) { - LOG_ERROR(Kernel_SVC, "Size is not less than 8GB, 0x{:016X}", size); - return ERR_INVALID_SIZE; - } - - const auto local_perms = static_cast<MemoryPermission>(local_permissions); - if (local_perms != MemoryPermission::Read && local_perms != MemoryPermission::ReadWrite) { - LOG_ERROR(Kernel_SVC, - "Invalid local memory permissions, expected Read or ReadWrite but got " - "local_permissions={}", - static_cast<u32>(local_permissions)); - return ERR_INVALID_MEMORY_PERMISSIONS; - } - - const auto remote_perms = static_cast<MemoryPermission>(remote_permissions); - if (remote_perms != MemoryPermission::Read && remote_perms != MemoryPermission::ReadWrite && - remote_perms != MemoryPermission::DontCare) { - LOG_ERROR(Kernel_SVC, - "Invalid remote memory permissions, expected Read, ReadWrite or DontCare but got " - "remote_permissions={}", - static_cast<u32>(remote_permissions)); - return ERR_INVALID_MEMORY_PERMISSIONS; - } - - auto& kernel = system.Kernel(); - auto process = kernel.CurrentProcess(); - auto& handle_table = process->GetHandleTable(); - auto shared_mem_handle = SharedMemory::Create(kernel, process, size, local_perms, remote_perms); - - CASCADE_RESULT(*handle, handle_table.Create(shared_mem_handle)); - return RESULT_SUCCESS; -} - static ResultCode CreateEvent(Core::System& system, Handle* 
write_handle, Handle* read_handle) { LOG_DEBUG(Kernel_SVC, "called"); @@ -2305,11 +1936,10 @@ static ResultCode GetProcessList(Core::System& system, u32* out_num_processes, } const auto& kernel = system.Kernel(); - const auto& vm_manager = kernel.CurrentProcess()->VMManager(); const auto total_copy_size = out_process_ids_size * sizeof(u64); - if (out_process_ids_size > 0 && - !vm_manager.IsWithinAddressSpace(out_process_ids, total_copy_size)) { + if (out_process_ids_size > 0 && !kernel.CurrentProcess()->PageTable().IsInsideAddressSpace( + out_process_ids, total_copy_size)) { LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}", out_process_ids, out_process_ids + total_copy_size); return ERR_INVALID_ADDRESS_STATE; @@ -2345,11 +1975,10 @@ static ResultCode GetThreadList(Core::System& system, u32* out_num_threads, VAdd } const auto* const current_process = system.Kernel().CurrentProcess(); - const auto& vm_manager = current_process->VMManager(); const auto total_copy_size = out_thread_ids_size * sizeof(u64); if (out_thread_ids_size > 0 && - !vm_manager.IsWithinAddressSpace(out_thread_ids, total_copy_size)) { + !current_process->PageTable().IsInsideAddressSpace(out_thread_ids, total_copy_size)) { LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}", out_thread_ids, out_thread_ids + total_copy_size); return ERR_INVALID_ADDRESS_STATE; @@ -2510,7 +2139,7 @@ static const FunctionDef SVC_Table_32[] = { static const FunctionDef SVC_Table_64[] = { {0x00, nullptr, "Unknown"}, {0x01, SvcWrap64<SetHeapSize>, "SetHeapSize"}, - {0x02, SvcWrap64<SetMemoryPermission>, "SetMemoryPermission"}, + {0x02, nullptr, "SetMemoryPermission"}, {0x03, SvcWrap64<SetMemoryAttribute>, "SetMemoryAttribute"}, {0x04, SvcWrap64<MapMemory>, "MapMemory"}, {0x05, SvcWrap64<UnmapMemory>, "UnmapMemory"}, @@ -2528,7 +2157,7 @@ static const FunctionDef SVC_Table_64[] = { {0x11, SvcWrap64<SignalEvent>, "SignalEvent"}, {0x12, SvcWrap64<ClearEvent>, "ClearEvent"}, {0x13, SvcWrap64<MapSharedMemory>, "MapSharedMemory"}, - {0x14, SvcWrap64<UnmapSharedMemory>, "UnmapSharedMemory"}, + {0x14, nullptr, "UnmapSharedMemory"}, {0x15, SvcWrap64<CreateTransferMemory>, "CreateTransferMemory"}, {0x16, SvcWrap64<CloseHandle>, "CloseHandle"}, {0x17, SvcWrap64<ResetSignal>, "ResetSignal"}, @@ -2588,9 +2217,9 @@ static const FunctionDef SVC_Table_64[] = { {0x4D, nullptr, "SleepSystem"}, {0x4E, nullptr, "ReadWriteRegister"}, {0x4F, nullptr, "SetProcessActivity"}, - {0x50, SvcWrap64<CreateSharedMemory>, "CreateSharedMemory"}, - {0x51, SvcWrap64<MapTransferMemory>, "MapTransferMemory"}, - {0x52, SvcWrap64<UnmapTransferMemory>, "UnmapTransferMemory"}, + {0x50, nullptr, "CreateSharedMemory"}, + {0x51, nullptr, "MapTransferMemory"}, + {0x52, nullptr, "UnmapTransferMemory"}, {0x53, nullptr, "CreateInterruptEvent"}, {0x54, nullptr, "QueryPhysicalAddress"}, {0x55, nullptr, "QueryIoMapping"}, @@ -2627,8 +2256,8 @@ static const FunctionDef SVC_Table_64[] = { {0x74, nullptr, "MapProcessMemory"}, {0x75, nullptr, "UnmapProcessMemory"}, {0x76, SvcWrap64<QueryProcessMemory>, "QueryProcessMemory"}, - {0x77, SvcWrap64<MapProcessCodeMemory>, "MapProcessCodeMemory"}, - {0x78, SvcWrap64<UnmapProcessCodeMemory>, "UnmapProcessCodeMemory"}, + {0x77, nullptr, "MapProcessCodeMemory"}, + {0x78, nullptr, "UnmapProcessCodeMemory"}, {0x79, nullptr, "CreateProcess"}, {0x7A, nullptr, "StartProcess"}, {0x7B, nullptr, "TerminateProcess"}, @@ -2656,7 +2285,7 @@ static const FunctionDef* GetSVCInfo64(u32 
func_num) { MICROPROFILE_DEFINE(Kernel_SVC, "Kernel", "SVC", MP_RGB(70, 200, 70)); -void CallSVC(Core::System& system, u32 immediate) { +void Call(Core::System& system, u32 immediate) { MICROPROFILE_SCOPE(Kernel_SVC); // Lock the global kernel mutex when we enter the kernel HLE. @@ -2675,4 +2304,4 @@ void CallSVC(Core::System& system, u32 immediate) { } } -} // namespace Kernel +} // namespace Kernel::Svc diff --git a/src/core/hle/kernel/svc.h b/src/core/hle/kernel/svc.h index c5539ac1c..46e64277e 100644 --- a/src/core/hle/kernel/svc.h +++ b/src/core/hle/kernel/svc.h @@ -10,8 +10,8 @@ namespace Core { class System; } -namespace Kernel { +namespace Kernel::Svc { -void CallSVC(Core::System& system, u32 immediate); +void Call(Core::System& system, u32 immediate); -} // namespace Kernel +} // namespace Kernel::Svc diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h new file mode 100644 index 000000000..986724beb --- /dev/null +++ b/src/core/hle/kernel/svc_types.h @@ -0,0 +1,68 @@ +// Copyright 2020 yuzu emulator team +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_funcs.h" +#include "common/common_types.h" + +namespace Kernel::Svc { + +enum class MemoryState : u32 { + Free = 0x00, + Io = 0x01, + Static = 0x02, + Code = 0x03, + CodeData = 0x04, + Normal = 0x05, + Shared = 0x06, + Alias = 0x07, + AliasCode = 0x08, + AliasCodeData = 0x09, + Ipc = 0x0A, + Stack = 0x0B, + ThreadLocal = 0x0C, + Transfered = 0x0D, + SharedTransfered = 0x0E, + SharedCode = 0x0F, + Inaccessible = 0x10, + NonSecureIpc = 0x11, + NonDeviceIpc = 0x12, + Kernel = 0x13, + GeneratedCode = 0x14, + CodeOut = 0x15, +}; +DECLARE_ENUM_FLAG_OPERATORS(MemoryState); + +enum class MemoryAttribute : u32 { + Locked = (1 << 0), + IpcLocked = (1 << 1), + DeviceShared = (1 << 2), + Uncached = (1 << 3), +}; +DECLARE_ENUM_FLAG_OPERATORS(MemoryAttribute); + +enum class MemoryPermission : u32 { + None = (0 << 0), + Read = (1 << 0), + Write = (1 << 1), + Execute = (1 << 2), + ReadWrite = Read | Write, + ReadExecute = Read | Execute, + DontCare = (1 << 28), +}; +DECLARE_ENUM_FLAG_OPERATORS(MemoryPermission); + +struct MemoryInfo { + u64 addr{}; + u64 size{}; + MemoryState state{}; + MemoryAttribute attr{}; + MemoryPermission perm{}; + u32 ipc_refcount{}; + u32 device_refcount{}; + u32 padding{}; +}; + +} // namespace Kernel::Svc diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 83e956036..4c0451c01 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp @@ -85,6 +85,7 @@ void Thread::ResumeFromWait() { ASSERT_MSG(wait_objects.empty(), "Thread is waking up while waiting for objects"); switch (status) { + case ThreadStatus::Paused: case ThreadStatus::WaitSynch: case ThreadStatus::WaitHLEEvent: case ThreadStatus::WaitSleep: @@ -92,6 +93,7 @@ void Thread::ResumeFromWait() { case ThreadStatus::WaitMutex: case ThreadStatus::WaitCondVar: case ThreadStatus::WaitArb: + case ThreadStatus::Dormant: break; case ThreadStatus::Ready: diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp index f2d3f8b49..765f408c3 100644 --- a/src/core/hle/kernel/transfer_memory.cpp +++ b/src/core/hle/kernel/transfer_memory.cpp @@ -2,17 +2,16 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. 
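A couple of self-contained checks that follow from the new Kernel::Svc types and the reworked CreateTransferMemory validation earlier in svc.cpp; the MemoryPermission enum below is a simplified stand-in for the kernel's memory-permission type, not a definition taken from this diff.

#include <cstdint>

enum class MemoryPermission : std::uint32_t {
    None = 0,
    Read = 1,
    Write = 2,
    ReadAndWrite = 3,
};

// Mirrors the new transfer-memory validity test: None, Read and ReadAndWrite are
// accepted, while Write-only and anything beyond ReadAndWrite are rejected.
constexpr bool IsValidTransferMemoryPermission(MemoryPermission perms) {
    return perms <= MemoryPermission::ReadAndWrite && perms != MemoryPermission::Write;
}
static_assert(IsValidTransferMemoryPermission(MemoryPermission::None));
static_assert(IsValidTransferMemoryPermission(MemoryPermission::Read));
static_assert(IsValidTransferMemoryPermission(MemoryPermission::ReadAndWrite));
static_assert(!IsValidTransferMemoryPermission(MemoryPermission::Write));

// Field-for-field mirror of Kernel::Svc::MemoryInfo from the new svc_types.h;
// the layout comes out to 0x28 bytes, the same size the removed Kernel::MemoryInfo
// asserted.
struct MemoryInfoMirror {
    std::uint64_t addr;
    std::uint64_t size;
    std::uint32_t state;
    std::uint32_t attr;
    std::uint32_t perm;
    std::uint32_t ipc_refcount;
    std::uint32_t device_refcount;
    std::uint32_t padding;
};
static_assert(sizeof(MemoryInfoMirror) == 0x28);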
-#include "core/hle/kernel/errors.h" #include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/shared_memory.h" #include "core/hle/kernel/transfer_memory.h" #include "core/hle/result.h" #include "core/memory.h" namespace Kernel { -TransferMemory::TransferMemory(KernelCore& kernel, Memory::Memory& memory) +TransferMemory::TransferMemory(KernelCore& kernel, Core::Memory::Memory& memory) : Object{kernel}, memory{memory} {} TransferMemory::~TransferMemory() { @@ -20,14 +19,15 @@ TransferMemory::~TransferMemory() { Reset(); } -std::shared_ptr<TransferMemory> TransferMemory::Create(KernelCore& kernel, Memory::Memory& memory, - VAddr base_address, u64 size, - MemoryPermission permissions) { +std::shared_ptr<TransferMemory> TransferMemory::Create(KernelCore& kernel, + Core::Memory::Memory& memory, + VAddr base_address, std::size_t size, + Memory::MemoryPermission permissions) { std::shared_ptr<TransferMemory> transfer_memory{ std::make_shared<TransferMemory>(kernel, memory)}; transfer_memory->base_address = base_address; - transfer_memory->memory_size = size; + transfer_memory->size = size; transfer_memory->owner_permissions = permissions; transfer_memory->owner_process = kernel.CurrentProcess(); @@ -38,98 +38,12 @@ const u8* TransferMemory::GetPointer() const { return memory.GetPointer(base_address); } -u64 TransferMemory::GetSize() const { - return memory_size; -} - -ResultCode TransferMemory::MapMemory(VAddr address, u64 size, MemoryPermission permissions) { - if (memory_size != size) { - return ERR_INVALID_SIZE; - } - - if (owner_permissions != permissions) { - return ERR_INVALID_STATE; - } - - if (is_mapped) { - return ERR_INVALID_STATE; - } - - backing_block = std::make_shared<PhysicalMemory>(size); - - const auto map_state = owner_permissions == MemoryPermission::None - ? 
MemoryState::TransferMemoryIsolated - : MemoryState::TransferMemory; - auto& vm_manager = owner_process->VMManager(); - const auto map_result = vm_manager.MapMemoryBlock(address, backing_block, 0, size, map_state); - if (map_result.Failed()) { - return map_result.Code(); - } - - is_mapped = true; - return RESULT_SUCCESS; -} - ResultCode TransferMemory::Reserve() { - auto& vm_manager{owner_process->VMManager()}; - const auto check_range_result{vm_manager.CheckRangeState( - base_address, memory_size, MemoryState::FlagTransfer | MemoryState::FlagMemoryPoolAllocated, - MemoryState::FlagTransfer | MemoryState::FlagMemoryPoolAllocated, VMAPermission::All, - VMAPermission::ReadWrite, MemoryAttribute::Mask, MemoryAttribute::None, - MemoryAttribute::IpcAndDeviceMapped)}; - - if (check_range_result.Failed()) { - return check_range_result.Code(); - } - - auto [state_, permissions_, attribute] = *check_range_result; - - if (const auto result{vm_manager.ReprotectRange( - base_address, memory_size, SharedMemory::ConvertPermissions(owner_permissions))}; - result.IsError()) { - return result; - } - - return vm_manager.SetMemoryAttribute(base_address, memory_size, MemoryAttribute::Mask, - attribute | MemoryAttribute::Locked); + return owner_process->PageTable().ReserveTransferMemory(base_address, size, owner_permissions); } ResultCode TransferMemory::Reset() { - auto& vm_manager{owner_process->VMManager()}; - if (const auto result{vm_manager.CheckRangeState( - base_address, memory_size, - MemoryState::FlagTransfer | MemoryState::FlagMemoryPoolAllocated, - MemoryState::FlagTransfer | MemoryState::FlagMemoryPoolAllocated, VMAPermission::None, - VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::Locked, - MemoryAttribute::IpcAndDeviceMapped)}; - result.Failed()) { - return result.Code(); - } - - if (const auto result{ - vm_manager.ReprotectRange(base_address, memory_size, VMAPermission::ReadWrite)}; - result.IsError()) { - return result; - } - - return vm_manager.SetMemoryAttribute(base_address, memory_size, MemoryAttribute::Mask, - MemoryAttribute::None); -} - -ResultCode TransferMemory::UnmapMemory(VAddr address, u64 size) { - if (memory_size != size) { - return ERR_INVALID_SIZE; - } - - auto& vm_manager = owner_process->VMManager(); - const auto result = vm_manager.UnmapRange(address, size); - - if (result.IsError()) { - return result; - } - - is_mapped = false; - return RESULT_SUCCESS; + return owner_process->PageTable().ResetTransferMemory(base_address, size); } } // namespace Kernel diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h index 6e388536a..05e9f7464 100644 --- a/src/core/hle/kernel/transfer_memory.h +++ b/src/core/hle/kernel/transfer_memory.h @@ -6,12 +6,13 @@ #include <memory> +#include "core/hle/kernel/memory/memory_block.h" #include "core/hle/kernel/object.h" #include "core/hle/kernel/physical_memory.h" union ResultCode; -namespace Memory { +namespace Core::Memory { class Memory; } @@ -20,8 +21,6 @@ namespace Kernel { class KernelCore; class Process; -enum class MemoryPermission : u32; - /// Defines the interface for transfer memory objects. 
/// /// Transfer memory is typically used for the purpose of @@ -30,14 +29,14 @@ enum class MemoryPermission : u32; /// class TransferMemory final : public Object { public: - explicit TransferMemory(KernelCore& kernel, Memory::Memory& memory); + explicit TransferMemory(KernelCore& kernel, Core::Memory::Memory& memory); ~TransferMemory() override; static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory; - static std::shared_ptr<TransferMemory> Create(KernelCore& kernel, Memory::Memory& memory, - VAddr base_address, u64 size, - MemoryPermission permissions); + static std::shared_ptr<TransferMemory> Create(KernelCore& kernel, Core::Memory::Memory& memory, + VAddr base_address, std::size_t size, + Memory::MemoryPermission permissions); TransferMemory(const TransferMemory&) = delete; TransferMemory& operator=(const TransferMemory&) = delete; @@ -61,29 +60,9 @@ public: const u8* GetPointer() const; /// Gets the size of the memory backing this instance in bytes. - u64 GetSize() const; - - /// Attempts to map transfer memory with the given range and memory permissions. - /// - /// @param address The base address to being mapping memory at. - /// @param size The size of the memory to map, in bytes. - /// @param permissions The memory permissions to check against when mapping memory. - /// - /// @pre The given address, size, and memory permissions must all match - /// the same values that were given when creating the transfer memory - /// instance. - /// - ResultCode MapMemory(VAddr address, u64 size, MemoryPermission permissions); - - /// Unmaps the transfer memory with the given range - /// - /// @param address The base address to begin unmapping memory at. - /// @param size The size of the memory to unmap, in bytes. - /// - /// @pre The given address and size must be the same as the ones used - /// to create the transfer memory instance. - /// - ResultCode UnmapMemory(VAddr address, u64 size); + constexpr std::size_t GetSize() const { + return size; + } /// Reserves the region to be used for the transfer memory, called after the transfer memory is /// created. @@ -94,25 +73,19 @@ public: ResultCode Reset(); private: - /// Memory block backing this instance. - std::shared_ptr<PhysicalMemory> backing_block; - /// The base address for the memory managed by this instance. - VAddr base_address = 0; + VAddr base_address{}; /// Size of the memory, in bytes, that this instance manages. - u64 memory_size = 0; + std::size_t size{}; /// The memory permissions that are applied to this instance. - MemoryPermission owner_permissions{}; + Memory::MemoryPermission owner_permissions{}; /// The process that this transfer memory instance was created under. - Process* owner_process = nullptr; - - /// Whether or not this transfer memory instance has mapped memory. - bool is_mapped = false; + Process* owner_process{}; - Memory::Memory& memory; + Core::Memory::Memory& memory; }; } // namespace Kernel diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp deleted file mode 100644 index 024c22901..000000000 --- a/src/core/hle/kernel/vm_manager.cpp +++ /dev/null @@ -1,1175 +0,0 @@ -// Copyright 2015 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. 
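The 1,175-line VMManager implementation deleted below leans on one lookup idiom throughout: regions are stored in a std::map keyed by base address, and the region containing an address is the predecessor of upper_bound. A minimal, runnable sketch of that idiom, with Region/RegionMap as simplified stand-ins for VirtualMemoryArea and the VMA map:

#include <cstdint>
#include <iterator>
#include <map>

using VAddr = std::uint64_t;

struct Region {
    VAddr base;
    std::uint64_t size;
};
using RegionMap = std::map<VAddr, Region>;

// FindVMA's lookup: since keys are region bases, upper_bound(addr) is the first
// region starting past addr, so its predecessor contains addr. Assumes the map
// always holds at least one region covering the space, as VMManager::Reset
// guaranteed by seeding a single free region.
RegionMap::const_iterator FindRegion(const RegionMap& regions, VAddr addr, VAddr space_end) {
    if (addr >= space_end) {
        return regions.end();
    }
    return std::prev(regions.upper_bound(addr));
}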
- -#include <algorithm> -#include <cstring> -#include <iterator> -#include <utility> -#include "common/alignment.h" -#include "common/assert.h" -#include "common/logging/log.h" -#include "common/memory_hook.h" -#include "core/core.h" -#include "core/file_sys/program_metadata.h" -#include "core/hle/kernel/errors.h" -#include "core/hle/kernel/process.h" -#include "core/hle/kernel/resource_limit.h" -#include "core/hle/kernel/vm_manager.h" -#include "core/memory.h" - -namespace Kernel { -namespace { -const char* GetMemoryStateName(MemoryState state) { - static constexpr const char* names[] = { - "Unmapped", "Io", - "Normal", "Code", - "CodeData", "Heap", - "Shared", "Unknown1", - "ModuleCode", "ModuleCodeData", - "IpcBuffer0", "Stack", - "ThreadLocal", "TransferMemoryIsolated", - "TransferMemory", "ProcessMemory", - "Inaccessible", "IpcBuffer1", - "IpcBuffer3", "KernelStack", - }; - - return names[ToSvcMemoryState(state)]; -} - -// Checks if a given address range lies within a larger address range. -constexpr bool IsInsideAddressRange(VAddr address, u64 size, VAddr address_range_begin, - VAddr address_range_end) { - const VAddr end_address = address + size - 1; - return address_range_begin <= address && end_address <= address_range_end - 1; -} -} // Anonymous namespace - -bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const { - ASSERT(base + size == next.base); - if (permissions != next.permissions || state != next.state || attribute != next.attribute || - type != next.type) { - return false; - } - if ((attribute & MemoryAttribute::DeviceMapped) == MemoryAttribute::DeviceMapped) { - // TODO: Can device mapped memory be merged sanely? - // Not merging it may cause inaccuracies versus hardware when memory layout is queried. - return false; - } - if (type == VMAType::AllocatedMemoryBlock) { - return true; - } - if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) { - return false; - } - if (type == VMAType::MMIO && paddr + size != next.paddr) { - return false; - } - return true; -} - -VMManager::VMManager(Core::System& system) : system{system} { - // Default to assuming a 39-bit address space. This way we have a sane - // starting point with executables that don't provide metadata. - Reset(FileSys::ProgramAddressSpaceType::Is39Bit); -} - -VMManager::~VMManager() = default; - -void VMManager::Reset(FileSys::ProgramAddressSpaceType type) { - Clear(); - - InitializeMemoryRegionRanges(type); - - page_table.Resize(address_space_width); - - // Initialize the map with a single free region covering the entire managed space. - VirtualMemoryArea initial_vma; - initial_vma.size = address_space_end; - vma_map.emplace(initial_vma.base, initial_vma); - - UpdatePageTableForVMA(initial_vma); -} - -VMManager::VMAHandle VMManager::FindVMA(VAddr target) const { - if (target >= address_space_end) { - return vma_map.end(); - } else { - return std::prev(vma_map.upper_bound(target)); - } -} - -bool VMManager::IsValidHandle(VMAHandle handle) const { - return handle != vma_map.cend(); -} - -ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target, - std::shared_ptr<PhysicalMemory> block, - std::size_t offset, u64 size, - MemoryState state, VMAPermission perm) { - ASSERT(block != nullptr); - ASSERT(offset + size <= block->size()); - - // This is the appropriately sized VMA that will turn into our allocation. 
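What CarveVMA does at call sites like the one below, reduced to a runnable sketch over a simplified interval map (Region/RegionMap stand in for the real VMA types): carving [base, base + size) out of an enclosing region splits at most twice, once at each end, leaving an exact-size entry to allocate.

#include <cstdint>
#include <map>

using VAddr = std::uint64_t;

struct Region {
    VAddr base;
    std::uint64_t size;
};
using RegionMap = std::map<VAddr, Region>;

// std::map keeps references stable across emplace, so `r` stays valid throughout.
RegionMap::iterator Carve(RegionMap& regions, RegionMap::iterator it, VAddr base,
                          std::uint64_t size) {
    Region& r = it->second;
    const std::uint64_t start = base - r.base;
    const std::uint64_t end = start + size;
    if (end != r.size) { // split off the tail past the carved range
        regions.emplace(r.base + end, Region{r.base + end, r.size - end});
        r.size = end;
    }
    if (start != 0) { // split off the head; the new entry is the carved range
        it = regions.emplace(base, Region{base, size}).first;
        r.size = start;
    }
    return it;
}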
- CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size)); - VirtualMemoryArea& final_vma = vma_handle->second; - ASSERT(final_vma.size == size); - - final_vma.type = VMAType::AllocatedMemoryBlock; - final_vma.permissions = perm; - final_vma.state = state; - final_vma.backing_block = std::move(block); - final_vma.offset = offset; - UpdatePageTableForVMA(final_vma); - - return MakeResult<VMAHandle>(MergeAdjacent(vma_handle)); -} - -ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u64 size, - MemoryState state) { - ASSERT(memory != nullptr); - - // This is the appropriately sized VMA that will turn into our allocation. - CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size)); - VirtualMemoryArea& final_vma = vma_handle->second; - ASSERT(final_vma.size == size); - - final_vma.type = VMAType::BackingMemory; - final_vma.permissions = VMAPermission::ReadWrite; - final_vma.state = state; - final_vma.backing_memory = memory; - UpdatePageTableForVMA(final_vma); - - return MakeResult<VMAHandle>(MergeAdjacent(vma_handle)); -} - -ResultVal<VAddr> VMManager::FindFreeRegion(u64 size) const { - return FindFreeRegion(GetASLRRegionBaseAddress(), GetASLRRegionEndAddress(), size); -} - -ResultVal<VAddr> VMManager::FindFreeRegion(VAddr begin, VAddr end, u64 size) const { - ASSERT(begin < end); - ASSERT(size <= end - begin); - - const VMAHandle vma_handle = - std::find_if(vma_map.begin(), vma_map.end(), [begin, end, size](const auto& vma) { - if (vma.second.type != VMAType::Free) { - return false; - } - const VAddr vma_base = vma.second.base; - const VAddr vma_end = vma_base + vma.second.size; - const VAddr assumed_base = (begin < vma_base) ? vma_base : begin; - const VAddr used_range = assumed_base + size; - - return vma_base <= assumed_base && assumed_base < used_range && used_range < end && - used_range <= vma_end; - }); - - if (vma_handle == vma_map.cend()) { - // TODO(Subv): Find the correct error code here. - return RESULT_UNKNOWN; - } - - const VAddr target = std::max(begin, vma_handle->second.base); - return MakeResult<VAddr>(target); -} - -ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size, - MemoryState state, - Common::MemoryHookPointer mmio_handler) { - // This is the appropriately sized VMA that will turn into our allocation. 
- CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size)); - VirtualMemoryArea& final_vma = vma_handle->second; - ASSERT(final_vma.size == size); - - final_vma.type = VMAType::MMIO; - final_vma.permissions = VMAPermission::ReadWrite; - final_vma.state = state; - final_vma.paddr = paddr; - final_vma.mmio_handler = std::move(mmio_handler); - UpdatePageTableForVMA(final_vma); - - return MakeResult<VMAHandle>(MergeAdjacent(vma_handle)); -} - -VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) { - VirtualMemoryArea& vma = vma_handle->second; - vma.type = VMAType::Free; - vma.permissions = VMAPermission::None; - vma.state = MemoryState::Unmapped; - vma.attribute = MemoryAttribute::None; - - vma.backing_block = nullptr; - vma.offset = 0; - vma.backing_memory = nullptr; - vma.paddr = 0; - - UpdatePageTableForVMA(vma); - - return MergeAdjacent(vma_handle); -} - -ResultCode VMManager::UnmapRange(VAddr target, u64 size) { - CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size)); - const VAddr target_end = target + size; - - const VMAIter end = vma_map.end(); - // The comparison against the end of the range must be done using addresses since VMAs can be - // merged during this process, causing invalidation of the iterators. - while (vma != end && vma->second.base < target_end) { - vma = std::next(Unmap(vma)); - } - - ASSERT(FindVMA(target)->second.size >= size); - - return RESULT_SUCCESS; -} - -VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) { - VMAIter iter = StripIterConstness(vma_handle); - - VirtualMemoryArea& vma = iter->second; - vma.permissions = new_perms; - UpdatePageTableForVMA(vma); - - return MergeAdjacent(iter); -} - -ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_perms) { - CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size)); - const VAddr target_end = target + size; - - const VMAIter end = vma_map.end(); - // The comparison against the end of the range must be done using addresses since VMAs can be - // merged during this process, causing invalidation of the iterators. - while (vma != end && vma->second.base < target_end) { - vma = std::next(StripIterConstness(Reprotect(vma, new_perms))); - } - - return RESULT_SUCCESS; -} - -ResultVal<VAddr> VMManager::SetHeapSize(u64 size) { - if (size > GetHeapRegionSize()) { - return ERR_OUT_OF_MEMORY; - } - - // No need to do any additional work if the heap is already the given size. - if (size == GetCurrentHeapSize()) { - return MakeResult(heap_region_base); - } - - if (heap_memory == nullptr) { - // Initialize heap - heap_memory = std::make_shared<PhysicalMemory>(size); - heap_end = heap_region_base + size; - } else { - UnmapRange(heap_region_base, GetCurrentHeapSize()); - } - - // If necessary, expand backing vector to cover new heap extents in - // the case of allocating. Otherwise, shrink the backing memory, - // if a smaller heap has been requested. - heap_memory->resize(size); - heap_memory->shrink_to_fit(); - RefreshMemoryBlockMappings(heap_memory.get()); - - heap_end = heap_region_base + size; - ASSERT(GetCurrentHeapSize() == heap_memory->size()); - - const auto mapping_result = - MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap); - if (mapping_result.Failed()) { - return mapping_result.Code(); - } - - return MakeResult<VAddr>(heap_region_base); -} - -ResultCode VMManager::MapPhysicalMemory(VAddr target, u64 size) { - // Check how much memory we've already mapped. 
- const auto mapped_size_result = SizeOfAllocatedVMAsInRange(target, size); - if (mapped_size_result.Failed()) { - return mapped_size_result.Code(); - } - - // If we've already mapped the desired amount, return early. - const std::size_t mapped_size = *mapped_size_result; - if (mapped_size == size) { - return RESULT_SUCCESS; - } - - // Check that we can map the memory we want. - const auto res_limit = system.CurrentProcess()->GetResourceLimit(); - const u64 physmem_remaining = res_limit->GetMaxResourceValue(ResourceType::PhysicalMemory) - - res_limit->GetCurrentResourceValue(ResourceType::PhysicalMemory); - if (physmem_remaining < (size - mapped_size)) { - return ERR_RESOURCE_LIMIT_EXCEEDED; - } - - // Keep track of the memory regions we unmap. - std::vector<std::pair<u64, u64>> mapped_regions; - ResultCode result = RESULT_SUCCESS; - - // Iterate, trying to map memory. - { - const auto end_addr = target + size; - const auto last_addr = end_addr - 1; - VAddr cur_addr = target; - - auto iter = FindVMA(target); - ASSERT(iter != vma_map.end()); - - while (true) { - const auto& vma = iter->second; - const auto vma_start = vma.base; - const auto vma_end = vma_start + vma.size; - const auto vma_last = vma_end - 1; - - // Map the memory block - const auto map_size = std::min(end_addr - cur_addr, vma_end - cur_addr); - if (vma.state == MemoryState::Unmapped) { - const auto map_res = - MapMemoryBlock(cur_addr, std::make_shared<PhysicalMemory>(map_size), 0, - map_size, MemoryState::Heap, VMAPermission::ReadWrite); - result = map_res.Code(); - if (result.IsError()) { - break; - } - - mapped_regions.emplace_back(cur_addr, map_size); - } - - // Break once we hit the end of the range. - if (last_addr <= vma_last) { - break; - } - - // Advance to the next block. - cur_addr = vma_end; - iter = FindVMA(cur_addr); - ASSERT(iter != vma_map.end()); - } - } - - // If we failed, unmap memory. - if (result.IsError()) { - for (const auto [unmap_address, unmap_size] : mapped_regions) { - ASSERT_MSG(UnmapRange(unmap_address, unmap_size).IsSuccess(), - "Failed to unmap memory range."); - } - - return result; - } - - // Update amount of mapped physical memory. - physical_memory_mapped += size - mapped_size; - - return RESULT_SUCCESS; -} - -ResultCode VMManager::UnmapPhysicalMemory(VAddr target, u64 size) { - // Check how much memory is currently mapped. - const auto mapped_size_result = SizeOfUnmappablePhysicalMemoryInRange(target, size); - if (mapped_size_result.Failed()) { - return mapped_size_result.Code(); - } - - // If we've already unmapped all the memory, return early. - const std::size_t mapped_size = *mapped_size_result; - if (mapped_size == 0) { - return RESULT_SUCCESS; - } - - // Keep track of the memory regions we unmap. - std::vector<std::pair<u64, u64>> unmapped_regions; - ResultCode result = RESULT_SUCCESS; - - // Try to unmap regions. 
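MapPhysicalMemory above and UnmapPhysicalMemory here share a commit-or-roll-back shape: each successfully processed sub-range is recorded, and on failure the recorded ranges are undone so the address space is left as it was found. A minimal sketch of that pattern, with Map/Unmap as hypothetical stand-ins for MapMemoryBlock/UnmapRange:

#include <cstdint>
#include <utility>
#include <vector>

using VAddr = std::uint64_t;

// Stand-ins only; the real calls return ResultCode rather than bool.
bool Map(VAddr, std::uint64_t) { return true; }
void Unmap(VAddr, std::uint64_t) {}

bool MapAll(const std::vector<std::pair<VAddr, std::uint64_t>>& ranges) {
    std::vector<std::pair<VAddr, std::uint64_t>> done;
    for (const auto& [addr, size] : ranges) {
        if (!Map(addr, size)) {
            // Roll back everything recorded so far, newest first.
            for (auto it = done.rbegin(); it != done.rend(); ++it) {
                Unmap(it->first, it->second);
            }
            return false;
        }
        done.emplace_back(addr, size);
    }
    return true;
}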
- { - const auto end_addr = target + size; - const auto last_addr = end_addr - 1; - VAddr cur_addr = target; - - auto iter = FindVMA(target); - ASSERT(iter != vma_map.end()); - - while (true) { - const auto& vma = iter->second; - const auto vma_start = vma.base; - const auto vma_end = vma_start + vma.size; - const auto vma_last = vma_end - 1; - - // Unmap the memory block - const auto unmap_size = std::min(end_addr - cur_addr, vma_end - cur_addr); - if (vma.state == MemoryState::Heap) { - result = UnmapRange(cur_addr, unmap_size); - if (result.IsError()) { - break; - } - - unmapped_regions.emplace_back(cur_addr, unmap_size); - } - - // Break once we hit the end of the range. - if (last_addr <= vma_last) { - break; - } - - // Advance to the next block. - cur_addr = vma_end; - iter = FindVMA(cur_addr); - ASSERT(iter != vma_map.end()); - } - } - - // If we failed, re-map regions. - // TODO: Preserve memory contents? - if (result.IsError()) { - for (const auto [map_address, map_size] : unmapped_regions) { - const auto remap_res = - MapMemoryBlock(map_address, std::make_shared<PhysicalMemory>(map_size), 0, map_size, - MemoryState::Heap, VMAPermission::None); - ASSERT_MSG(remap_res.Succeeded(), "Failed to remap a memory block."); - } - - return result; - } - - // Update mapped amount - physical_memory_mapped -= mapped_size; - - return RESULT_SUCCESS; -} - -ResultCode VMManager::MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) { - constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped; - const auto src_check_result = CheckRangeState( - src_address, size, MemoryState::All, MemoryState::Heap, VMAPermission::All, - VMAPermission::ReadWrite, MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute); - - if (src_check_result.Failed()) { - return src_check_result.Code(); - } - - const auto mirror_result = - MirrorMemory(dst_address, src_address, size, MemoryState::ModuleCode); - if (mirror_result.IsError()) { - return mirror_result; - } - - // Ensure we lock the source memory region. - const auto src_vma_result = CarveVMARange(src_address, size); - if (src_vma_result.Failed()) { - return src_vma_result.Code(); - } - auto src_vma_iter = *src_vma_result; - src_vma_iter->second.attribute = MemoryAttribute::Locked; - Reprotect(src_vma_iter, VMAPermission::Read); - - // The destination memory region is fine as is, however we need to make it read-only. - return ReprotectRange(dst_address, size, VMAPermission::Read); -} - -ResultCode VMManager::UnmapCodeMemory(VAddr dst_address, VAddr src_address, u64 size) { - constexpr auto ignore_attribute = MemoryAttribute::LockedForIPC | MemoryAttribute::DeviceMapped; - const auto src_check_result = CheckRangeState( - src_address, size, MemoryState::All, MemoryState::Heap, VMAPermission::None, - VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::Locked, ignore_attribute); - - if (src_check_result.Failed()) { - return src_check_result.Code(); - } - - // Yes, the kernel only checks the first page of the region. 
- const auto dst_check_result = - CheckRangeState(dst_address, Memory::PAGE_SIZE, MemoryState::FlagModule, - MemoryState::FlagModule, VMAPermission::None, VMAPermission::None, - MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute); - - if (dst_check_result.Failed()) { - return dst_check_result.Code(); - } - - const auto dst_memory_state = std::get<MemoryState>(*dst_check_result); - const auto dst_contiguous_check_result = CheckRangeState( - dst_address, size, MemoryState::All, dst_memory_state, VMAPermission::None, - VMAPermission::None, MemoryAttribute::Mask, MemoryAttribute::None, ignore_attribute); - - if (dst_contiguous_check_result.Failed()) { - return dst_contiguous_check_result.Code(); - } - - const auto unmap_result = UnmapRange(dst_address, size); - if (unmap_result.IsError()) { - return unmap_result; - } - - // With the mirrored portion unmapped, restore the original region's traits. - const auto src_vma_result = CarveVMARange(src_address, size); - if (src_vma_result.Failed()) { - return src_vma_result.Code(); - } - auto src_vma_iter = *src_vma_result; - src_vma_iter->second.state = MemoryState::Heap; - src_vma_iter->second.attribute = MemoryAttribute::None; - Reprotect(src_vma_iter, VMAPermission::ReadWrite); - - if (dst_memory_state == MemoryState::ModuleCode) { - system.InvalidateCpuInstructionCaches(); - } - - return unmap_result; -} - -MemoryInfo VMManager::QueryMemory(VAddr address) const { - const auto vma = FindVMA(address); - MemoryInfo memory_info{}; - - if (IsValidHandle(vma)) { - memory_info.base_address = vma->second.base; - memory_info.attributes = ToSvcMemoryAttribute(vma->second.attribute); - memory_info.permission = static_cast<u32>(vma->second.permissions); - memory_info.size = vma->second.size; - memory_info.state = ToSvcMemoryState(vma->second.state); - } else { - memory_info.base_address = address_space_end; - memory_info.permission = static_cast<u32>(VMAPermission::None); - memory_info.size = 0 - address_space_end; - memory_info.state = static_cast<u32>(MemoryState::Inaccessible); - } - - return memory_info; -} - -ResultCode VMManager::SetMemoryAttribute(VAddr address, u64 size, MemoryAttribute mask, - MemoryAttribute attribute) { - constexpr auto ignore_mask = - MemoryAttribute::Uncached | MemoryAttribute::DeviceMapped | MemoryAttribute::Locked; - constexpr auto attribute_mask = ~ignore_mask; - - const auto result = CheckRangeState( - address, size, MemoryState::FlagUncached, MemoryState::FlagUncached, VMAPermission::None, - VMAPermission::None, attribute_mask, MemoryAttribute::None, ignore_mask); - - if (result.Failed()) { - return result.Code(); - } - - const auto [prev_state, prev_permissions, prev_attributes] = *result; - const auto new_attribute = (prev_attributes & ~mask) | (mask & attribute); - - const auto carve_result = CarveVMARange(address, size); - if (carve_result.Failed()) { - return carve_result.Code(); - } - - auto vma_iter = *carve_result; - vma_iter->second.attribute = new_attribute; - - MergeAdjacent(vma_iter); - return RESULT_SUCCESS; -} - -ResultCode VMManager::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state) { - const auto vma = FindVMA(src_addr); - - ASSERT_MSG(vma != vma_map.end(), "Invalid memory address"); - ASSERT_MSG(vma->second.backing_block, "Backing block doesn't exist for address"); - - // The returned VMA might be a bigger one encompassing the desired address. 
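A worked example of the offset arithmetic that follows, with illustrative values: a VMA spanning [0x10000, 0x18000) that maps its backing block from offset 0x2000, mirrored starting at src_addr = 0x13000, must read the backing block at 0x2000 + (0x13000 - 0x10000) = 0x5000.

#include <cstdint>

constexpr std::uint64_t vma_base = 0x10000;
constexpr std::uint64_t vma_backing_offset = 0x2000;
constexpr std::uint64_t src_addr = 0x13000;
constexpr std::uint64_t vma_offset = src_addr - vma_base;                      // 0x3000
constexpr std::uint64_t backing_block_offset = vma_backing_offset + vma_offset;
static_assert(backing_block_offset == 0x5000);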
- const auto vma_offset = src_addr - vma->first; - ASSERT_MSG(vma_offset + size <= vma->second.size, - "Shared memory exceeds bounds of mapped block"); - - const std::shared_ptr<PhysicalMemory>& backing_block = vma->second.backing_block; - const std::size_t backing_block_offset = vma->second.offset + vma_offset; - - CASCADE_RESULT(auto new_vma, - MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size, state)); - // Protect mirror with permissions from old region - Reprotect(new_vma, vma->second.permissions); - // Remove permissions from old region - ReprotectRange(src_addr, size, VMAPermission::None); - - return RESULT_SUCCESS; -} - -void VMManager::RefreshMemoryBlockMappings(const PhysicalMemory* block) { - // If this ever proves to have a noticeable performance impact, allow users of the function to - // specify a specific range of addresses to limit the scan to. - for (const auto& p : vma_map) { - const VirtualMemoryArea& vma = p.second; - if (block == vma.backing_block.get()) { - UpdatePageTableForVMA(vma); - } - } -} - -void VMManager::LogLayout() const { - for (const auto& p : vma_map) { - const VirtualMemoryArea& vma = p.second; - LOG_DEBUG(Kernel, "{:016X} - {:016X} size: {:016X} {}{}{} {}", vma.base, - vma.base + vma.size, vma.size, - (u8)vma.permissions & (u8)VMAPermission::Read ? 'R' : '-', - (u8)vma.permissions & (u8)VMAPermission::Write ? 'W' : '-', - (u8)vma.permissions & (u8)VMAPermission::Execute ? 'X' : '-', - GetMemoryStateName(vma.state)); - } -} - -VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) { - // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given - // non-const access to its container. - return vma_map.erase(iter, iter); // Erases an empty range of elements -} - -ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u64 size) { - ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x{:016X}", size); - ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x{:016X}", base); - - VMAIter vma_handle = StripIterConstness(FindVMA(base)); - if (vma_handle == vma_map.end()) { - // Target address is outside the range managed by the kernel - return ERR_INVALID_ADDRESS; - } - - const VirtualMemoryArea& vma = vma_handle->second; - if (vma.type != VMAType::Free) { - // Region is already allocated - return ERR_INVALID_ADDRESS_STATE; - } - - const VAddr start_in_vma = base - vma.base; - const VAddr end_in_vma = start_in_vma + size; - - if (end_in_vma > vma.size) { - // Requested allocation doesn't fit inside VMA - return ERR_INVALID_ADDRESS_STATE; - } - - if (end_in_vma != vma.size) { - // Split VMA at the end of the allocated region - SplitVMA(vma_handle, end_in_vma); - } - if (start_in_vma != 0) { - // Split VMA at the start of the allocated region - vma_handle = SplitVMA(vma_handle, start_in_vma); - } - - return MakeResult<VMAIter>(vma_handle); -} - -ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u64 size) { - ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x{:016X}", size); - ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x{:016X}", target); - - const VAddr target_end = target + size; - ASSERT(target_end >= target); - ASSERT(target_end <= address_space_end); - ASSERT(size > 0); - - VMAIter begin_vma = StripIterConstness(FindVMA(target)); - const VMAIter i_end = vma_map.lower_bound(target_end); - if (std::any_of(begin_vma, i_end, - [](const auto& entry) { return entry.second.type == VMAType::Free; })) { - 
return ERR_INVALID_ADDRESS_STATE; - } - - if (target != begin_vma->second.base) { - begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base); - } - - VMAIter end_vma = StripIterConstness(FindVMA(target_end)); - if (end_vma != vma_map.end() && target_end != end_vma->second.base) { - end_vma = SplitVMA(end_vma, target_end - end_vma->second.base); - } - - return MakeResult<VMAIter>(begin_vma); -} - -VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) { - VirtualMemoryArea& old_vma = vma_handle->second; - VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA - - // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably - // a bug. This restriction might be removed later. - ASSERT(offset_in_vma < old_vma.size); - ASSERT(offset_in_vma > 0); - - old_vma.size = offset_in_vma; - new_vma.base += offset_in_vma; - new_vma.size -= offset_in_vma; - - switch (new_vma.type) { - case VMAType::Free: - break; - case VMAType::AllocatedMemoryBlock: - new_vma.offset += offset_in_vma; - break; - case VMAType::BackingMemory: - new_vma.backing_memory += offset_in_vma; - break; - case VMAType::MMIO: - new_vma.paddr += offset_in_vma; - break; - } - - ASSERT(old_vma.CanBeMergedWith(new_vma)); - - return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma); -} - -VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) { - const VMAIter next_vma = std::next(iter); - if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) { - MergeAdjacentVMA(iter->second, next_vma->second); - vma_map.erase(next_vma); - } - - if (iter != vma_map.begin()) { - VMAIter prev_vma = std::prev(iter); - if (prev_vma->second.CanBeMergedWith(iter->second)) { - MergeAdjacentVMA(prev_vma->second, iter->second); - vma_map.erase(iter); - iter = prev_vma; - } - } - - return iter; -} - -void VMManager::MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right) { - ASSERT(left.CanBeMergedWith(right)); - - // Always merge allocated memory blocks, even when they don't share the same backing block. - if (left.type == VMAType::AllocatedMemoryBlock && - (left.backing_block != right.backing_block || left.offset + left.size != right.offset)) { - - // Check if we can save work. - if (left.offset == 0 && left.size == left.backing_block->size()) { - // Fast case: left is an entire backing block. - left.backing_block->resize(left.size + right.size); - std::memcpy(left.backing_block->data() + left.size, - right.backing_block->data() + right.offset, right.size); - } else { - // Slow case: make a new memory block for left and right. - auto new_memory = std::make_shared<PhysicalMemory>(); - new_memory->resize(left.size + right.size); - std::memcpy(new_memory->data(), left.backing_block->data() + left.offset, left.size); - std::memcpy(new_memory->data() + left.size, right.backing_block->data() + right.offset, - right.size); - - left.backing_block = std::move(new_memory); - left.offset = 0; - } - - // Page table update is needed, because backing memory changed. - left.size += right.size; - UpdatePageTableForVMA(left); - } else { - // Just update the size. 
- left.size += right.size; - } -} - -void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) { - auto& memory = system.Memory(); - - switch (vma.type) { - case VMAType::Free: - memory.UnmapRegion(page_table, vma.base, vma.size); - break; - case VMAType::AllocatedMemoryBlock: - memory.MapMemoryRegion(page_table, vma.base, vma.size, *vma.backing_block, vma.offset); - break; - case VMAType::BackingMemory: - memory.MapMemoryRegion(page_table, vma.base, vma.size, vma.backing_memory); - break; - case VMAType::MMIO: - memory.MapIoRegion(page_table, vma.base, vma.size, vma.mmio_handler); - break; - } -} - -void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType type) { - u64 map_region_size = 0; - u64 heap_region_size = 0; - u64 stack_region_size = 0; - u64 tls_io_region_size = 0; - - u64 stack_and_tls_io_end = 0; - - switch (type) { - case FileSys::ProgramAddressSpaceType::Is32Bit: - case FileSys::ProgramAddressSpaceType::Is32BitNoMap: - address_space_width = 32; - code_region_base = 0x200000; - code_region_end = code_region_base + 0x3FE00000; - aslr_region_base = 0x200000; - aslr_region_end = aslr_region_base + 0xFFE00000; - if (type == FileSys::ProgramAddressSpaceType::Is32Bit) { - map_region_size = 0x40000000; - heap_region_size = 0x40000000; - } else { - map_region_size = 0; - heap_region_size = 0x80000000; - } - stack_and_tls_io_end = 0x40000000; - break; - case FileSys::ProgramAddressSpaceType::Is36Bit: - address_space_width = 36; - code_region_base = 0x8000000; - code_region_end = code_region_base + 0x78000000; - aslr_region_base = 0x8000000; - aslr_region_end = aslr_region_base + 0xFF8000000; - map_region_size = 0x180000000; - heap_region_size = 0x180000000; - stack_and_tls_io_end = 0x80000000; - break; - case FileSys::ProgramAddressSpaceType::Is39Bit: - address_space_width = 39; - code_region_base = 0x8000000; - code_region_end = code_region_base + 0x80000000; - aslr_region_base = 0x8000000; - aslr_region_end = aslr_region_base + 0x7FF8000000; - map_region_size = 0x1000000000; - heap_region_size = 0x180000000; - stack_region_size = 0x80000000; - tls_io_region_size = 0x1000000000; - break; - default: - UNREACHABLE_MSG("Invalid address space type specified: {}", static_cast<u32>(type)); - return; - } - - const u64 stack_and_tls_io_begin = aslr_region_base; - - address_space_base = 0; - address_space_end = 1ULL << address_space_width; - - map_region_base = code_region_end; - map_region_end = map_region_base + map_region_size; - - heap_region_base = map_region_end; - heap_region_end = heap_region_base + heap_region_size; - heap_end = heap_region_base; - - stack_region_base = heap_region_end; - stack_region_end = stack_region_base + stack_region_size; - - tls_io_region_base = stack_region_end; - tls_io_region_end = tls_io_region_base + tls_io_region_size; - - if (stack_region_size == 0) { - stack_region_base = stack_and_tls_io_begin; - stack_region_end = stack_and_tls_io_end; - } - - if (tls_io_region_size == 0) { - tls_io_region_base = stack_and_tls_io_begin; - tls_io_region_end = stack_and_tls_io_end; - } -} - -void VMManager::Clear() { - ClearVMAMap(); - ClearPageTable(); -} - -void VMManager::ClearVMAMap() { - vma_map.clear(); -} - -void VMManager::ClearPageTable() { - std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr); - page_table.special_regions.clear(); - std::fill(page_table.attributes.begin(), page_table.attributes.end(), - Common::PageType::Unmapped); -} - -VMManager::CheckResults VMManager::CheckRangeState(VAddr address, 
u64 size, MemoryState state_mask, - MemoryState state, VMAPermission permission_mask, - VMAPermission permissions, - MemoryAttribute attribute_mask, - MemoryAttribute attribute, - MemoryAttribute ignore_mask) const { - auto iter = FindVMA(address); - - // If we don't have a valid VMA handle at this point, then it means this is - // being called with an address outside of the address space, which is definitely - // indicative of a bug, as this function only operates on mapped memory regions. - DEBUG_ASSERT(IsValidHandle(iter)); - - const VAddr end_address = address + size - 1; - const MemoryAttribute initial_attributes = iter->second.attribute; - const VMAPermission initial_permissions = iter->second.permissions; - const MemoryState initial_state = iter->second.state; - - while (true) { - // The iterator should be valid throughout the traversal. Hitting the end of - // the mapped VMA regions is unquestionably indicative of a bug. - DEBUG_ASSERT(IsValidHandle(iter)); - - const auto& vma = iter->second; - - if (vma.state != initial_state) { - return ERR_INVALID_ADDRESS_STATE; - } - - if ((vma.state & state_mask) != state) { - return ERR_INVALID_ADDRESS_STATE; - } - - if (vma.permissions != initial_permissions) { - return ERR_INVALID_ADDRESS_STATE; - } - - if ((vma.permissions & permission_mask) != permissions) { - return ERR_INVALID_ADDRESS_STATE; - } - - if ((vma.attribute | ignore_mask) != (initial_attributes | ignore_mask)) { - return ERR_INVALID_ADDRESS_STATE; - } - - if ((vma.attribute & attribute_mask) != attribute) { - return ERR_INVALID_ADDRESS_STATE; - } - - if (end_address <= vma.EndAddress()) { - break; - } - - ++iter; - } - - return MakeResult( - std::make_tuple(initial_state, initial_permissions, initial_attributes & ~ignore_mask)); -} - -ResultVal<std::size_t> VMManager::SizeOfAllocatedVMAsInRange(VAddr address, - std::size_t size) const { - const VAddr end_addr = address + size; - const VAddr last_addr = end_addr - 1; - std::size_t mapped_size = 0; - - VAddr cur_addr = address; - auto iter = FindVMA(cur_addr); - ASSERT(iter != vma_map.end()); - - while (true) { - const auto& vma = iter->second; - const VAddr vma_start = vma.base; - const VAddr vma_end = vma_start + vma.size; - const VAddr vma_last = vma_end - 1; - - // Add size if relevant. - if (vma.state != MemoryState::Unmapped) { - mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr); - } - - // Break once we hit the end of the range. - if (last_addr <= vma_last) { - break; - } - - // Advance to the next block. - cur_addr = vma_end; - iter = std::next(iter); - ASSERT(iter != vma_map.end()); - } - - return MakeResult(mapped_size); -} - -ResultVal<std::size_t> VMManager::SizeOfUnmappablePhysicalMemoryInRange(VAddr address, - std::size_t size) const { - const VAddr end_addr = address + size; - const VAddr last_addr = end_addr - 1; - std::size_t mapped_size = 0; - - VAddr cur_addr = address; - auto iter = FindVMA(cur_addr); - ASSERT(iter != vma_map.end()); - - while (true) { - const auto& vma = iter->second; - const auto vma_start = vma.base; - const auto vma_end = vma_start + vma.size; - const auto vma_last = vma_end - 1; - const auto state = vma.state; - const auto attr = vma.attribute; - - // Memory within region must be free or mapped heap. - if (!((state == MemoryState::Heap && attr == MemoryAttribute::None) || - (state == MemoryState::Unmapped))) { - return ERR_INVALID_ADDRESS_STATE; - } - - // Add size if relevant. 
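The size accounting below clamps each block's contribution against both the query range and the current VMA: min(end_addr - cur_addr, vma_end - cur_addr). A small verifiable sketch; for example, querying [0x1000, 0x4000) over a VMA ending at 0x3000, visited at cur_addr = 0x1000, contributes min(0x3000, 0x2000) = 0x2000 bytes.

#include <algorithm>
#include <cstdint>

constexpr std::uint64_t Overlap(std::uint64_t cur_addr, std::uint64_t end_addr,
                                std::uint64_t vma_end) {
    return std::min(end_addr - cur_addr, vma_end - cur_addr);
}
static_assert(Overlap(0x1000, 0x4000, 0x3000) == 0x2000);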
- if (state != MemoryState::Unmapped) { - mapped_size += std::min(end_addr - cur_addr, vma_end - cur_addr); - } - - // Break once we hit the end of the range. - if (last_addr <= vma_last) { - break; - } - - // Advance to the next block. - cur_addr = vma_end; - iter = std::next(iter); - ASSERT(iter != vma_map.end()); - } - - return MakeResult(mapped_size); -} - -u64 VMManager::GetTotalPhysicalMemoryAvailable() const { - LOG_WARNING(Kernel, "(STUBBED) called"); - return 0xF8000000; -} - -VAddr VMManager::GetAddressSpaceBaseAddress() const { - return address_space_base; -} - -VAddr VMManager::GetAddressSpaceEndAddress() const { - return address_space_end; -} - -u64 VMManager::GetAddressSpaceSize() const { - return address_space_end - address_space_base; -} - -u64 VMManager::GetAddressSpaceWidth() const { - return address_space_width; -} - -bool VMManager::IsWithinAddressSpace(VAddr address, u64 size) const { - return IsInsideAddressRange(address, size, GetAddressSpaceBaseAddress(), - GetAddressSpaceEndAddress()); -} - -VAddr VMManager::GetASLRRegionBaseAddress() const { - return aslr_region_base; -} - -VAddr VMManager::GetASLRRegionEndAddress() const { - return aslr_region_end; -} - -u64 VMManager::GetASLRRegionSize() const { - return aslr_region_end - aslr_region_base; -} - -bool VMManager::IsWithinASLRRegion(VAddr begin, u64 size) const { - const VAddr range_end = begin + size; - const VAddr aslr_start = GetASLRRegionBaseAddress(); - const VAddr aslr_end = GetASLRRegionEndAddress(); - - if (aslr_start > begin || begin > range_end || range_end - 1 > aslr_end - 1) { - return false; - } - - if (range_end > heap_region_base && heap_region_end > begin) { - return false; - } - - if (range_end > map_region_base && map_region_end > begin) { - return false; - } - - return true; -} - -VAddr VMManager::GetCodeRegionBaseAddress() const { - return code_region_base; -} - -VAddr VMManager::GetCodeRegionEndAddress() const { - return code_region_end; -} - -u64 VMManager::GetCodeRegionSize() const { - return code_region_end - code_region_base; -} - -bool VMManager::IsWithinCodeRegion(VAddr address, u64 size) const { - return IsInsideAddressRange(address, size, GetCodeRegionBaseAddress(), - GetCodeRegionEndAddress()); -} - -VAddr VMManager::GetHeapRegionBaseAddress() const { - return heap_region_base; -} - -VAddr VMManager::GetHeapRegionEndAddress() const { - return heap_region_end; -} - -u64 VMManager::GetHeapRegionSize() const { - return heap_region_end - heap_region_base; -} - -u64 VMManager::GetCurrentHeapSize() const { - return heap_end - heap_region_base; -} - -bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const { - return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(), - GetHeapRegionEndAddress()); -} - -VAddr VMManager::GetMapRegionBaseAddress() const { - return map_region_base; -} - -VAddr VMManager::GetMapRegionEndAddress() const { - return map_region_end; -} - -u64 VMManager::GetMapRegionSize() const { - return map_region_end - map_region_base; -} - -bool VMManager::IsWithinMapRegion(VAddr address, u64 size) const { - return IsInsideAddressRange(address, size, GetMapRegionBaseAddress(), GetMapRegionEndAddress()); -} - -VAddr VMManager::GetStackRegionBaseAddress() const { - return stack_region_base; -} - -VAddr VMManager::GetStackRegionEndAddress() const { - return stack_region_end; -} - -u64 VMManager::GetStackRegionSize() const { - return stack_region_end - stack_region_base; -} - -bool VMManager::IsWithinStackRegion(VAddr address, u64 size) const { - return 
IsInsideAddressRange(address, size, GetStackRegionBaseAddress(), - GetStackRegionEndAddress()); -} - -VAddr VMManager::GetTLSIORegionBaseAddress() const { - return tls_io_region_base; -} - -VAddr VMManager::GetTLSIORegionEndAddress() const { - return tls_io_region_end; -} - -u64 VMManager::GetTLSIORegionSize() const { - return tls_io_region_end - tls_io_region_base; -} - -bool VMManager::IsWithinTLSIORegion(VAddr address, u64 size) const { - return IsInsideAddressRange(address, size, GetTLSIORegionBaseAddress(), - GetTLSIORegionEndAddress()); -} - -} // namespace Kernel diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h deleted file mode 100644 index 90b4b006a..000000000 --- a/src/core/hle/kernel/vm_manager.h +++ /dev/null @@ -1,796 +0,0 @@ -// Copyright 2015 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include <map> -#include <memory> -#include <tuple> -#include <vector> -#include "common/common_types.h" -#include "common/memory_hook.h" -#include "common/page_table.h" -#include "core/hle/kernel/physical_memory.h" -#include "core/hle/result.h" -#include "core/memory.h" - -namespace Core { -class System; -} - -namespace FileSys { -enum class ProgramAddressSpaceType : u8; -} - -namespace Kernel { - -enum class VMAType : u8 { - /// VMA represents an unmapped region of the address space. - Free, - /// VMA is backed by a ref-counted allocate memory block. - AllocatedMemoryBlock, - /// VMA is backed by a raw, unmanaged pointer. - BackingMemory, - /// VMA is mapped to MMIO registers at a fixed PAddr. - MMIO, - // TODO(yuriks): Implement MemoryAlias to support MAP/UNMAP -}; - -/// Permissions for mapped memory blocks -enum class VMAPermission : u8 { - None = 0, - Read = 1, - Write = 2, - Execute = 4, - - ReadWrite = Read | Write, - ReadExecute = Read | Execute, - WriteExecute = Write | Execute, - ReadWriteExecute = Read | Write | Execute, - - // Used as a wildcard when checking permissions across memory ranges - All = 0xFF, -}; - -constexpr VMAPermission operator|(VMAPermission lhs, VMAPermission rhs) { - return static_cast<VMAPermission>(u32(lhs) | u32(rhs)); -} - -constexpr VMAPermission operator&(VMAPermission lhs, VMAPermission rhs) { - return static_cast<VMAPermission>(u32(lhs) & u32(rhs)); -} - -constexpr VMAPermission operator^(VMAPermission lhs, VMAPermission rhs) { - return static_cast<VMAPermission>(u32(lhs) ^ u32(rhs)); -} - -constexpr VMAPermission operator~(VMAPermission permission) { - return static_cast<VMAPermission>(~u32(permission)); -} - -constexpr VMAPermission& operator|=(VMAPermission& lhs, VMAPermission rhs) { - lhs = lhs | rhs; - return lhs; -} - -constexpr VMAPermission& operator&=(VMAPermission& lhs, VMAPermission rhs) { - lhs = lhs & rhs; - return lhs; -} - -constexpr VMAPermission& operator^=(VMAPermission& lhs, VMAPermission rhs) { - lhs = lhs ^ rhs; - return lhs; -} - -/// Attribute flags that can be applied to a VMA -enum class MemoryAttribute : u32 { - Mask = 0xFF, - - /// No particular qualities - None = 0, - /// Memory locked/borrowed for use. e.g. This would be used by transfer memory. - Locked = 1, - /// Memory locked for use by IPC-related internals. - LockedForIPC = 2, - /// Mapped as part of the device address space. 
- DeviceMapped = 4, - /// Uncached memory - Uncached = 8, - - IpcAndDeviceMapped = LockedForIPC | DeviceMapped, -}; - -constexpr MemoryAttribute operator|(MemoryAttribute lhs, MemoryAttribute rhs) { - return static_cast<MemoryAttribute>(u32(lhs) | u32(rhs)); -} - -constexpr MemoryAttribute operator&(MemoryAttribute lhs, MemoryAttribute rhs) { - return static_cast<MemoryAttribute>(u32(lhs) & u32(rhs)); -} - -constexpr MemoryAttribute operator^(MemoryAttribute lhs, MemoryAttribute rhs) { - return static_cast<MemoryAttribute>(u32(lhs) ^ u32(rhs)); -} - -constexpr MemoryAttribute operator~(MemoryAttribute attribute) { - return static_cast<MemoryAttribute>(~u32(attribute)); -} - -constexpr MemoryAttribute& operator|=(MemoryAttribute& lhs, MemoryAttribute rhs) { - lhs = lhs | rhs; - return lhs; -} - -constexpr MemoryAttribute& operator&=(MemoryAttribute& lhs, MemoryAttribute rhs) { - lhs = lhs & rhs; - return lhs; -} - -constexpr MemoryAttribute& operator^=(MemoryAttribute& lhs, MemoryAttribute rhs) { - lhs = lhs ^ rhs; - return lhs; -} - -constexpr u32 ToSvcMemoryAttribute(MemoryAttribute attribute) { - return static_cast<u32>(attribute & MemoryAttribute::Mask); -} - -// clang-format off -/// Represents memory states and any relevant flags, as used by the kernel. -/// svcQueryMemory interprets these by masking away all but the first eight -/// bits when storing memory state into a MemoryInfo instance. -enum class MemoryState : u32 { - Mask = 0xFF, - FlagProtect = 1U << 8, - FlagDebug = 1U << 9, - FlagIPC0 = 1U << 10, - FlagIPC3 = 1U << 11, - FlagIPC1 = 1U << 12, - FlagMapped = 1U << 13, - FlagCode = 1U << 14, - FlagAlias = 1U << 15, - FlagModule = 1U << 16, - FlagTransfer = 1U << 17, - FlagQueryPhysicalAddressAllowed = 1U << 18, - FlagSharedDevice = 1U << 19, - FlagSharedDeviceAligned = 1U << 20, - FlagIPCBuffer = 1U << 21, - FlagMemoryPoolAllocated = 1U << 22, - FlagMapProcess = 1U << 23, - FlagUncached = 1U << 24, - FlagCodeMemory = 1U << 25, - - // Wildcard used in range checking to indicate all states. 
- All = 0xFFFFFFFF, - - // Convenience flag sets to reduce repetition - IPCFlags = FlagIPC0 | FlagIPC3 | FlagIPC1, - - CodeFlags = FlagDebug | IPCFlags | FlagMapped | FlagCode | FlagQueryPhysicalAddressAllowed | - FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated, - - DataFlags = FlagProtect | IPCFlags | FlagMapped | FlagAlias | FlagTransfer | - FlagQueryPhysicalAddressAllowed | FlagSharedDevice | FlagSharedDeviceAligned | - FlagMemoryPoolAllocated | FlagIPCBuffer | FlagUncached, - - Unmapped = 0x00, - Io = 0x01 | FlagMapped, - Normal = 0x02 | FlagMapped | FlagQueryPhysicalAddressAllowed, - Code = 0x03 | CodeFlags | FlagMapProcess, - CodeData = 0x04 | DataFlags | FlagMapProcess | FlagCodeMemory, - Heap = 0x05 | DataFlags | FlagCodeMemory, - Shared = 0x06 | FlagMapped | FlagMemoryPoolAllocated, - ModuleCode = 0x08 | CodeFlags | FlagModule | FlagMapProcess, - ModuleCodeData = 0x09 | DataFlags | FlagModule | FlagMapProcess | FlagCodeMemory, - - IpcBuffer0 = 0x0A | FlagMapped | FlagQueryPhysicalAddressAllowed | FlagMemoryPoolAllocated | - IPCFlags | FlagSharedDevice | FlagSharedDeviceAligned, - - Stack = 0x0B | FlagMapped | IPCFlags | FlagQueryPhysicalAddressAllowed | - FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated, - - ThreadLocal = 0x0C | FlagMapped | FlagMemoryPoolAllocated, - - TransferMemoryIsolated = 0x0D | IPCFlags | FlagMapped | FlagQueryPhysicalAddressAllowed | - FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated | - FlagUncached, - - TransferMemory = 0x0E | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed | - FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated, - - ProcessMemory = 0x0F | FlagIPC3 | FlagIPC1 | FlagMapped | FlagMemoryPoolAllocated, - - // Used to signify an inaccessible or invalid memory region with memory queries - Inaccessible = 0x10, - - IpcBuffer1 = 0x11 | FlagIPC3 | FlagIPC1 | FlagMapped | FlagQueryPhysicalAddressAllowed | - FlagSharedDevice | FlagSharedDeviceAligned | FlagMemoryPoolAllocated, - - IpcBuffer3 = 0x12 | FlagIPC3 | FlagMapped | FlagQueryPhysicalAddressAllowed | - FlagSharedDeviceAligned | FlagMemoryPoolAllocated, - - KernelStack = 0x13 | FlagMapped, -}; -// clang-format on - -constexpr MemoryState operator|(MemoryState lhs, MemoryState rhs) { - return static_cast<MemoryState>(u32(lhs) | u32(rhs)); -} - -constexpr MemoryState operator&(MemoryState lhs, MemoryState rhs) { - return static_cast<MemoryState>(u32(lhs) & u32(rhs)); -} - -constexpr MemoryState operator^(MemoryState lhs, MemoryState rhs) { - return static_cast<MemoryState>(u32(lhs) ^ u32(rhs)); -} - -constexpr MemoryState operator~(MemoryState lhs) { - return static_cast<MemoryState>(~u32(lhs)); -} - -constexpr MemoryState& operator|=(MemoryState& lhs, MemoryState rhs) { - lhs = lhs | rhs; - return lhs; -} - -constexpr MemoryState& operator&=(MemoryState& lhs, MemoryState rhs) { - lhs = lhs & rhs; - return lhs; -} - -constexpr MemoryState& operator^=(MemoryState& lhs, MemoryState rhs) { - lhs = lhs ^ rhs; - return lhs; -} - -constexpr u32 ToSvcMemoryState(MemoryState state) { - return static_cast<u32>(state & MemoryState::Mask); -} - -struct MemoryInfo { - u64 base_address; - u64 size; - u32 state; - u32 attributes; - u32 permission; - u32 ipc_ref_count; - u32 device_ref_count; -}; -static_assert(sizeof(MemoryInfo) == 0x28, "MemoryInfo has incorrect size."); - -struct PageInfo { - u32 flags; -}; - -/** - * Represents a VMA in an address space. 
A VMA is a contiguous region of virtual addressing space
- * with homogeneous attributes across its extents. In this particular implementation each VMA is
- * also backed by a single host memory allocation.
- */
-struct VirtualMemoryArea {
-    /// Gets the starting (base) address of this VMA.
-    VAddr StartAddress() const {
-        return base;
-    }
-
-    /// Gets the ending address of this VMA.
-    VAddr EndAddress() const {
-        return base + size - 1;
-    }
-
-    /// Virtual base address of the region.
-    VAddr base = 0;
-    /// Size of the region.
-    u64 size = 0;
-
-    VMAType type = VMAType::Free;
-    VMAPermission permissions = VMAPermission::None;
-    MemoryState state = MemoryState::Unmapped;
-    MemoryAttribute attribute = MemoryAttribute::None;
-
-    // Settings for type = AllocatedMemoryBlock
-    /// Memory block backing this VMA.
-    std::shared_ptr<PhysicalMemory> backing_block = nullptr;
-    /// Offset into backing_block that the mapping starts from.
-    std::size_t offset = 0;
-
-    // Settings for type = BackingMemory
-    /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
-    u8* backing_memory = nullptr;
-
-    // Settings for type = MMIO
-    /// Physical address of the register area this VMA maps to.
-    PAddr paddr = 0;
-    Common::MemoryHookPointer mmio_handler = nullptr;
-
-    /// Tests if this area can be merged to the right with `next`.
-    bool CanBeMergedWith(const VirtualMemoryArea& next) const;
-};
-
-/**
- * Manages a process' virtual addressing space. This class maintains a list of allocated and free
- * regions in the address space, along with their attributes, and allows kernel clients to
- * manipulate it, adjusting the page table to match.
- *
- * This is similar in idea and purpose to the VM manager present in operating system kernels, with
- * the main difference being that it doesn't have to support swapping or memory mapping of files.
- * The implementation is also simplified by not having to allocate page frames. See these articles
- * about the Linux kernel for an explanation of the concept and implementation:
- *  - http://duartes.org/gustavo/blog/post/how-the-kernel-manages-your-memory/
- *  - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/
- */
-class VMManager final {
-    using VMAMap = std::map<VAddr, VirtualMemoryArea>;
-
-public:
-    using VMAHandle = VMAMap::const_iterator;
-
-    explicit VMManager(Core::System& system);
-    ~VMManager();
-
-    /// Clears the address space map, re-initializing with a single free area.
-    void Reset(FileSys::ProgramAddressSpaceType type);
-
-    /// Finds the VMA that contains the given address, or `vma_map.end()` if there is none.
-    VMAHandle FindVMA(VAddr target) const;
-
-    /// Indicates whether or not the given handle is within the VMA map.
-    bool IsValidHandle(VMAHandle handle) const;
-
-    // TODO(yuriks): Should these functions actually return the handle?
-
-    /**
-     * Maps part of a ref-counted block of memory at a given address.
-     *
-     * @param target The guest address to start the mapping at.
-     * @param block  The block to be mapped.
-     * @param offset Offset into `block` to map from.
-     * @param size   Size of the mapping.
-     * @param state  MemoryState tag to attach to the VMA.
-     */
-    ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<PhysicalMemory> block,
-                                        std::size_t offset, u64 size, MemoryState state,
-                                        VMAPermission perm = VMAPermission::ReadWrite);
-
-    /**
-     * Maps an unmanaged host memory pointer at a given address.
-     *
-     * @param target The guest address to start the mapping at.
- * @param memory The memory to be mapped. - * @param size Size of the mapping. - * @param state MemoryState tag to attach to the VMA. - */ - ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u64 size, MemoryState state); - - /** - * Finds the first free memory region of the given size within - * the user-addressable ASLR memory region. - * - * @param size The size of the desired region in bytes. - * - * @returns If successful, the base address of the free region with - * the given size. - */ - ResultVal<VAddr> FindFreeRegion(u64 size) const; - - /** - * Finds the first free address range that can hold a region of the desired size - * - * @param begin The starting address of the range. - * This is treated as an inclusive beginning address. - * - * @param end The ending address of the range. - * This is treated as an exclusive ending address. - * - * @param size The size of the free region to attempt to locate, - * in bytes. - * - * @returns If successful, the base address of the free region with - * the given size. - * - * @returns If unsuccessful, a result containing an error code. - * - * @pre The starting address must be less than the ending address. - * @pre The size must not exceed the address range itself. - */ - ResultVal<VAddr> FindFreeRegion(VAddr begin, VAddr end, u64 size) const; - - /** - * Maps a memory-mapped IO region at a given address. - * - * @param target The guest address to start the mapping at. - * @param paddr The physical address where the registers are present. - * @param size Size of the mapping. - * @param state MemoryState tag to attach to the VMA. - * @param mmio_handler The handler that will implement read and write for this MMIO region. - */ - ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state, - Common::MemoryHookPointer mmio_handler); - - /// Unmaps a range of addresses, splitting VMAs as necessary. - ResultCode UnmapRange(VAddr target, u64 size); - - /// Changes the permissions of the given VMA. - VMAHandle Reprotect(VMAHandle vma, VMAPermission new_perms); - - /// Changes the permissions of a range of addresses, splitting VMAs as necessary. - ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms); - - ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state); - - /// Attempts to allocate a heap with the given size. - /// - /// @param size The size of the heap to allocate in bytes. - /// - /// @note If a heap is currently allocated, and this is called - /// with a size that is equal to the size of the current heap, - /// then this function will do nothing and return the current - /// heap's starting address, as there's no need to perform - /// any additional heap allocation work. - /// - /// @note If a heap is currently allocated, and this is called - /// with a size less than the current heap's size, then - /// this function will attempt to shrink the heap. - /// - /// @note If a heap is currently allocated, and this is called - /// with a size larger than the current heap's size, then - /// this function will attempt to extend the size of the heap. - /// - /// @returns A result indicating either success or failure. - /// <p> - /// If successful, this function will return a result - /// containing the starting address to the allocated heap. - /// <p> - /// If unsuccessful, this function will return a result - /// containing an error code. - /// - /// @pre The given size must lie within the allowable heap - /// memory region managed by this VMManager instance. 
-    ///      Failure to abide by this will result in ERR_OUT_OF_MEMORY
-    ///      being returned as the result.
-    ///
-    ResultVal<VAddr> SetHeapSize(u64 size);
-
-    /// Maps memory at a given address.
-    ///
-    /// @param target The virtual address to map memory at.
-    /// @param size   The amount of memory to map.
-    ///
-    /// @note The destination address must lie within the Map region.
-    ///
-    /// @note This function requires that SystemResourceSize be non-zero,
-    ///       however, this is just because if it were not then the
-    ///       resulting page tables could be exploited on hardware by
-    ///       a malicious program. SystemResource usage does not need
-    ///       to be explicitly checked or updated here.
-    ResultCode MapPhysicalMemory(VAddr target, u64 size);
-
-    /// Unmaps memory at a given address.
-    ///
-    /// @param target The virtual address to unmap memory at.
-    /// @param size   The amount of memory to unmap.
-    ///
-    /// @note The destination address must lie within the Map region.
-    ///
-    /// @note This function requires that SystemResourceSize be non-zero,
-    ///       however, this is just because if it were not then the
-    ///       resulting page tables could be exploited on hardware by
-    ///       a malicious program. SystemResource usage does not need
-    ///       to be explicitly checked or updated here.
-    ResultCode UnmapPhysicalMemory(VAddr target, u64 size);
-
-    /// Maps a region of memory as code memory.
-    ///
-    /// @param dst_address The base address of the region to create the aliasing memory region.
-    /// @param src_address The base address of the region to be aliased.
-    /// @param size        The total amount of memory to map in bytes.
-    ///
-    /// @pre Both memory regions lie within the actual addressable address space.
-    ///
-    /// @post After this function finishes execution, assuming success, the address range
-    ///       [dst_address, dst_address+size) will alias the memory region
-    ///       [src_address, src_address+size).
-    ///       <p>
-    ///       What this also entails is as follows:
-    ///       1. The aliased region gains the Locked memory attribute.
-    ///       2. The aliased region becomes read-only.
-    ///       3. The aliasing region becomes read-only.
-    ///       4. The aliasing region is created with a memory state of MemoryState::ModuleCode.
-    ///
-    ResultCode MapCodeMemory(VAddr dst_address, VAddr src_address, u64 size);
-
-    /// Unmaps a region of memory designated as code module memory.
-    ///
-    /// @param dst_address The base address of the memory region aliasing the source memory region.
-    /// @param src_address The base address of the memory region being aliased.
-    /// @param size        The size of the memory region to unmap in bytes.
-    ///
-    /// @pre Both memory ranges lie within the actual addressable address space.
-    ///
-    /// @pre The memory region being unmapped has previously been mapped
-    ///      by a call to MapCodeMemory.
-    ///
-    /// @post After execution of the function, if successful, the aliasing memory region
-    ///       will be unmapped and the aliased region will have various traits about it
-    ///       restored to what they were prior to the original mapping call preceding
-    ///       this function call.
-    ///       <p>
-    ///       What this also entails is as follows:
-    ///       1. The state of the memory region will now indicate a general heap region.
-    ///       2. All memory attributes for the memory region are cleared.
-    ///       3. Memory permissions for the region are restored to user read/write.
-    ///
-    ResultCode UnmapCodeMemory(VAddr dst_address, VAddr src_address, u64 size);
-
-    /// Queries the memory manager for information about the given address.
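A hypothetical caller of this query API, mirroring what svcQueryMemory does with the MemoryInfo layout shown earlier (variable names are illustrative, not from the header):

    // Illustrative only: for an address inside the managed address space, the
    // returned MemoryInfo describes the VMA containing it, so the block
    // brackets the queried address.
    const MemoryInfo info = vm_manager.QueryMemory(addr);
    ASSERT(info.base_address <= addr && addr < info.base_address + info.size);
    // info.state and info.attributes already hold the masked SVC views
    // produced by ToSvcMemoryState / ToSvcMemoryAttribute.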
-    ///
-    /// @param address The address to query the memory manager about.
-    ///
-    /// @return A MemoryInfo instance containing information about the given address.
-    ///
-    MemoryInfo QueryMemory(VAddr address) const;
-
-    /// Sets an attribute across the given address range.
-    ///
-    /// @param address   The starting address.
-    /// @param size      The size of the range to set the attribute on.
-    /// @param mask      The attribute mask.
-    /// @param attribute The attribute to set across the given address range.
-    ///
-    /// @returns RESULT_SUCCESS if successful.
-    /// @returns ERR_INVALID_ADDRESS_STATE if the attribute could not be set.
-    ///
-    ResultCode SetMemoryAttribute(VAddr address, u64 size, MemoryAttribute mask,
-                                  MemoryAttribute attribute);
-
-    /**
-     * Scans all VMAs and updates the page table range of any that use the given vector as backing
-     * memory. This should be called after any operation that causes reallocation of the vector.
-     */
-    void RefreshMemoryBlockMappings(const PhysicalMemory* block);
-
-    /// Dumps the address space layout to the log, for debugging.
-    void LogLayout() const;
-
-    /// Gets the total memory usage, used by svcGetInfo.
-    u64 GetTotalPhysicalMemoryAvailable() const;
-
-    /// Gets the address space base address.
-    VAddr GetAddressSpaceBaseAddress() const;
-
-    /// Gets the address space end address.
-    VAddr GetAddressSpaceEndAddress() const;
-
-    /// Gets the total address space size in bytes.
-    u64 GetAddressSpaceSize() const;
-
-    /// Gets the address space width in bits.
-    u64 GetAddressSpaceWidth() const;
-
-    /// Determines whether or not the given address range lies within the address space.
-    bool IsWithinAddressSpace(VAddr address, u64 size) const;
-
-    /// Gets the base address of the ASLR region.
-    VAddr GetASLRRegionBaseAddress() const;
-
-    /// Gets the end address of the ASLR region.
-    VAddr GetASLRRegionEndAddress() const;
-
-    /// Gets the size of the ASLR region.
-    u64 GetASLRRegionSize() const;
-
-    /// Determines whether or not the specified address range is within the ASLR region.
-    bool IsWithinASLRRegion(VAddr address, u64 size) const;
-
-    /// Gets the base address of the code region.
-    VAddr GetCodeRegionBaseAddress() const;
-
-    /// Gets the end address of the code region.
-    VAddr GetCodeRegionEndAddress() const;
-
-    /// Gets the total size of the code region in bytes.
-    u64 GetCodeRegionSize() const;
-
-    /// Determines whether or not the specified range is within the code region.
-    bool IsWithinCodeRegion(VAddr address, u64 size) const;
-
-    /// Gets the base address of the heap region.
-    VAddr GetHeapRegionBaseAddress() const;
-
-    /// Gets the end address of the heap region.
-    VAddr GetHeapRegionEndAddress() const;
-
-    /// Gets the total size of the heap region in bytes.
-    u64 GetHeapRegionSize() const;
-
-    /// Gets the total size of the current heap in bytes.
-    ///
-    /// @note This is the current allocated heap size, not the size
-    ///       of the region it's allowed to exist within.
-    ///
-    u64 GetCurrentHeapSize() const;
-
-    /// Determines whether or not the specified range is within the heap region.
-    bool IsWithinHeapRegion(VAddr address, u64 size) const;
-
-    /// Gets the base address of the map region.
-    VAddr GetMapRegionBaseAddress() const;
-
-    /// Gets the end address of the map region.
-    VAddr GetMapRegionEndAddress() const;
-
-    /// Gets the total size of the map region in bytes.
-    u64 GetMapRegionSize() const;
-
-    /// Determines whether or not the specified range is within the map region.
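These accessors all share one convention, assuming the end addresses are exclusive (so a region's size is end minus base). A sketch of the containment test the IsWithin*Region family implies, with a helper invented here for illustration:

    // Not from the header: range containment with inclusive base, exclusive end.
    // Written as size <= end - address so that address + size cannot wrap around.
    constexpr bool IsWithinRegion(VAddr base, VAddr end, VAddr address, u64 size) {
        return address >= base && size <= end - address;
    }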
-    bool IsWithinMapRegion(VAddr address, u64 size) const;
-
-    /// Gets the base address of the stack region.
-    VAddr GetStackRegionBaseAddress() const;
-
-    /// Gets the end address of the stack region.
-    VAddr GetStackRegionEndAddress() const;
-
-    /// Gets the total size of the stack region in bytes.
-    u64 GetStackRegionSize() const;
-
-    /// Determines whether or not the given address range is within the stack region.
-    bool IsWithinStackRegion(VAddr address, u64 size) const;
-
-    /// Gets the base address of the TLS IO region.
-    VAddr GetTLSIORegionBaseAddress() const;
-
-    /// Gets the end address of the TLS IO region.
-    VAddr GetTLSIORegionEndAddress() const;
-
-    /// Gets the total size of the TLS IO region in bytes.
-    u64 GetTLSIORegionSize() const;
-
-    /// Determines if the given address range is within the TLS IO region.
-    bool IsWithinTLSIORegion(VAddr address, u64 size) const;
-
-    /// Each VMManager has its own page table, which is set as the main one when the owning process
-    /// is scheduled.
-    Common::PageTable page_table{Memory::PAGE_BITS};
-
-    using CheckResults = ResultVal<std::tuple<MemoryState, VMAPermission, MemoryAttribute>>;
-
-    /// Checks if an address range adheres to the specified state, permission, and
-    /// attribute constraints.
-    ///
-    /// @param address The starting address of the address range.
-    /// @param size The size of the address range.
-    /// @param state_mask The memory state mask.
-    /// @param state The state to compare the individual VMA states against,
-    ///              which is done in the form of: (vma.state & state_mask) != state.
-    /// @param permission_mask The memory permissions mask.
-    /// @param permissions The permission to compare the individual VMA permissions against,
-    ///                    which is done in the form of:
-    ///                    (vma.permission & permission_mask) != permission.
-    /// @param attribute_mask The memory attribute mask.
-    /// @param attribute The memory attributes to compare the individual VMA attributes
-    ///                  against, which is done in the form of:
-    ///                  (vma.attributes & attribute_mask) != attribute.
-    /// @param ignore_mask The memory attributes to ignore during the check.
-    ///
-    /// @returns If successful, returns a tuple containing the memory state, memory
-    ///          permissions, and memory attributes (with bits in ignore_mask unset)
-    ///          across the memory range, matching the order in CheckResults.
-    /// @returns If not successful, returns ERR_INVALID_ADDRESS_STATE.
-    ///
-    CheckResults CheckRangeState(VAddr address, u64 size, MemoryState state_mask, MemoryState state,
-                                 VMAPermission permission_mask, VMAPermission permissions,
-                                 MemoryAttribute attribute_mask, MemoryAttribute attribute,
-                                 MemoryAttribute ignore_mask) const;
-
-private:
-    using VMAIter = VMAMap::iterator;
-
-    /// Converts a VMAHandle to a mutable VMAIter.
-    VMAIter StripIterConstness(const VMAHandle& iter);
-
-    /// Unmaps the given VMA.
-    VMAIter Unmap(VMAIter vma);
-
-    /**
-     * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
-     * the appropriate error checking.
-     */
-    ResultVal<VMAIter> CarveVMA(VAddr base, u64 size);
-
-    /**
-     * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each
-     * end of the range.
-     */
-    ResultVal<VMAIter> CarveVMARange(VAddr base, u64 size);
-
-    /**
-     * Splits a VMA in two, at the specified offset.
-     * @returns the right side of the split, with the original iterator becoming the left side.
-     */
-    VMAIter SplitVMA(VMAIter vma, u64 offset_in_vma);
-
-    /**
-     * Checks for and merges the specified VMA with adjacent ones if possible.
- * @returns the merged VMA or the original if no merging was possible. - */ - VMAIter MergeAdjacent(VMAIter vma); - - /** - * Merges two adjacent VMAs. - */ - void MergeAdjacentVMA(VirtualMemoryArea& left, const VirtualMemoryArea& right); - - /// Updates the pages corresponding to this VMA so they match the VMA's attributes. - void UpdatePageTableForVMA(const VirtualMemoryArea& vma); - - /// Initializes memory region ranges to adhere to a given address space type. - void InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType type); - - /// Clears the underlying map and page table. - void Clear(); - - /// Clears out the VMA map, unmapping any previously mapped ranges. - void ClearVMAMap(); - - /// Clears out the page table - void ClearPageTable(); - - /// Gets the amount of memory currently mapped (state != Unmapped) in a range. - ResultVal<std::size_t> SizeOfAllocatedVMAsInRange(VAddr address, std::size_t size) const; - - /// Gets the amount of memory unmappable by UnmapPhysicalMemory in a range. - ResultVal<std::size_t> SizeOfUnmappablePhysicalMemoryInRange(VAddr address, - std::size_t size) const; - - /** - * A map covering the entirety of the managed address space, keyed by the `base` field of each - * VMA. It must always be modified by splitting or merging VMAs, so that the invariant - * `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be - * merged when possible so that no two similar and adjacent regions exist that have not been - * merged. - */ - VMAMap vma_map; - - u32 address_space_width = 0; - VAddr address_space_base = 0; - VAddr address_space_end = 0; - - VAddr aslr_region_base = 0; - VAddr aslr_region_end = 0; - - VAddr code_region_base = 0; - VAddr code_region_end = 0; - - VAddr heap_region_base = 0; - VAddr heap_region_end = 0; - - VAddr map_region_base = 0; - VAddr map_region_end = 0; - - VAddr stack_region_base = 0; - VAddr stack_region_end = 0; - - VAddr tls_io_region_base = 0; - VAddr tls_io_region_end = 0; - - // Memory used to back the allocations in the regular heap. A single vector is used to cover - // the entire virtual address space extents that bound the allocations, including any holes. - // This makes deallocation and reallocation of holes fast and keeps process memory contiguous - // in the emulator address space, allowing Memory::GetPointer to be reasonably safe. - std::shared_ptr<PhysicalMemory> heap_memory; - - // The end of the currently allocated heap. This is not an inclusive - // end of the range. This is essentially 'base_address + current_size'. - VAddr heap_end = 0; - - // The current amount of memory mapped via MapPhysicalMemory. - // This is used here (and in Nintendo's kernel) only for debugging, and does not impact - // any behavior. 
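The tiling invariant documented above for vma_map is mechanical to check; a debug-only sketch, with a helper name invented for illustration:

    // Invented helper: consecutive VMAs must tile the address space exactly,
    // which is what SplitVMA and MergeAdjacent cooperate to preserve.
    void AssertVMAMapIsContiguous(const VMAMap& vma_map) {
        if (vma_map.empty()) {
            return;
        }
        for (auto it = vma_map.begin(); std::next(it) != vma_map.end(); ++it) {
            ASSERT(it->second.base + it->second.size == std::next(it)->second.base);
        }
    }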
- u64 physical_memory_mapped = 0; - - Core::System& system; -}; -} // namespace Kernel diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp index 4fb2cbc4b..106e89743 100644 --- a/src/core/hle/service/audio/audout_u.cpp +++ b/src/core/hle/service/audio/audout_u.cpp @@ -210,7 +210,7 @@ private: /// This is the event handle used to check if the audio buffer was released Kernel::EventPair buffer_event; - Memory::Memory& main_memory; + Core::Memory::Memory& main_memory; }; AudOutU::AudOutU(Core::System& system_) : ServiceFramework("audout:u"), system{system_} { diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp index 82a5dbf14..175cabf45 100644 --- a/src/core/hle/service/audio/audren_u.cpp +++ b/src/core/hle/service/audio/audren_u.cpp @@ -129,7 +129,7 @@ private: LOG_DEBUG(Service_Audio, "called. rendering_time_limit_percent={}", rendering_time_limit_percent); - ASSERT(rendering_time_limit_percent >= 0 && rendering_time_limit_percent <= 100); + ASSERT(rendering_time_limit_percent <= 100); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(RESULT_SUCCESS); diff --git a/src/core/hle/service/filesystem/filesystem.cpp b/src/core/hle/service/filesystem/filesystem.cpp index 102017d73..cadc03805 100644 --- a/src/core/hle/service/filesystem/filesystem.cpp +++ b/src/core/hle/service/filesystem/filesystem.cpp @@ -451,7 +451,8 @@ FileSys::SaveDataSize FileSystemController::ReadSaveDataSize(FileSys::SaveDataTy if (res != Loader::ResultStatus::Success) { FileSys::PatchManager pm{system.CurrentProcess()->GetTitleID()}; - auto [nacp_unique, discard] = pm.GetControlMetadata(); + const auto metadata = pm.GetControlMetadata(); + const auto& nacp_unique = metadata.first; if (nacp_unique != nullptr) { new_size = {nacp_unique->GetDefaultNormalSaveSize(), diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp index e6811d5b5..61045c75c 100644 --- a/src/core/hle/service/filesystem/fsp_srv.cpp +++ b/src/core/hle/service/filesystem/fsp_srv.cpp @@ -575,6 +575,7 @@ private: 0, user_id->GetSize(), {}, + {}, }); continue; @@ -595,6 +596,7 @@ private: stoull_be(title_id->GetName()), title_id->GetSize(), {}, + {}, }); } } @@ -619,6 +621,7 @@ private: stoull_be(title_id->GetName()), title_id->GetSize(), {}, + {}, }); } } diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp index 6aadb3ea8..7938b4b80 100644 --- a/src/core/hle/service/friend/friend.cpp +++ b/src/core/hle/service/friend/friend.cpp @@ -27,7 +27,7 @@ public: {10110, nullptr, "GetFriendProfileImage"}, {10200, nullptr, "SendFriendRequestForApplication"}, {10211, nullptr, "AddFacedFriendRequestForApplication"}, - {10400, nullptr, "GetBlockedUserListIds"}, + {10400, &IFriendService::GetBlockedUserListIds, "GetBlockedUserListIds"}, {10500, nullptr, "GetProfileList"}, {10600, nullptr, "DeclareOpenOnlinePlaySession"}, {10601, &IFriendService::DeclareCloseOnlinePlaySession, "DeclareCloseOnlinePlaySession"}, @@ -121,6 +121,15 @@ private: }; static_assert(sizeof(SizedFriendFilter) == 0x10, "SizedFriendFilter is an invalid size"); + void GetBlockedUserListIds(Kernel::HLERequestContext& ctx) { + // This is safe to stub, as there should be no adverse consequences from reporting no + // blocked users. 
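+        // The response size of 3 follows the convention used elsewhere in this
+        // change (cf. `(sizeof(s64) / 4) + 2` in time.cpp): two words for the
+        // result code plus one u32 payload word, the count pushed below.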
+ LOG_WARNING(Service_ACC, "(STUBBED) called"); + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(RESULT_SUCCESS); + rb.Push<u32>(0); // Indicates there are no blocked users + } + void DeclareCloseOnlinePlaySession(Kernel::HLERequestContext& ctx) { // Stub used by Splatoon 2 LOG_WARNING(Service_ACC, "(STUBBED) called"); diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp index c1e32b28c..2ccfffc19 100644 --- a/src/core/hle/service/hid/controllers/npad.cpp +++ b/src/core/hle/service/hid/controllers/npad.cpp @@ -107,6 +107,7 @@ void Controller_NPad::InitNewlyAddedControler(std::size_t controller_idx) { switch (controller_type) { case NPadControllerType::None: UNREACHABLE(); + break; case NPadControllerType::Handheld: controller.joy_styles.handheld.Assign(1); controller.device_type.handheld.Assign(1); @@ -363,6 +364,7 @@ void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* switch (controller_type) { case NPadControllerType::None: UNREACHABLE(); + break; case NPadControllerType::Handheld: handheld_entry.connection_status.raw = 0; handheld_entry.connection_status.IsWired.Assign(1); diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp index d6ed5f304..d6031a987 100644 --- a/src/core/hle/service/hid/hid.cpp +++ b/src/core/hle/service/hid/hid.cpp @@ -14,6 +14,7 @@ #include "core/hle/ipc_helpers.h" #include "core/hle/kernel/client_port.h" #include "core/hle/kernel/client_session.h" +#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/readable_event.h" #include "core/hle/kernel/shared_memory.h" #include "core/hle/kernel/writable_event.h" @@ -53,9 +54,7 @@ IAppletResource::IAppletResource(Core::System& system) RegisterHandlers(functions); auto& kernel = system.Kernel(); - shared_mem = Kernel::SharedMemory::Create( - kernel, nullptr, SHARED_MEMORY_SIZE, Kernel::MemoryPermission::ReadWrite, - Kernel::MemoryPermission::Read, 0, Kernel::MemoryRegion::BASE, "HID:SharedMemory"); + shared_mem = SharedFrom(&kernel.GetHidSharedMem()); MakeController<Controller_DebugPad>(HidController::DebugPad); MakeController<Controller_Touchscreen>(HidController::Touchscreen); diff --git a/src/core/hle/service/hid/irs.cpp b/src/core/hle/service/hid/irs.cpp index 5e79e2c1a..36ed6f7da 100644 --- a/src/core/hle/service/hid/irs.cpp +++ b/src/core/hle/service/hid/irs.cpp @@ -6,6 +6,7 @@ #include "core/core.h" #include "core/core_timing.h" #include "core/hle/ipc_helpers.h" +#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/shared_memory.h" #include "core/hle/service/hid/irs.h" @@ -38,9 +39,8 @@ IRS::IRS(Core::System& system) : ServiceFramework{"irs"}, system(system) { RegisterHandlers(functions); auto& kernel = system.Kernel(); - shared_mem = Kernel::SharedMemory::Create( - kernel, nullptr, 0x8000, Kernel::MemoryPermission::ReadWrite, - Kernel::MemoryPermission::Read, 0, Kernel::MemoryRegion::BASE, "IRS:SharedMemory"); + + shared_mem = SharedFrom(&kernel.GetIrsSharedMem()); } void IRS::ActivateIrsensor(Kernel::HLERequestContext& ctx) { diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp index 647943020..0cde7a557 100644 --- a/src/core/hle/service/ldr/ldr.cpp +++ b/src/core/hle/service/ldr/ldr.cpp @@ -8,14 +8,21 @@ #include "common/alignment.h" #include "common/hex_util.h" +#include "common/scope_exit.h" #include "core/hle/ipc_helpers.h" +#include "core/hle/kernel/errors.h" +#include "core/hle/kernel/memory/page_table.h" +#include "core/hle/kernel/memory/system_control.h" 
#include "core/hle/kernel/process.h" #include "core/hle/service/ldr/ldr.h" #include "core/hle/service/service.h" #include "core/loader/nro.h" +#include "core/memory.h" namespace Service::LDR { +constexpr ResultCode ERROR_INSUFFICIENT_ADDRESS_SPACE{ErrorModule::RO, 2}; + constexpr ResultCode ERROR_INVALID_MEMORY_STATE{ErrorModule::Loader, 51}; constexpr ResultCode ERROR_INVALID_NRO{ErrorModule::Loader, 52}; constexpr ResultCode ERROR_INVALID_NRR{ErrorModule::Loader, 53}; @@ -29,7 +36,61 @@ constexpr ResultCode ERROR_INVALID_NRO_ADDRESS{ErrorModule::Loader, 84}; constexpr ResultCode ERROR_INVALID_NRR_ADDRESS{ErrorModule::Loader, 85}; constexpr ResultCode ERROR_NOT_INITIALIZED{ErrorModule::Loader, 87}; -constexpr u64 MAXIMUM_LOADED_RO = 0x40; +constexpr std::size_t MAXIMUM_LOADED_RO{0x40}; +constexpr std::size_t MAXIMUM_MAP_RETRIES{0x200}; + +struct NRRHeader { + u32_le magic; + INSERT_PADDING_BYTES(12); + u64_le title_id_mask; + u64_le title_id_pattern; + INSERT_PADDING_BYTES(16); + std::array<u8, 0x100> modulus; + std::array<u8, 0x100> signature_1; + std::array<u8, 0x100> signature_2; + u64_le title_id; + u32_le size; + INSERT_PADDING_BYTES(4); + u32_le hash_offset; + u32_le hash_count; + INSERT_PADDING_BYTES(8); +}; +static_assert(sizeof(NRRHeader) == 0x350, "NRRHeader has incorrect size."); + +struct NROHeader { + INSERT_PADDING_WORDS(1); + u32_le mod_offset; + INSERT_PADDING_WORDS(2); + u32_le magic; + u32_le version; + u32_le nro_size; + u32_le flags; + u32_le text_offset; + u32_le text_size; + u32_le ro_offset; + u32_le ro_size; + u32_le rw_offset; + u32_le rw_size; + u32_le bss_size; + INSERT_PADDING_WORDS(1); + std::array<u8, 0x20> build_id; + INSERT_PADDING_BYTES(0x20); +}; +static_assert(sizeof(NROHeader) == 0x80, "NROHeader has invalid size."); + +using SHA256Hash = std::array<u8, 0x20>; + +struct NROInfo { + SHA256Hash hash{}; + VAddr nro_address{}; + std::size_t nro_size{}; + VAddr bss_address{}; + std::size_t bss_size{}; + std::size_t text_size{}; + std::size_t ro_size{}; + std::size_t data_size{}; + VAddr src_addr{}; +}; class DebugMonitor final : public ServiceFramework<DebugMonitor> { public: @@ -84,7 +145,7 @@ public: {0, &RelocatableObject::LoadNro, "LoadNro"}, {1, &RelocatableObject::UnloadNro, "UnloadNro"}, {2, &RelocatableObject::LoadNrr, "LoadNrr"}, - {3, &RelocatableObject::UnloadNrr, "UnloadNrr"}, + {3, nullptr, "UnloadNrr"}, {4, &RelocatableObject::Initialize, "Initialize"}, {10, nullptr, "LoadNrrEx"}, }; @@ -190,46 +251,125 @@ public: rb.Push(RESULT_SUCCESS); } - void UnloadNrr(Kernel::HLERequestContext& ctx) { - if (!initialized) { - LOG_ERROR(Service_LDR, "LDR:RO not initialized before use!"); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERROR_NOT_INITIALIZED); - return; + bool ValidateRegionForMap(Kernel::Memory::PageTable& page_table, VAddr start, + std::size_t size) const { + constexpr std::size_t padding_size{4 * Kernel::Memory::PageSize}; + const auto start_info{page_table.QueryInfo(start - 1)}; + + if (start_info.state != Kernel::Memory::MemoryState::Free) { + return {}; } - struct Parameters { - u64_le process_id; - u64_le nrr_address; - }; + if (start_info.GetAddress() > (start - padding_size)) { + return {}; + } - IPC::RequestParser rp{ctx}; - const auto [process_id, nrr_address] = rp.PopRaw<Parameters>(); + const auto end_info{page_table.QueryInfo(start + size)}; - LOG_DEBUG(Service_LDR, "called with process_id={:016X}, nrr_addr={:016X}", process_id, - nrr_address); + if (end_info.state != Kernel::Memory::MemoryState::Free) { + return {}; + } - if 
(!Common::Is4KBAligned(nrr_address)) { - LOG_ERROR(Service_LDR, "NRR Address has invalid alignment (actual {:016X})!", - nrr_address); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERROR_INVALID_ALIGNMENT); - return; + return (start + size + padding_size) <= (end_info.GetAddress() + end_info.GetSize()); + } + + VAddr GetRandomMapRegion(const Kernel::Memory::PageTable& page_table, std::size_t size) const { + VAddr addr{}; + const std::size_t end_pages{(page_table.GetAliasCodeRegionSize() - size) >> + Kernel::Memory::PageBits}; + do { + addr = page_table.GetAliasCodeRegionStart() + + (Kernel::Memory::SystemControl::GenerateRandomRange(0, end_pages) + << Kernel::Memory::PageBits); + } while (!page_table.IsInsideAddressSpace(addr, size) || + page_table.IsInsideHeapRegion(addr, size) || + page_table.IsInsideAliasRegion(addr, size)); + return addr; + } + + ResultVal<VAddr> MapProcessCodeMemory(Kernel::Process* process, VAddr baseAddress, + u64 size) const { + for (int retry{}; retry < MAXIMUM_MAP_RETRIES; retry++) { + auto& page_table{process->PageTable()}; + const VAddr addr{GetRandomMapRegion(page_table, size)}; + const ResultCode result{page_table.MapProcessCodeMemory(addr, baseAddress, size)}; + + if (result == Kernel::ERR_INVALID_ADDRESS_STATE) { + continue; + } + + CASCADE_CODE(result); + + if (ValidateRegionForMap(page_table, addr, size)) { + return MakeResult<VAddr>(addr); + } } - const auto iter = nrr.find(nrr_address); - if (iter == nrr.end()) { - LOG_ERROR(Service_LDR, - "Attempting to unload NRR which has not been loaded! (addr={:016X})", - nrr_address); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(ERROR_INVALID_NRR_ADDRESS); - return; + return ERROR_INSUFFICIENT_ADDRESS_SPACE; + } + + ResultVal<VAddr> MapNro(Kernel::Process* process, VAddr nro_addr, std::size_t nro_size, + VAddr bss_addr, std::size_t bss_size, std::size_t size) const { + + for (int retry{}; retry < MAXIMUM_MAP_RETRIES; retry++) { + auto& page_table{process->PageTable()}; + VAddr addr{}; + + CASCADE_RESULT(addr, MapProcessCodeMemory(process, nro_addr, nro_size)); + + if (bss_size) { + auto block_guard = detail::ScopeExit([&] { + page_table.UnmapProcessCodeMemory(addr + nro_size, bss_addr, bss_size); + page_table.UnmapProcessCodeMemory(addr, nro_addr, nro_size); + }); + + const ResultCode result{ + page_table.MapProcessCodeMemory(addr + nro_size, bss_addr, bss_size)}; + + if (result == Kernel::ERR_INVALID_ADDRESS_STATE) { + continue; + } + + if (result.IsError()) { + return result; + } + + block_guard.Cancel(); + } + + if (ValidateRegionForMap(page_table, addr, size)) { + return MakeResult<VAddr>(addr); + } } - nrr.erase(iter); - IPC::ResponseBuilder rb{ctx, 2}; - rb.Push(RESULT_SUCCESS); + return ERROR_INSUFFICIENT_ADDRESS_SPACE; + } + + ResultCode LoadNro(Kernel::Process* process, const NROHeader& nro_header, VAddr nro_addr, + VAddr start) const { + const VAddr text_start{start + nro_header.text_offset}; + const VAddr ro_start{start + nro_header.ro_offset}; + const VAddr data_start{start + nro_header.rw_offset}; + const VAddr bss_start{data_start + nro_header.rw_size}; + const VAddr bss_end_addr{ + Common::AlignUp(bss_start + nro_header.bss_size, Kernel::Memory::PageSize)}; + + auto CopyCode{[&](VAddr src_addr, VAddr dst_addr, u64 size) { + std::vector<u8> source_data(size); + system.Memory().ReadBlock(src_addr, source_data.data(), source_data.size()); + system.Memory().WriteBlock(dst_addr, source_data.data(), source_data.size()); + }}; + CopyCode(nro_addr + nro_header.text_offset, text_start, nro_header.text_size); 
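+        // Each CopyCode call stages the segment through a host-side buffer:
+        // ReadBlock pulls it from the source NRO image and WriteBlock places it
+        // in the newly mapped region; permissions are only tightened afterwards.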
+        CopyCode(nro_addr + nro_header.ro_offset, ro_start, nro_header.ro_size);
+        CopyCode(nro_addr + nro_header.rw_offset, data_start, nro_header.rw_size);
+
+        CASCADE_CODE(process->PageTable().SetCodeMemoryPermission(
+            text_start, ro_start - text_start, Kernel::Memory::MemoryPermission::ReadAndExecute));
+        CASCADE_CODE(process->PageTable().SetCodeMemoryPermission(
+            ro_start, data_start - ro_start, Kernel::Memory::MemoryPermission::Read));
+
+        return process->PageTable().SetCodeMemoryPermission(
+            data_start, bss_end_addr - data_start, Kernel::Memory::MemoryPermission::ReadAndWrite);
     }

     void LoadNro(Kernel::HLERequestContext& ctx) {
@@ -317,9 +457,9 @@ public:
             return;
         }

-        NROHeader header;
+        // Load and validate the NRO header
+        NROHeader header{};
         std::memcpy(&header, nro_data.data(), sizeof(NROHeader));
-
         if (!IsValidNRO(header, nro_size, bss_size)) {
             LOG_ERROR(Service_LDR, "NRO was invalid!");
             IPC::ResponseBuilder rb{ctx, 2};
@@ -327,62 +467,48 @@ public:
             return;
         }

-        // Load NRO as new executable module
-        auto* process = system.CurrentProcess();
-        auto& vm_manager = process->VMManager();
-        auto map_address = vm_manager.FindFreeRegion(nro_size + bss_size);
-
-        if (!map_address.Succeeded() ||
-            *map_address + nro_size + bss_size > vm_manager.GetAddressSpaceEndAddress()) {
-
-            LOG_ERROR(Service_LDR,
-                      "General error while allocation memory or no available memory to allocate!");
+        // Map memory for the NRO
+        const auto map_result{MapNro(system.CurrentProcess(), nro_address, nro_size, bss_address,
+                                     bss_size, nro_size + bss_size)};
+        if (map_result.Failed()) {
             IPC::ResponseBuilder rb{ctx, 2};
-            rb.Push(ERROR_INVALID_MEMORY_STATE);
-            return;
+            rb.Push(map_result.Code());
+            return;
         }

-        // Mark text and read-only region as ModuleCode
-        ASSERT(vm_manager
-                   .MirrorMemory(*map_address, nro_address, header.text_size + header.ro_size,
-                                 Kernel::MemoryState::ModuleCode)
-                   .IsSuccess());
-        // Mark read/write region as ModuleCodeData, which is necessary if this region is used for
-        // TransferMemory (e.g. Final Fantasy VIII Remastered does this)
-        ASSERT(vm_manager
-                   .MirrorMemory(*map_address + header.rw_offset, nro_address + header.rw_offset,
-                                 header.rw_size, Kernel::MemoryState::ModuleCodeData)
-                   .IsSuccess());
-        // Revoke permissions from the old memory region
-        ASSERT(vm_manager.ReprotectRange(nro_address, nro_size, Kernel::VMAPermission::None)
-                   .IsSuccess());
-
-        if (bss_size > 0) {
-            // Mark BSS region as ModuleCodeData, which is necessary if this region is used for
-            // TransferMemory (e.g.
Final Fantasy VIII Remastered does this)
-            ASSERT(vm_manager
-                       .MirrorMemory(*map_address + nro_size, bss_address, bss_size,
-                                     Kernel::MemoryState::ModuleCodeData)
-                       .IsSuccess());
-            ASSERT(vm_manager.ReprotectRange(bss_address, bss_size, Kernel::VMAPermission::None)
-                       .IsSuccess());
+        // Load the NRO into the mapped memory
+        if (const auto result{LoadNro(system.CurrentProcess(), header, nro_address, *map_result)};
+            result.IsError()) {
+            IPC::ResponseBuilder rb{ctx, 2};
+            rb.Push(result);
+            return;
         }

-        vm_manager.ReprotectRange(*map_address, header.text_size,
-                                  Kernel::VMAPermission::ReadExecute);
-        vm_manager.ReprotectRange(*map_address + header.ro_offset, header.ro_size,
-                                  Kernel::VMAPermission::Read);
-        vm_manager.ReprotectRange(*map_address + header.rw_offset, header.rw_size,
-                                  Kernel::VMAPermission::ReadWrite);
+        // Track the loaded NRO
+        nro.insert_or_assign(*map_result, NROInfo{hash, *map_result, nro_size, bss_address,
+                                                  bss_size, header.text_size, header.ro_size,
+                                                  header.rw_size, nro_address});

+        // Invalidate JIT caches for the newly mapped process code
         system.InvalidateCpuInstructionCaches();

-        nro.insert_or_assign(*map_address,
-                             NROInfo{hash, nro_address, nro_size, bss_address, bss_size});
-
         IPC::ResponseBuilder rb{ctx, 4};
         rb.Push(RESULT_SUCCESS);
-        rb.Push(*map_address);
+        rb.Push(*map_result);
+    }
+
+    ResultCode UnmapNro(const NROInfo& info) {
+        // Each region must be unmapped separately to validate memory state
+        auto& page_table{system.CurrentProcess()->PageTable()};
+        CASCADE_CODE(page_table.UnmapProcessCodeMemory(info.nro_address + info.text_size +
+                                                           info.ro_size + info.data_size,
+                                                       info.bss_address, info.bss_size));
+        CASCADE_CODE(page_table.UnmapProcessCodeMemory(
+            info.nro_address + info.text_size + info.ro_size,
+            info.src_addr + info.text_size + info.ro_size, info.data_size));
+        CASCADE_CODE(page_table.UnmapProcessCodeMemory(
+            info.nro_address + info.text_size, info.src_addr + info.text_size, info.ro_size));
+        CASCADE_CODE(
+            page_table.UnmapProcessCodeMemory(info.nro_address, info.src_addr, info.text_size));
+        return RESULT_SUCCESS;
     }

     void UnloadNro(Kernel::HLERequestContext& ctx) {
@@ -422,30 +548,15 @@ public:
             return;
         }

-        auto& vm_manager = system.CurrentProcess()->VMManager();
-        const auto& nro_info = iter->second;
-
-        // Unmap the mirrored memory
-        ASSERT(
-            vm_manager.UnmapRange(nro_address, nro_info.nro_size + nro_info.bss_size).IsSuccess());
-
-        // Reprotect the source memory
-        ASSERT(vm_manager
-                   .ReprotectRange(nro_info.nro_address, nro_info.nro_size,
-                                   Kernel::VMAPermission::ReadWrite)
-                   .IsSuccess());
-        if (nro_info.bss_size > 0) {
-            ASSERT(vm_manager
-                       .ReprotectRange(nro_info.bss_address, nro_info.bss_size,
-                                       Kernel::VMAPermission::ReadWrite)
-                       .IsSuccess());
-        }
+        const auto result{UnmapNro(iter->second)};

         system.InvalidateCpuInstructionCaches();

         nro.erase(iter);
+
         IPC::ResponseBuilder rb{ctx, 2};
-        rb.Push(RESULT_SUCCESS);
+
+        rb.Push(result);
     }

     void Initialize(Kernel::HLERequestContext& ctx) {
@@ -458,56 +569,7 @@ public:
     }

 private:
-    using SHA256Hash = std::array<u8, 0x20>;
-
-    struct NROHeader {
-        INSERT_PADDING_WORDS(1);
-        u32_le mod_offset;
-        INSERT_PADDING_WORDS(2);
-        u32_le magic;
-        u32_le version;
-        u32_le nro_size;
-        u32_le flags;
-        u32_le text_offset;
-        u32_le text_size;
-        u32_le ro_offset;
-        u32_le ro_size;
-        u32_le rw_offset;
-        u32_le rw_size;
-        u32_le bss_size;
-        INSERT_PADDING_WORDS(1);
-        std::array<u8, 0x20> build_id;
-        INSERT_PADDING_BYTES(0x20);
-    };
-    static_assert(sizeof(NROHeader) == 0x80, "NROHeader has invalid size.");
-
-    
struct NRRHeader { - u32_le magic; - INSERT_PADDING_BYTES(12); - u64_le title_id_mask; - u64_le title_id_pattern; - INSERT_PADDING_BYTES(16); - std::array<u8, 0x100> modulus; - std::array<u8, 0x100> signature_1; - std::array<u8, 0x100> signature_2; - u64_le title_id; - u32_le size; - INSERT_PADDING_BYTES(4); - u32_le hash_offset; - u32_le hash_count; - INSERT_PADDING_BYTES(8); - }; - static_assert(sizeof(NRRHeader) == 0x350, "NRRHeader has incorrect size."); - - struct NROInfo { - SHA256Hash hash; - VAddr nro_address; - u64 nro_size; - VAddr bss_address; - u64 bss_size; - }; - - bool initialized = false; + bool initialized{}; std::map<VAddr, NROInfo> nro; std::map<VAddr, std::vector<SHA256Hash>> nrr; diff --git a/src/core/hle/service/lm/lm.cpp b/src/core/hle/service/lm/lm.cpp index 346c8f899..dec96b771 100644 --- a/src/core/hle/service/lm/lm.cpp +++ b/src/core/hle/service/lm/lm.cpp @@ -17,7 +17,7 @@ namespace Service::LM { class ILogger final : public ServiceFramework<ILogger> { public: - explicit ILogger(Manager& manager_, Memory::Memory& memory_) + explicit ILogger(Manager& manager_, Core::Memory::Memory& memory_) : ServiceFramework("ILogger"), manager{manager_}, memory{memory_} { static const FunctionInfo functions[] = { {0, &ILogger::Log, "Log"}, @@ -75,12 +75,12 @@ private: } Manager& manager; - Memory::Memory& memory; + Core::Memory::Memory& memory; }; class LM final : public ServiceFramework<LM> { public: - explicit LM(Manager& manager_, Memory::Memory& memory_) + explicit LM(Manager& manager_, Core::Memory::Memory& memory_) : ServiceFramework{"lm"}, manager{manager_}, memory{memory_} { // clang-format off static const FunctionInfo functions[] = { @@ -101,7 +101,7 @@ private: } Manager& manager; - Memory::Memory& memory; + Core::Memory::Memory& memory; }; void InstallInterfaces(Core::System& system) { diff --git a/src/core/hle/service/ns/pl_u.cpp b/src/core/hle/service/ns/pl_u.cpp index 8da4e52c5..ab1746d28 100644 --- a/src/core/hle/service/ns/pl_u.cpp +++ b/src/core/hle/service/ns/pl_u.cpp @@ -19,6 +19,7 @@ #include "core/file_sys/romfs.h" #include "core/file_sys/system_archive/system_archive.h" #include "core/hle/ipc_helpers.h" +#include "core/hle/kernel/kernel.h" #include "core/hle/kernel/physical_memory.h" #include "core/hle/kernel/shared_memory.h" #include "core/hle/service/filesystem/filesystem.h" @@ -265,16 +266,13 @@ void PL_U::GetSharedMemoryAddressOffset(Kernel::HLERequestContext& ctx) { void PL_U::GetSharedMemoryNativeHandle(Kernel::HLERequestContext& ctx) { // Map backing memory for the font data LOG_DEBUG(Service_NS, "called"); - system.CurrentProcess()->VMManager().MapMemoryBlock(SHARED_FONT_MEM_VADDR, impl->shared_font, 0, - SHARED_FONT_MEM_SIZE, - Kernel::MemoryState::Shared); // Create shared font memory object auto& kernel = system.Kernel(); - impl->shared_font_mem = Kernel::SharedMemory::Create( - kernel, system.CurrentProcess(), SHARED_FONT_MEM_SIZE, Kernel::MemoryPermission::ReadWrite, - Kernel::MemoryPermission::Read, SHARED_FONT_MEM_VADDR, Kernel::MemoryRegion::BASE, - "PL_U:shared_font_mem"); + impl->shared_font_mem = SharedFrom(&kernel.GetFontSharedMem()); + + std::memcpy(impl->shared_font_mem->GetPointer(), impl->shared_font->data(), + impl->shared_font->size()); IPC::ResponseBuilder rb{ctx, 2, 1}; rb.Push(RESULT_SUCCESS); diff --git a/src/core/hle/service/time/interface.cpp b/src/core/hle/service/time/interface.cpp index f509653a3..ba8fd6152 100644 --- a/src/core/hle/service/time/interface.cpp +++ b/src/core/hle/service/time/interface.cpp @@ -29,7 +29,7 @@ 
Time::Time(std::shared_ptr<Module> module, Core::System& system, const char* nam {300, &Time::CalculateMonotonicSystemClockBaseTimePoint, "CalculateMonotonicSystemClockBaseTimePoint"}, {400, &Time::GetClockSnapshot, "GetClockSnapshot"}, {401, &Time::GetClockSnapshotFromSystemClockContext, "GetClockSnapshotFromSystemClockContext"}, - {500, nullptr, "CalculateStandardUserSystemClockDifferenceByUser"}, + {500, &Time::CalculateStandardUserSystemClockDifferenceByUser, "CalculateStandardUserSystemClockDifferenceByUser"}, {501, &Time::CalculateSpanBetween, "CalculateSpanBetween"}, }; // clang-format on diff --git a/src/core/hle/service/time/standard_network_system_clock_core.h b/src/core/hle/service/time/standard_network_system_clock_core.h index 3f505c37c..c993bdf79 100644 --- a/src/core/hle/service/time/standard_network_system_clock_core.h +++ b/src/core/hle/service/time/standard_network_system_clock_core.h @@ -23,7 +23,7 @@ public: standard_network_clock_sufficient_accuracy = value; } - bool IsStandardNetworkSystemClockAccuracySufficient(Core::System& system) { + bool IsStandardNetworkSystemClockAccuracySufficient(Core::System& system) const { SystemClockContext context{}; if (GetClockContext(system, context) != RESULT_SUCCESS) { return {}; diff --git a/src/core/hle/service/time/steady_clock_core.h b/src/core/hle/service/time/steady_clock_core.h index 84af3d105..d80a2385f 100644 --- a/src/core/hle/service/time/steady_clock_core.h +++ b/src/core/hle/service/time/steady_clock_core.h @@ -16,6 +16,7 @@ namespace Service::Time::Clock { class SteadyClockCore { public: SteadyClockCore() = default; + virtual ~SteadyClockCore() = default; const Common::UUID& GetClockSourceId() const { return clock_source_id; diff --git a/src/core/hle/service/time/system_clock_context_update_callback.h b/src/core/hle/service/time/system_clock_context_update_callback.h index 6260de6c3..2b0fa7e75 100644 --- a/src/core/hle/service/time/system_clock_context_update_callback.h +++ b/src/core/hle/service/time/system_clock_context_update_callback.h @@ -20,7 +20,7 @@ namespace Service::Time::Clock { class SystemClockContextUpdateCallback { public: SystemClockContextUpdateCallback(); - ~SystemClockContextUpdateCallback(); + virtual ~SystemClockContextUpdateCallback(); bool NeedUpdate(const SystemClockContext& value) const; diff --git a/src/core/hle/service/time/system_clock_core.cpp b/src/core/hle/service/time/system_clock_core.cpp index 1a3ab8cfa..d31d4e2ca 100644 --- a/src/core/hle/service/time/system_clock_core.cpp +++ b/src/core/hle/service/time/system_clock_core.cpp @@ -9,7 +9,7 @@ namespace Service::Time::Clock { SystemClockCore::SystemClockCore(SteadyClockCore& steady_clock_core) - : steady_clock_core{steady_clock_core}, is_initialized{} { + : steady_clock_core{steady_clock_core} { context.steady_time_point.clock_source_id = steady_clock_core.GetClockSourceId(); } diff --git a/src/core/hle/service/time/system_clock_core.h b/src/core/hle/service/time/system_clock_core.h index 54407a6c5..608dd3b2e 100644 --- a/src/core/hle/service/time/system_clock_core.h +++ b/src/core/hle/service/time/system_clock_core.h @@ -22,7 +22,7 @@ class SystemClockContextUpdateCallback; class SystemClockCore { public: explicit SystemClockCore(SteadyClockCore& steady_clock_core); - ~SystemClockCore(); + virtual ~SystemClockCore(); SteadyClockCore& GetSteadyClockCore() const { return steady_clock_core; diff --git a/src/core/hle/service/time/time.cpp b/src/core/hle/service/time/time.cpp index ce859f18d..e722886de 100644 --- 
a/src/core/hle/service/time/time.cpp +++ b/src/core/hle/service/time/time.cpp @@ -308,6 +308,29 @@ void Module::Interface::GetClockSnapshotFromSystemClockContext(Kernel::HLEReques ctx.WriteBuffer(&clock_snapshot, sizeof(Clock::ClockSnapshot)); } +void Module::Interface::CalculateStandardUserSystemClockDifferenceByUser( + Kernel::HLERequestContext& ctx) { + LOG_DEBUG(Service_Time, "called"); + + IPC::RequestParser rp{ctx}; + const auto snapshot_a = rp.PopRaw<Clock::ClockSnapshot>(); + const auto snapshot_b = rp.PopRaw<Clock::ClockSnapshot>(); + + auto time_span_type{Clock::TimeSpanType::FromSeconds(snapshot_b.user_context.offset - + snapshot_a.user_context.offset)}; + + if ((snapshot_b.user_context.steady_time_point.clock_source_id != + snapshot_a.user_context.steady_time_point.clock_source_id) || + (snapshot_b.is_automatic_correction_enabled && + snapshot_a.is_automatic_correction_enabled)) { + time_span_type.nanoseconds = 0; + } + + IPC::ResponseBuilder rb{ctx, (sizeof(s64) / 4) + 2}; + rb.Push(RESULT_SUCCESS); + rb.PushRaw(time_span_type.nanoseconds); +} + void Module::Interface::CalculateSpanBetween(Kernel::HLERequestContext& ctx) { LOG_DEBUG(Service_Time, "called"); diff --git a/src/core/hle/service/time/time.h b/src/core/hle/service/time/time.h index 351988468..41f3002e9 100644 --- a/src/core/hle/service/time/time.h +++ b/src/core/hle/service/time/time.h @@ -32,6 +32,7 @@ public: void CalculateMonotonicSystemClockBaseTimePoint(Kernel::HLERequestContext& ctx); void GetClockSnapshot(Kernel::HLERequestContext& ctx); void GetClockSnapshotFromSystemClockContext(Kernel::HLERequestContext& ctx); + void CalculateStandardUserSystemClockDifferenceByUser(Kernel::HLERequestContext& ctx); void CalculateSpanBetween(Kernel::HLERequestContext& ctx); void GetSharedMemoryNativeHandle(Kernel::HLERequestContext& ctx); diff --git a/src/core/hle/service/time/time_sharedmemory.cpp b/src/core/hle/service/time/time_sharedmemory.cpp index fdaef233f..999ec1e51 100644 --- a/src/core/hle/service/time/time_sharedmemory.cpp +++ b/src/core/hle/service/time/time_sharedmemory.cpp @@ -6,6 +6,7 @@ #include "core/core_timing.h" #include "core/core_timing_util.h" #include "core/hardware_properties.h" +#include "core/hle/kernel/kernel.h" #include "core/hle/service/time/clock_types.h" #include "core/hle/service/time/steady_clock_core.h" #include "core/hle/service/time/time_sharedmemory.h" @@ -15,9 +16,7 @@ namespace Service::Time { static constexpr std::size_t SHARED_MEMORY_SIZE{0x1000}; SharedMemory::SharedMemory(Core::System& system) : system(system) { - shared_memory_holder = Kernel::SharedMemory::Create( - system.Kernel(), nullptr, SHARED_MEMORY_SIZE, Kernel::MemoryPermission::ReadWrite, - Kernel::MemoryPermission::Read, 0, Kernel::MemoryRegion::BASE, "Time:SharedMemory"); + shared_memory_holder = SharedFrom(&system.Kernel().GetTimeSharedMem()); std::memset(shared_memory_holder->GetPointer(), 0, SHARED_MEMORY_SIZE); } diff --git a/src/core/hle/service/time/time_zone_manager.cpp b/src/core/hle/service/time/time_zone_manager.cpp index 07b553a43..c8159bcd5 100644 --- a/src/core/hle/service/time/time_zone_manager.cpp +++ b/src/core/hle/service/time/time_zone_manager.cpp @@ -309,7 +309,7 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) { offset = GetTZName(name, offset); std_len = offset; } - if (!std_len) { + if (std_len == 0) { return {}; } if (!GetOffset(name, offset, std_offset)) { @@ -320,7 +320,7 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) { int dest_len{}; int dest_offset{}; 
const char* dest_name{name + offset}; - if (rule.chars.size() < char_count) { + if (rule.chars.size() < std::size_t(char_count)) { return {}; } @@ -343,7 +343,7 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) { return {}; } char_count += dest_len + 1; - if (rule.chars.size() < char_count) { + if (rule.chars.size() < std::size_t(char_count)) { return {}; } if (name[offset] != '\0' && name[offset] != ',' && name[offset] != ';') { @@ -414,7 +414,7 @@ static bool ParsePosixName(const char* name, TimeZoneRule& rule) { if (is_reversed || (start_time < end_time && (end_time - start_time < (year_seconds + (std_offset - dest_offset))))) { - if (rule.ats.size() - 2 < time_count) { + if (rule.ats.size() - 2 < std::size_t(time_count)) { break; } @@ -609,7 +609,7 @@ static bool ParseTimeZoneBinary(TimeZoneRule& time_zone_rule, FileSys::VirtualFi } const u64 position{(read_offset - sizeof(TzifHeader))}; - const std::size_t bytes_read{vfs_file->GetSize() - sizeof(TzifHeader) - position}; + const s64 bytes_read = s64(vfs_file->GetSize() - sizeof(TzifHeader) - position); if (bytes_read < 0) { return {}; } @@ -621,11 +621,11 @@ static bool ParseTimeZoneBinary(TimeZoneRule& time_zone_rule, FileSys::VirtualFi std::array<char, time_zone_name_max + 1> temp_name{}; vfs_file->ReadArray(temp_name.data(), bytes_read, read_offset); if (bytes_read > 2 && temp_name[0] == '\n' && temp_name[bytes_read - 1] == '\n' && - time_zone_rule.type_count + 2 <= time_zone_rule.ttis.size()) { + std::size_t(time_zone_rule.type_count) + 2 <= time_zone_rule.ttis.size()) { temp_name[bytes_read - 1] = '\0'; std::array<char, time_zone_name_max> name{}; - std::memcpy(name.data(), temp_name.data() + 1, bytes_read - 1); + std::memcpy(name.data(), temp_name.data() + 1, std::size_t(bytes_read - 1)); TimeZoneRule temp_rule; if (ParsePosixName(name.data(), temp_rule)) { diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp index fdc62d05b..7f109f4eb 100644 --- a/src/core/hle/service/vi/vi.cpp +++ b/src/core/hle/service/vi/vi.cpp @@ -101,8 +101,8 @@ public: } std::u16string ReadInterfaceToken() { - u32 unknown = Read<u32_le>(); - u32 length = Read<u32_le>(); + [[maybe_unused]] const u32 unknown = Read<u32_le>(); + const u32 length = Read<u32_le>(); std::u16string token{}; diff --git a/src/core/loader/deconstructed_rom_directory.cpp b/src/core/loader/deconstructed_rom_directory.cpp index 53559e8b1..134e83412 100644 --- a/src/core/loader/deconstructed_rom_directory.cpp +++ b/src/core/loader/deconstructed_rom_directory.cpp @@ -14,6 +14,7 @@ #include "core/file_sys/romfs_factory.h" #include "core/gdbstub/gdbstub.h" #include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" #include "core/hle/service/filesystem/filesystem.h" #include "core/loader/deconstructed_rom_directory.h" @@ -129,27 +130,47 @@ AppLoader_DeconstructedRomDirectory::LoadResult AppLoader_DeconstructedRomDirect } metadata.Print(); - if (process.LoadFromMetadata(metadata).IsError()) { - return {ResultStatus::ErrorUnableToParseKernelMetadata, {}}; + const auto static_modules = {"rtld", "main", "subsdk0", "subsdk1", "subsdk2", "subsdk3", + "subsdk4", "subsdk5", "subsdk6", "subsdk7", "sdk"}; + + // Use the NSO module loader to figure out the code layout + std::size_t code_size{}; + for (const auto& module : static_modules) { + const FileSys::VirtualFile module_file{dir->GetFile(module)}; + if (!module_file) { + continue; + } + + const bool should_pass_arguments{std::strcmp(module, 
"rtld") == 0}; + const auto tentative_next_load_addr{AppLoader_NSO::LoadModule( + process, *module_file, code_size, should_pass_arguments, false)}; + if (!tentative_next_load_addr) { + return {ResultStatus::ErrorLoadingNSO, {}}; + } + + code_size = *tentative_next_load_addr; } - const FileSys::PatchManager pm(metadata.GetTitleID()); + // Setup the process code layout + if (process.LoadFromMetadata(metadata, code_size).IsError()) { + return {ResultStatus::ErrorUnableToParseKernelMetadata, {}}; + } // Load NSO modules modules.clear(); - const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); - VAddr next_load_addr = base_address; - for (const auto& module : {"rtld", "main", "subsdk0", "subsdk1", "subsdk2", "subsdk3", - "subsdk4", "subsdk5", "subsdk6", "subsdk7", "sdk"}) { - const FileSys::VirtualFile module_file = dir->GetFile(module); - if (module_file == nullptr) { + const VAddr base_address{process.PageTable().GetCodeRegionStart()}; + VAddr next_load_addr{base_address}; + const FileSys::PatchManager pm{metadata.GetTitleID()}; + for (const auto& module : static_modules) { + const FileSys::VirtualFile module_file{dir->GetFile(module)}; + if (!module_file) { continue; } - const VAddr load_addr = next_load_addr; - const bool should_pass_arguments = std::strcmp(module, "rtld") == 0; - const auto tentative_next_load_addr = - AppLoader_NSO::LoadModule(process, *module_file, load_addr, should_pass_arguments, pm); + const VAddr load_addr{next_load_addr}; + const bool should_pass_arguments{std::strcmp(module, "rtld") == 0}; + const auto tentative_next_load_addr{AppLoader_NSO::LoadModule( + process, *module_file, load_addr, should_pass_arguments, true, pm)}; if (!tentative_next_load_addr) { return {ResultStatus::ErrorLoadingNSO, {}}; } diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp index 8908e5328..1e9ed2837 100644 --- a/src/core/loader/elf.cpp +++ b/src/core/loader/elf.cpp @@ -10,8 +10,8 @@ #include "common/file_util.h" #include "common/logging/log.h" #include "core/hle/kernel/code_set.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/vm_manager.h" #include "core/loader/elf.h" #include "core/memory.h" @@ -393,7 +393,7 @@ AppLoader_ELF::LoadResult AppLoader_ELF::Load(Kernel::Process& process) { return {ResultStatus::ErrorIncorrectELFFileSize, {}}; } - const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); + const VAddr base_address = process.PageTable().GetCodeRegionStart(); ElfReader elf_reader(&buffer[0]); Kernel::CodeSet codeset = elf_reader.LoadInto(base_address); const VAddr entry_point = codeset.entrypoint; @@ -401,7 +401,7 @@ AppLoader_ELF::LoadResult AppLoader_ELF::Load(Kernel::Process& process) { process.LoadModule(std::move(codeset), entry_point); is_loaded = true; - return {ResultStatus::Success, LoadParameters{48, Memory::DEFAULT_STACK_SIZE}}; + return {ResultStatus::Success, LoadParameters{48, Core::Memory::DEFAULT_STACK_SIZE}}; } } // namespace Loader diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp index 092103abe..40fa03ad1 100644 --- a/src/core/loader/kip.cpp +++ b/src/core/loader/kip.cpp @@ -7,14 +7,16 @@ #include "core/file_sys/program_metadata.h" #include "core/gdbstub/gdbstub.h" #include "core/hle/kernel/code_set.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" #include "core/loader/kip.h" +#include "core/memory.h" namespace Loader { namespace { constexpr u32 PageAlignSize(u32 size) { - return (size + 
Memory::PAGE_MASK) & ~Memory::PAGE_MASK; + return (size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK; } } // Anonymous namespace @@ -68,7 +70,7 @@ AppLoader::LoadResult AppLoader_KIP::Load(Kernel::Process& process) { kip->GetMainThreadCpuCore(), kip->GetMainThreadStackSize(), kip->GetTitleID(), 0xFFFFFFFFFFFFFFFF, kip->GetKernelCapabilities()); - const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); + const VAddr base_address = process.PageTable().GetCodeRegionStart(); Kernel::CodeSet codeset; Kernel::PhysicalMemory program_image; diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp index 175898b91..5d7e8136e 100644 --- a/src/core/loader/nro.cpp +++ b/src/core/loader/nro.cpp @@ -16,8 +16,8 @@ #include "core/file_sys/vfs_offset.h" #include "core/gdbstub/gdbstub.h" #include "core/hle/kernel/code_set.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/vm_manager.h" #include "core/hle/service/filesystem/filesystem.h" #include "core/loader/nro.h" #include "core/loader/nso.h" @@ -127,7 +127,7 @@ FileType AppLoader_NRO::IdentifyType(const FileSys::VirtualFile& file) { } static constexpr u32 PageAlignSize(u32 size) { - return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK; + return (size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK; } static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data, @@ -208,7 +208,7 @@ AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::Process& process) { } // Load NRO - const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); + const VAddr base_address = process.PageTable().GetCodeRegionStart(); if (!LoadNro(process, *file, base_address)) { return {ResultStatus::ErrorLoadingNRO, {}}; @@ -221,7 +221,7 @@ AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::Process& process) { is_loaded = true; return {ResultStatus::Success, - LoadParameters{Kernel::THREADPRIO_DEFAULT, Memory::DEFAULT_STACK_SIZE}}; + LoadParameters{Kernel::THREADPRIO_DEFAULT, Core::Memory::DEFAULT_STACK_SIZE}}; } ResultStatus AppLoader_NRO::ReadIcon(std::vector<u8>& buffer) { diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp index 044067a5b..575330a86 100644 --- a/src/core/loader/nso.cpp +++ b/src/core/loader/nso.cpp @@ -16,8 +16,8 @@ #include "core/file_sys/patch_manager.h" #include "core/gdbstub/gdbstub.h" #include "core/hle/kernel/code_set.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/vm_manager.h" #include "core/loader/nso.h" #include "core/memory.h" #include "core/settings.h" @@ -37,7 +37,7 @@ static_assert(sizeof(MODHeader) == 0x1c, "MODHeader has incorrect size."); std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data, const NSOSegmentHeader& header) { - const std::vector<u8> uncompressed_data = + std::vector<u8> uncompressed_data = Common::Compression::DecompressDataLZ4(compressed_data, header.size); ASSERT_MSG(uncompressed_data.size() == header.size, "{} != {}", header.size, @@ -47,7 +47,7 @@ std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data, } constexpr u32 PageAlignSize(u32 size) { - return (size + Memory::PAGE_MASK) & ~Memory::PAGE_MASK; + return (size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK; } } // Anonymous namespace @@ -73,7 +73,7 @@ FileType AppLoader_NSO::IdentifyType(const FileSys::VirtualFile& file) { std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, const FileSys::VfsFile& 
file, VAddr load_base, - bool should_pass_arguments, + bool should_pass_arguments, bool load_into_process, std::optional<FileSys::PatchManager> pm) { if (file.GetSize() < sizeof(NSOHeader)) { return {}; @@ -97,21 +97,17 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, if (nso_header.IsSegmentCompressed(i)) { data = DecompressSegment(data, nso_header.segments[i]); } - program_image.resize(nso_header.segments[i].location + - PageAlignSize(static_cast<u32>(data.size()))); + program_image.resize(nso_header.segments[i].location + static_cast<u32>(data.size())); std::memcpy(program_image.data() + nso_header.segments[i].location, data.data(), data.size()); codeset.segments[i].addr = nso_header.segments[i].location; codeset.segments[i].offset = nso_header.segments[i].location; - codeset.segments[i].size = PageAlignSize(static_cast<u32>(data.size())); + codeset.segments[i].size = nso_header.segments[i].size; } - if (should_pass_arguments) { - std::vector<u8> arg_data{Settings::values.program_args.begin(), - Settings::values.program_args.end()}; - if (arg_data.empty()) { - arg_data.resize(NSO_ARGUMENT_DEFAULT_SIZE); - } + if (should_pass_arguments && !Settings::values.program_args.empty()) { + const auto arg_data{Settings::values.program_args}; + codeset.DataSegment().size += NSO_ARGUMENT_DATA_ALLOCATION_SIZE; NSOArgumentHeader args_header{ NSO_ARGUMENT_DATA_ALLOCATION_SIZE, static_cast<u32_le>(arg_data.size()), {}}; @@ -123,24 +119,15 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, arg_data.size()); } - // MOD header pointer is at .text offset + 4 - u32 module_offset; - std::memcpy(&module_offset, program_image.data() + 4, sizeof(u32)); - - // Read MOD header - MODHeader mod_header{}; - // Default .bss to size in segment header if MOD0 section doesn't exist - u32 bss_size{PageAlignSize(nso_header.segments[2].bss_size)}; - std::memcpy(&mod_header, program_image.data() + module_offset, sizeof(MODHeader)); - const bool has_mod_header{mod_header.magic == Common::MakeMagic('M', 'O', 'D', '0')}; - if (has_mod_header) { - // Resize program image to include .bss section and page align each section - bss_size = PageAlignSize(mod_header.bss_end_offset - mod_header.bss_start_offset); - } - codeset.DataSegment().size += bss_size; - const u32 image_size{PageAlignSize(static_cast<u32>(program_image.size()) + bss_size)}; + codeset.DataSegment().size += nso_header.segments[2].bss_size; + const u32 image_size{ + PageAlignSize(static_cast<u32>(program_image.size()) + nso_header.segments[2].bss_size)}; program_image.resize(image_size); + for (std::size_t i = 0; i < nso_header.segments.size(); ++i) { + codeset.segments[i].size = PageAlignSize(codeset.segments[i].size); + } + // Apply patches if necessary if (pm && (pm->HasNSOPatch(nso_header.build_id) || Settings::values.dump_nso)) { std::vector<u8> pi_header; @@ -154,6 +141,11 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process, std::copy(pi_header.begin() + sizeof(NSOHeader), pi_header.end(), program_image.data()); } + // If we aren't actually loading (i.e. 
just computing the process code layout), we are done + if (!load_into_process) { + return load_base + image_size; + } + // Apply cheats if they exist and the program has a valid title ID if (pm) { auto& system = Core::System::GetInstance(); @@ -182,8 +174,8 @@ AppLoader_NSO::LoadResult AppLoader_NSO::Load(Kernel::Process& process) { modules.clear(); // Load module - const VAddr base_address = process.VMManager().GetCodeRegionBaseAddress(); - if (!LoadModule(process, *file, base_address, true)) { + const VAddr base_address = process.PageTable().GetCodeRegionStart(); + if (!LoadModule(process, *file, base_address, true, true)) { return {ResultStatus::ErrorLoadingNSO, {}}; } @@ -192,7 +184,7 @@ AppLoader_NSO::LoadResult AppLoader_NSO::Load(Kernel::Process& process) { is_loaded = true; return {ResultStatus::Success, - LoadParameters{Kernel::THREADPRIO_DEFAULT, Memory::DEFAULT_STACK_SIZE}}; + LoadParameters{Kernel::THREADPRIO_DEFAULT, Core::Memory::DEFAULT_STACK_SIZE}}; } ResultStatus AppLoader_NSO::ReadNSOModules(Modules& modules) { diff --git a/src/core/loader/nso.h b/src/core/loader/nso.h index d2d600cd9..b210830f0 100644 --- a/src/core/loader/nso.h +++ b/src/core/loader/nso.h @@ -56,8 +56,6 @@ static_assert(sizeof(NSOHeader) == 0x100, "NSOHeader has incorrect size."); static_assert(std::is_trivially_copyable_v<NSOHeader>, "NSOHeader must be trivially copyable."); constexpr u64 NSO_ARGUMENT_DATA_ALLOCATION_SIZE = 0x9000; -// NOTE: Official software default argument state is unverified. -constexpr u64 NSO_ARGUMENT_DEFAULT_SIZE = 1; struct NSOArgumentHeader { u32_le allocated_size; @@ -84,6 +82,7 @@ public: static std::optional<VAddr> LoadModule(Kernel::Process& process, const FileSys::VfsFile& file, VAddr load_base, bool should_pass_arguments, + bool load_into_process, std::optional<FileSys::PatchManager> pm = {}); LoadResult Load(Kernel::Process& process) override; diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 6061d37ae..9d87045a0 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp @@ -14,13 +14,14 @@ #include "common/swap.h" #include "core/arm/arm_interface.h" #include "core/core.h" +#include "core/device_memory.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/physical_memory.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/vm_manager.h" #include "core/memory.h" #include "video_core/gpu.h" -namespace Memory { +namespace Core::Memory { // Implementation class used to keep the specifics of the memory subsystem hidden // from outside classes. 
This also allows modification to the internals of the memory @@ -29,9 +30,9 @@ struct Memory::Impl { explicit Impl(Core::System& system_) : system{system_} {} void SetCurrentPageTable(Kernel::Process& process) { - current_page_table = &process.VMManager().page_table; + current_page_table = &process.PageTable().PageTableImpl(); - const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth(); + const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth(); system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width); system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width); @@ -39,12 +40,7 @@ struct Memory::Impl { system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width); } - void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, - Kernel::PhysicalMemory& memory, VAddr offset) { - MapMemoryRegion(page_table, base, size, memory.data() + offset); - } - - void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) { + void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) { ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory); @@ -52,46 +48,27 @@ struct Memory::Impl { void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, Common::MemoryHookPointer mmio_handler) { - ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); - ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); - MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, - Common::PageType::Special); - - const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); - const Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice, - std::move(mmio_handler)}; - page_table.special_regions.add( - std::make_pair(interval, std::set<Common::SpecialRegion>{region})); + UNIMPLEMENTED(); } void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); - MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, - Common::PageType::Unmapped); - - const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); - page_table.special_regions.erase(interval); + MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped); } void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, Common::MemoryHookPointer hook) { - const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); - const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; - page_table.special_regions.add( - std::make_pair(interval, std::set<Common::SpecialRegion>{region})); + UNIMPLEMENTED(); } void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, Common::MemoryHookPointer hook) { - const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); - const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; - page_table.special_regions.subtract( - std::make_pair(interval, std::set<Common::SpecialRegion>{region})); + UNIMPLEMENTED(); 
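
A note on the page-table representation this rewrite keeps: entries in pointers[] are biased host pointers, i.e. each entry stores the host base minus the page's virtual base, which is why the lookups in the hunks below can simply add the full virtual address. A minimal sketch of the idiom, with simplified types and 4 KiB pages assumed (illustrative only, not the emulator's real interface):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr std::uint64_t PAGE_BITS = 12; // 4 KiB pages assumed

    struct BiasedPageTable {
        std::vector<std::uint8_t*> pointers; // entry = host_base - (page << PAGE_BITS)

        explicit BiasedPageTable(std::size_t num_pages) : pointers(num_pages, nullptr) {}

        void Map(std::uint64_t page, std::uint8_t* host_base) {
            // Store the biased pointer; Translate() then needs no offset masking.
            pointers[page] = host_base - (page << PAGE_BITS);
        }

        std::uint8_t* Translate(std::uint64_t vaddr) const {
            std::uint8_t* const biased = pointers[vaddr >> PAGE_BITS];
            return biased ? biased + vaddr : nullptr; // bias + vaddr = host address
        }
    };
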
} bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const { - const auto& page_table = process.VMManager().page_table; + const auto& page_table = process.PageTable().PageTableImpl(); const u8* const page_pointer = page_table.pointers[vaddr >> PAGE_BITS]; if (page_pointer != nullptr) { @@ -113,55 +90,28 @@ struct Memory::Impl { return IsValidVirtualAddress(*system.CurrentProcess(), vaddr); } - /** - * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) - * using a VMA from the current process - */ - u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) { - const auto& vm_manager = process.VMManager(); + u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const { + const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]}; - const auto it = vm_manager.FindVMA(vaddr); - DEBUG_ASSERT(vm_manager.IsValidHandle(it)); - - u8* direct_pointer = nullptr; - const auto& vma = it->second; - switch (vma.type) { - case Kernel::VMAType::AllocatedMemoryBlock: - direct_pointer = vma.backing_block->data() + vma.offset; - break; - case Kernel::VMAType::BackingMemory: - direct_pointer = vma.backing_memory; - break; - case Kernel::VMAType::Free: - return nullptr; - default: - UNREACHABLE(); + if (!paddr) { + return {}; } - return direct_pointer + (vaddr - vma.base); + return system.DeviceMemory().GetPointer(paddr) + vaddr; } - /** - * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) - * using a VMA from the current process. - */ - u8* GetPointerFromVMA(VAddr vaddr) { - return GetPointerFromVMA(*system.CurrentProcess(), vaddr); - } - - u8* GetPointer(const VAddr vaddr) { - u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; - if (page_pointer != nullptr) { + u8* GetPointer(const VAddr vaddr) const { + u8* const page_pointer{current_page_table->pointers[vaddr >> PAGE_BITS]}; + if (page_pointer) { return page_pointer + vaddr; } if (current_page_table->attributes[vaddr >> PAGE_BITS] == Common::PageType::RasterizerCachedMemory) { - return GetPointerFromVMA(vaddr); + return GetPointerFromRasterizerCachedMemory(vaddr); } - LOG_ERROR(HW_Memory, "Unknown GetPointer @ 0x{:016X}", vaddr); - return nullptr; + return {}; } u8 Read8(const VAddr addr) { @@ -169,15 +119,33 @@ struct Memory::Impl { } u16 Read16(const VAddr addr) { - return Read<u16_le>(addr); + if ((addr & 1) == 0) { + return Read<u16_le>(addr); + } else { + const u8 a{Read<u8>(addr)}; + const u8 b{Read<u8>(addr + sizeof(u8))}; + return (static_cast<u16>(b) << 8) | a; + } } u32 Read32(const VAddr addr) { - return Read<u32_le>(addr); + if ((addr & 3) == 0) { + return Read<u32_le>(addr); + } else { + const u16 a{Read16(addr)}; + const u16 b{Read16(addr + sizeof(u16))}; + return (static_cast<u32>(b) << 16) | a; + } } u64 Read64(const VAddr addr) { - return Read<u64_le>(addr); + if ((addr & 7) == 0) { + return Read<u64_le>(addr); + } else { + const u32 a{Read32(addr)}; + const u32 b{Read32(addr + sizeof(u32))}; + return (static_cast<u64>(b) << 32) | a; + } } void Write8(const VAddr addr, const u8 data) { @@ -185,15 +153,30 @@ struct Memory::Impl { } void Write16(const VAddr addr, const u16 data) { - Write<u16_le>(addr, data); + if ((addr & 1) == 0) { + Write<u16_le>(addr, data); + } else { + Write<u8>(addr, static_cast<u8>(data)); + Write<u8>(addr + sizeof(u8), static_cast<u8>(data >> 8)); + } } void Write32(const VAddr addr, const u32 data) { - Write<u32_le>(addr, data); + if ((addr & 3) == 0) { + Write<u32_le>(addr, data); 
+ } else { + Write16(addr, static_cast<u16>(data)); + Write16(addr + sizeof(u16), static_cast<u16>(data >> 16)); + } } void Write64(const VAddr addr, const u64 data) { - Write<u64_le>(addr, data); + if ((addr & 7) == 0) { + Write<u64_le>(addr, data); + } else { + Write32(addr, static_cast<u32>(data)); + Write32(addr + sizeof(u32), static_cast<u32>(data >> 32)); + } } std::string ReadCString(VAddr vaddr, std::size_t max_length) { @@ -213,7 +196,7 @@ struct Memory::Impl { void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, const std::size_t size) { - const auto& page_table = process.VMManager().page_table; + const auto& page_table = process.PageTable().PageTableImpl(); std::size_t remaining_size = size; std::size_t page_index = src_addr >> PAGE_BITS; @@ -241,7 +224,7 @@ struct Memory::Impl { break; } case Common::PageType::RasterizerCachedMemory: { - const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; system.GPU().FlushRegion(current_vaddr, copy_amount); std::memcpy(dest_buffer, host_ptr, copy_amount); break; @@ -259,7 +242,7 @@ struct Memory::Impl { void ReadBlockUnsafe(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, const std::size_t size) { - const auto& page_table = process.VMManager().page_table; + const auto& page_table = process.PageTable().PageTableImpl(); std::size_t remaining_size = size; std::size_t page_index = src_addr >> PAGE_BITS; @@ -287,7 +270,7 @@ struct Memory::Impl { break; } case Common::PageType::RasterizerCachedMemory: { - const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; std::memcpy(dest_buffer, host_ptr, copy_amount); break; } @@ -312,7 +295,7 @@ struct Memory::Impl { void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer, const std::size_t size) { - const auto& page_table = process.VMManager().page_table; + const auto& page_table = process.PageTable().PageTableImpl(); std::size_t remaining_size = size; std::size_t page_index = dest_addr >> PAGE_BITS; std::size_t page_offset = dest_addr & PAGE_MASK; @@ -338,7 +321,7 @@ struct Memory::Impl { break; } case Common::PageType::RasterizerCachedMemory: { - u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; system.GPU().InvalidateRegion(current_vaddr, copy_amount); std::memcpy(host_ptr, src_buffer, copy_amount); break; @@ -356,7 +339,7 @@ struct Memory::Impl { void WriteBlockUnsafe(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer, const std::size_t size) { - const auto& page_table = process.VMManager().page_table; + const auto& page_table = process.PageTable().PageTableImpl(); std::size_t remaining_size = size; std::size_t page_index = dest_addr >> PAGE_BITS; std::size_t page_offset = dest_addr & PAGE_MASK; @@ -382,7 +365,7 @@ struct Memory::Impl { break; } case Common::PageType::RasterizerCachedMemory: { - u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; std::memcpy(host_ptr, src_buffer, copy_amount); break; } @@ -406,7 +389,7 @@ struct Memory::Impl { } void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) { - const auto& page_table = process.VMManager().page_table; + const auto& 
page_table = process.VMManager().page_table; + const auto& page_table = process.PageTable().PageTableImpl(); std::size_t remaining_size = size; std::size_t page_index = dest_addr >> PAGE_BITS; std::size_t page_offset = dest_addr & PAGE_MASK; @@ -432,7 +415,7 @@ break; } case Common::PageType::RasterizerCachedMemory: { - u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; system.GPU().InvalidateRegion(current_vaddr, copy_amount); std::memset(host_ptr, 0, copy_amount); break; @@ -453,7 +436,7 @@ void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, const std::size_t size) { - const auto& page_table = process.VMManager().page_table; + const auto& page_table = process.PageTable().PageTableImpl(); std::size_t remaining_size = size; std::size_t page_index = src_addr >> PAGE_BITS; std::size_t page_offset = src_addr & PAGE_MASK; @@ -479,7 +462,7 @@ break; } case Common::PageType::RasterizerCachedMemory: { - const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); + const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; system.GPU().FlushRegion(current_vaddr, copy_amount); WriteBlock(process, dest_addr, host_ptr, copy_amount); break; @@ -512,7 +495,7 @@ u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { - Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS]; + Common::PageType& page_type{current_page_table->attributes[vaddr >> PAGE_BITS]}; if (cached) { // Switch page type to cached if now cached @@ -544,7 +527,7 @@ // that this area is already unmarked as cached. break; case Common::PageType::RasterizerCachedMemory: { - u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK); + u8* pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)}; if (pointer == nullptr) { // It's possible that this function has been called while updating the // pagetable after unmapping a VMA. In that case the underlying VMA will no @@ -573,9 +556,9 @@
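
All of the block primitives in this file (ReadBlock, WriteBlock, ZeroBlock, CopyBlock above) share one traversal: split the virtual range into chunks that never cross a page boundary, dispatch on the page's attribute, then advance. A stripped-down sketch of just the walk, with the per-page action left abstract (hypothetical helper, not the real signature):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    constexpr std::uint64_t PAGE_BITS = 12;
    constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;
    constexpr std::uint64_t PAGE_MASK = PAGE_SIZE - 1;

    template <typename PerPage>
    void WalkBlock(std::uint64_t addr, std::size_t size, PerPage&& per_page) {
        std::size_t remaining = size;
        std::uint64_t page_index = addr >> PAGE_BITS;
        std::uint64_t page_offset = addr & PAGE_MASK;

        while (remaining > 0) {
            // Clamp each step so it never crosses into the next page.
            const std::size_t copy_amount =
                std::min(static_cast<std::size_t>(PAGE_SIZE - page_offset), remaining);
            per_page(page_index, page_offset, copy_amount);

            ++page_index;
            page_offset = 0; // pages after the first start at offset zero
            remaining -= copy_amount;
        }
    }
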
 * @param memory The memory to map. * @param type The page type to map the memory as. */ - void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory, + void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target, Common::PageType type) { - LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE, + LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * PAGE_SIZE, (base + size) * PAGE_SIZE); // During boot, current_page_table might not be set yet, in which case we need not flush @@ -593,19 +576,26 @@ ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", base + page_table.pointers.size()); - std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type); + if (!target) { + while (base != end) { + page_table.pointers[base] = nullptr; + page_table.attributes[base] = type; + page_table.backing_addr[base] = 0; - if (memory == nullptr) { - std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, - memory); + base += 1; + } } else { while (base != end) { - page_table.pointers[base] = memory - (base << PAGE_BITS); + page_table.pointers[base] = + system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS); + page_table.attributes[base] = type; + page_table.backing_addr[base] = target - (base << PAGE_BITS); + ASSERT_MSG(page_table.pointers[base], "memory mapping base yielded a nullptr within the table"); base += 1; - memory += PAGE_SIZE; + target += PAGE_SIZE; } } } @@ -640,7 +630,7 @@ ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); break; case Common::PageType::RasterizerCachedMemory: { - const u8* const host_ptr = GetPointerFromVMA(vaddr); + const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)}; system.GPU().FlushRegion(vaddr, sizeof(T)); T value; std::memcpy(&value, host_ptr, sizeof(T)); @@ -682,7 +672,7 @@ ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); break; case Common::PageType::RasterizerCachedMemory: { - u8* const host_ptr{GetPointerFromVMA(vaddr)}; + u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)}; system.GPU().InvalidateRegion(vaddr, sizeof(T)); std::memcpy(host_ptr, &data, sizeof(T)); break; @@ -703,12 +693,7 @@ void Memory::SetCurrentPageTable(Kernel::Process& process) { impl->SetCurrentPageTable(process); } -void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, - Kernel::PhysicalMemory& memory, VAddr offset) { - impl->MapMemoryRegion(page_table, base, size, memory, offset); -} - -void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) { +void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) { impl->MapMemoryRegion(page_table, base, size, target); } @@ -845,4 +830,4 @@ bool IsKernelVirtualAddress(const VAddr vaddr) { return KERNEL_REGION_VADDR <= vaddr && vaddr < KERNEL_REGION_END; } -} // namespace Memory +} // namespace Core::Memory diff --git a/src/core/memory.h b/src/core/memory.h index b92d678a4..9292f3b0a 100644 --- a/src/core/memory.h +++ b/src/core/memory.h @@ -23,7 +23,7 @@ class PhysicalMemory; class Process; } // namespace Kernel -namespace Memory { +namespace Core::Memory { /** * Page size used by the ARM architecture. 
This is the smallest granularity with which memory can @@ -67,19 +67,6 @@ public: void SetCurrentPageTable(Kernel::Process& process); /** - * Maps an physical buffer onto a region of the emulated process address space. - * - * @param page_table The page table of the emulated process. - * @param base The address to start mapping at. Must be page-aligned. - * @param size The amount of bytes to map. Must be page-aligned. - * @param memory Physical buffer with the memory backing the mapping. Must be of length - * at least `size + offset`. - * @param offset The offset within the physical memory. Must be page-aligned. - */ - void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, - Kernel::PhysicalMemory& memory, VAddr offset); - - /** * Maps an allocated buffer onto a region of the emulated process address space. * * @param page_table The page table of the emulated process. @@ -88,7 +75,7 @@ public: * @param target Buffer with the memory backing the mapping. Must be of length at least * `size`. */ - void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target); + void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target); /** * Maps a region of the emulated process address space as a IO region. @@ -503,4 +490,4 @@ private: /// Determines if the given VAddr is a kernel address bool IsKernelVirtualAddress(VAddr vaddr); -} // namespace Memory +} // namespace Core::Memory diff --git a/src/core/memory/cheat_engine.cpp b/src/core/memory/cheat_engine.cpp index 4472500d2..b139e8465 100644 --- a/src/core/memory/cheat_engine.cpp +++ b/src/core/memory/cheat_engine.cpp @@ -10,13 +10,15 @@ #include "core/core_timing.h" #include "core/core_timing_util.h" #include "core/hardware_properties.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" #include "core/hle/service/hid/controllers/npad.h" #include "core/hle/service/hid/hid.h" #include "core/hle/service/sm/sm.h" +#include "core/memory.h" #include "core/memory/cheat_engine.h" -namespace Memory { +namespace Core::Memory { constexpr s64 CHEAT_ENGINE_TICKS = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 12); constexpr u32 KEYPAD_BITMASK = 0x3FFFFFF; @@ -194,11 +196,12 @@ void CheatEngine::Initialize() { metadata.process_id = system.CurrentProcess()->GetProcessID(); metadata.title_id = system.CurrentProcess()->GetTitleID(); - const auto& vm_manager = system.CurrentProcess()->VMManager(); - metadata.heap_extents = {vm_manager.GetHeapRegionBaseAddress(), vm_manager.GetHeapRegionSize()}; - metadata.address_space_extents = {vm_manager.GetAddressSpaceBaseAddress(), - vm_manager.GetAddressSpaceSize()}; - metadata.alias_extents = {vm_manager.GetMapRegionBaseAddress(), vm_manager.GetMapRegionSize()}; + const auto& page_table = system.CurrentProcess()->PageTable(); + metadata.heap_extents = {page_table.GetHeapRegionStart(), page_table.GetHeapRegionSize()}; + metadata.address_space_extents = {page_table.GetAddressSpaceStart(), + page_table.GetAddressSpaceSize()}; + metadata.alias_extents = {page_table.GetAliasCodeRegionStart(), + page_table.GetAliasCodeRegionSize()}; is_pending_reload.exchange(true); } @@ -230,4 +233,4 @@ void CheatEngine::FrameCallback(u64 userdata, s64 cycles_late) { core_timing.ScheduleEvent(CHEAT_ENGINE_TICKS - cycles_late, event); } -} // namespace Memory +} // namespace Core::Memory diff --git a/src/core/memory/cheat_engine.h b/src/core/memory/cheat_engine.h index 3d6b2298a..2649423f8 100644 --- a/src/core/memory/cheat_engine.h +++ 
b/src/core/memory/cheat_engine.h @@ -20,7 +20,7 @@ class CoreTiming; struct EventType; } // namespace Core::Timing -namespace Memory { +namespace Core::Memory { class StandardVmCallbacks : public DmntCheatVm::Callbacks { public: @@ -84,4 +84,4 @@ private: Core::System& system; }; -} // namespace Memory +} // namespace Core::Memory diff --git a/src/core/memory/dmnt_cheat_types.h b/src/core/memory/dmnt_cheat_types.h index bf68fa0fe..5e60733dc 100644 --- a/src/core/memory/dmnt_cheat_types.h +++ b/src/core/memory/dmnt_cheat_types.h @@ -26,7 +26,7 @@ #include "common/common_types.h" -namespace Memory { +namespace Core::Memory { struct MemoryRegionExtents { u64 base{}; @@ -55,4 +55,4 @@ struct CheatEntry { CheatDefinition definition{}; }; -} // namespace Memory +} // namespace Core::Memory diff --git a/src/core/memory/dmnt_cheat_vm.cpp b/src/core/memory/dmnt_cheat_vm.cpp index 4f4fa5099..fb9f36bfd 100644 --- a/src/core/memory/dmnt_cheat_vm.cpp +++ b/src/core/memory/dmnt_cheat_vm.cpp @@ -27,7 +27,7 @@ #include "core/memory/dmnt_cheat_types.h" #include "core/memory/dmnt_cheat_vm.h" -namespace Memory { +namespace Core::Memory { DmntCheatVm::DmntCheatVm(std::unique_ptr<Callbacks> callbacks) : callbacks(std::move(callbacks)) {} @@ -55,7 +55,7 @@ void DmntCheatVm::LogOpcode(const CheatVmOpcode& opcode) { fmt::format("Cond Type: {:X}", static_cast<u32>(begin_cond->cond_type))); callbacks->CommandLog(fmt::format("Rel Addr: {:X}", begin_cond->rel_address)); callbacks->CommandLog(fmt::format("Value: {:X}", begin_cond->value.bit64)); - } else if (auto end_cond = std::get_if<EndConditionalOpcode>(&opcode.opcode)) { + } else if (std::holds_alternative<EndConditionalOpcode>(opcode.opcode)) { callbacks->CommandLog("Opcode: End Conditional"); } else if (auto ctrl_loop = std::get_if<ControlLoopOpcode>(&opcode.opcode)) { if (ctrl_loop->start_loop) { @@ -399,6 +399,7 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) { // 8kkkkkkk // Just parse the mask. begin_keypress_cond.key_mask = first_dword & 0x0FFFFFFF; + opcode.opcode = begin_keypress_cond; } break; case CheatVmOpcodeType::PerformArithmeticRegister: { PerformArithmeticRegisterOpcode perform_math_reg{}; @@ -779,7 +780,7 @@ void DmntCheatVm::Execute(const CheatProcessMetadata& metadata) { if (!cond_met) { SkipConditionalBlock(); } - } else if (auto end_cond = std::get_if<EndConditionalOpcode>(&cur_opcode.opcode)) { + } else if (std::holds_alternative<EndConditionalOpcode>(cur_opcode.opcode)) { // Decrement the condition depth. // We will assume, graciously, that mismatched conditional block ends are a nop. 
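
The switch from std::get_if to std::holds_alternative in the two branches above is a small correctness-of-intent fix: when a branch only asks which alternative is active and never reads the value, holds_alternative avoids the unused pointer binding that the newly enabled warnings would flag. A short illustration with stand-in types (not the VM's real opcode set):

    #include <variant>

    struct EndConditionalOpcode {};
    struct StoreStaticOpcode { int value{}; };
    using Opcode = std::variant<EndConditionalOpcode, StoreStaticOpcode>;

    bool IsEndConditional(const Opcode& op) {
        // std::get_if would force a named pointer that is never read;
        // holds_alternative expresses "test only" and binds nothing.
        return std::holds_alternative<EndConditionalOpcode>(op);
    }
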
if (condition_depth > 0) { @@ -1209,4 +1210,4 @@ void DmntCheatVm::Execute(const CheatProcessMetadata& metadata) { } } -} // namespace Memory +} // namespace Core::Memory diff --git a/src/core/memory/dmnt_cheat_vm.h b/src/core/memory/dmnt_cheat_vm.h index c36212cf1..8351fd798 100644 --- a/src/core/memory/dmnt_cheat_vm.h +++ b/src/core/memory/dmnt_cheat_vm.h @@ -30,7 +30,7 @@ #include "common/common_types.h" #include "core/memory/dmnt_cheat_types.h" -namespace Memory { +namespace Core::Memory { enum class CheatVmOpcodeType : u32 { StoreStatic = 0, @@ -318,4 +318,4 @@ private: MemoryAccessType mem_type, u64 rel_address); }; -}; // namespace Memory +}; // namespace Core::Memory diff --git a/src/core/reporter.cpp b/src/core/reporter.cpp index 85ac81ef7..558cbe6d7 100644 --- a/src/core/reporter.cpp +++ b/src/core/reporter.cpp @@ -16,9 +16,11 @@ #include "core/arm/arm_interface.h" #include "core/core.h" #include "core/hle/kernel/hle_ipc.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" #include "core/hle/result.h" #include "core/hle/service/lm/manager.h" +#include "core/memory.h" #include "core/reporter.h" #include "core/settings.h" @@ -108,14 +110,13 @@ json GetProcessorStateData(const std::string& architecture, u64 entry_point, u64 json GetProcessorStateDataAuto(Core::System& system) { const auto* process{system.CurrentProcess()}; - const auto& vm_manager{process->VMManager()}; auto& arm{system.CurrentArmInterface()}; Core::ARM_Interface::ThreadContext64 context{}; arm.SaveContext(context); return GetProcessorStateData(process->Is64BitProcess() ? "AArch64" : "AArch32", - vm_manager.GetCodeRegionBaseAddress(), context.sp, context.pc, + process->PageTable().GetCodeRegionStart(), context.sp, context.pc, context.pstate, context.cpu_registers); } @@ -147,7 +148,8 @@ json GetFullDataAuto(const std::string& timestamp, u64 title_id, Core::System& s } template <bool read_value, typename DescriptorType> -json GetHLEBufferDescriptorData(const std::vector<DescriptorType>& buffer, Memory::Memory& memory) { +json GetHLEBufferDescriptorData(const std::vector<DescriptorType>& buffer, + Core::Memory::Memory& memory) { auto buffer_out = json::array(); for (const auto& desc : buffer) { auto entry = json{ @@ -167,7 +169,7 @@ json GetHLEBufferDescriptorData(const std::vector<DescriptorType>& buffer, Memor return buffer_out; } -json GetHLERequestContextData(Kernel::HLERequestContext& ctx, Memory::Memory& memory) { +json GetHLERequestContextData(Kernel::HLERequestContext& ctx, Core::Memory::Memory& memory) { json out; auto cmd_buf = json::array(); diff --git a/src/core/telemetry_session.cpp b/src/core/telemetry_session.cpp index 0f3685d1c..fd5a3ee9f 100644 --- a/src/core/telemetry_session.cpp +++ b/src/core/telemetry_session.cpp @@ -153,9 +153,9 @@ void TelemetrySession::AddInitialInfo(Loader::AppLoader& app_loader) { app_loader.ReadTitle(name); if (name.empty()) { - auto [nacp, icon_file] = FileSys::PatchManager(program_id).GetControlMetadata(); - if (nacp != nullptr) { - name = nacp->GetApplicationName(); + const auto metadata = FileSys::PatchManager(program_id).GetControlMetadata(); + if (metadata.first != nullptr) { + name = metadata.first->GetApplicationName(); } } diff --git a/src/core/tools/freezer.cpp b/src/core/tools/freezer.cpp index 1e060f009..b2c6c537e 100644 --- a/src/core/tools/freezer.cpp +++ b/src/core/tools/freezer.cpp @@ -16,7 +16,7 @@ namespace { constexpr s64 MEMORY_FREEZER_TICKS = static_cast<s64>(Core::Hardware::BASE_CLOCK_RATE / 60); -u64 
MemoryReadWidth(Memory::Memory& memory, u32 width, VAddr addr) { +u64 MemoryReadWidth(Core::Memory::Memory& memory, u32 width, VAddr addr) { switch (width) { case 1: return memory.Read8(addr); @@ -32,7 +32,7 @@ u64 MemoryReadWidth(Memory::Memory& memory, u32 width, VAddr addr) { } } -void MemoryWriteWidth(Memory::Memory& memory, u32 width, VAddr addr, u64 value) { +void MemoryWriteWidth(Core::Memory::Memory& memory, u32 width, VAddr addr, u64 value) { switch (width) { case 1: memory.Write8(addr, static_cast<u8>(value)); @@ -53,7 +53,7 @@ void MemoryWriteWidth(Memory::Memory& memory, u32 width, VAddr addr, u64 value) } // Anonymous namespace -Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Memory::Memory& memory_) +Freezer::Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& memory_) : core_timing{core_timing_}, memory{memory_} { event = Core::Timing::CreateEvent( "MemoryFreezer::FrameCallback", diff --git a/src/core/tools/freezer.h b/src/core/tools/freezer.h index 916339c6c..62fc6aa6c 100644 --- a/src/core/tools/freezer.h +++ b/src/core/tools/freezer.h @@ -16,7 +16,7 @@ class CoreTiming; struct EventType; } // namespace Core::Timing -namespace Memory { +namespace Core::Memory { class Memory; } @@ -38,7 +38,7 @@ public: u64 value; }; - explicit Freezer(Core::Timing::CoreTiming& core_timing_, Memory::Memory& memory_); + explicit Freezer(Core::Timing::CoreTiming& core_timing_, Core::Memory::Memory& memory_); ~Freezer(); // Enables or disables the entire memory freezer. @@ -82,7 +82,7 @@ private: std::shared_ptr<Core::Timing::EventType> event; Core::Timing::CoreTiming& core_timing; - Memory::Memory& memory; + Core::Memory::Memory& memory; }; } // namespace Tools diff --git a/src/input_common/CMakeLists.txt b/src/input_common/CMakeLists.txt index 2520ba321..a9c2392b1 100644 --- a/src/input_common/CMakeLists.txt +++ b/src/input_common/CMakeLists.txt @@ -27,4 +27,4 @@ if(SDL2_FOUND) endif() create_target_directory_groups(input_common) -target_link_libraries(input_common PUBLIC core PRIVATE common ${Boost_LIBRARIES}) +target_link_libraries(input_common PUBLIC core PRIVATE common Boost::boost) diff --git a/src/input_common/sdl/sdl_impl.cpp b/src/input_common/sdl/sdl_impl.cpp index a2e0c0bd2..675b477fa 100644 --- a/src/input_common/sdl/sdl_impl.cpp +++ b/src/input_common/sdl/sdl_impl.cpp @@ -603,6 +603,7 @@ public: if (std::abs(event.jaxis.value / 32767.0) < 0.5) { break; } + [[fallthrough]]; case SDL_JOYBUTTONUP: case SDL_JOYHATMOTION: return SDLEventToButtonParamPackage(state, event); diff --git a/src/tests/core/arm/arm_test_common.cpp b/src/tests/core/arm/arm_test_common.cpp index 17043346b..e54674d11 100644 --- a/src/tests/core/arm/arm_test_common.cpp +++ b/src/tests/core/arm/arm_test_common.cpp @@ -6,6 +6,7 @@ #include "common/page_table.h" #include "core/core.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" #include "core/memory.h" #include "tests/core/arm/arm_test_common.h" @@ -18,12 +19,7 @@ TestEnvironment::TestEnvironment(bool mutable_memory_) auto& system = Core::System::GetInstance(); auto process = Kernel::Process::Create(system, "", Kernel::Process::ProcessType::Userland); - page_table = &process->VMManager().page_table; - - std::fill(page_table->pointers.begin(), page_table->pointers.end(), nullptr); - page_table->special_regions.clear(); - std::fill(page_table->attributes.begin(), page_table->attributes.end(), - Common::PageType::Unmapped); + page_table = &process->PageTable().PageTableImpl(); 
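
The MemoryReadWidth/MemoryWriteWidth helpers above show the width-dispatch idiom used for generic pokes: one entry point that switches on the access width in bytes and forwards to the fixed-width accessor. A sketch over a plain host buffer, assuming a little-endian host (the real helpers forward to the emulated Memory's Read8/16/32/64 instead):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Reads `width` bytes (1, 2, 4 or 8) from `buffer` at `offset`.
    std::uint64_t ReadWidth(const std::uint8_t* buffer, std::uint32_t width,
                            std::size_t offset) {
        std::uint64_t value = 0;
        switch (width) {
        case 1:
        case 2:
        case 4:
        case 8:
            std::memcpy(&value, buffer + offset, width); // little-endian host assumed
            return value;
        default:
            return 0; // the real helper treats any other width as unreachable
        }
    }
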
system.Memory().MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory); system.Memory().MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory); diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h index b57c0d4d4..83e7a1cde 100644 --- a/src/video_core/buffer_cache/buffer_cache.h +++ b/src/video_core/buffer_cache/buffer_cache.h @@ -29,10 +29,10 @@ namespace VideoCommon { using MapInterval = std::shared_ptr<MapIntervalBase>; -template <typename TBuffer, typename TBufferType, typename StreamBuffer> +template <typename OwnerBuffer, typename BufferType, typename StreamBuffer> class BufferCache { public: - using BufferInfo = std::pair<const TBufferType*, u64>; + using BufferInfo = std::pair<BufferType, u64>; BufferInfo UploadMemory(GPUVAddr gpu_addr, std::size_t size, std::size_t alignment = 4, bool is_written = false, bool use_fast_cbuf = false) { @@ -89,9 +89,7 @@ public: } } - const u64 offset = static_cast<u64>(block->GetOffset(cpu_addr)); - - return {ToHandle(block), offset}; + return {ToHandle(block), static_cast<u64>(block->GetOffset(cpu_addr))}; } /// Uploads from a host memory. Returns the OpenGL buffer where it's located and its offset. @@ -156,7 +154,7 @@ public: } } - virtual const TBufferType* GetEmptyBuffer(std::size_t size) = 0; + virtual BufferType GetEmptyBuffer(std::size_t size) = 0; protected: explicit BufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system, @@ -166,19 +164,19 @@ protected: ~BufferCache() = default; - virtual const TBufferType* ToHandle(const TBuffer& storage) = 0; + virtual BufferType ToHandle(const OwnerBuffer& storage) = 0; virtual void WriteBarrier() = 0; - virtual TBuffer CreateBlock(VAddr cpu_addr, std::size_t size) = 0; + virtual OwnerBuffer CreateBlock(VAddr cpu_addr, std::size_t size) = 0; - virtual void UploadBlockData(const TBuffer& buffer, std::size_t offset, std::size_t size, + virtual void UploadBlockData(const OwnerBuffer& buffer, std::size_t offset, std::size_t size, const u8* data) = 0; - virtual void DownloadBlockData(const TBuffer& buffer, std::size_t offset, std::size_t size, + virtual void DownloadBlockData(const OwnerBuffer& buffer, std::size_t offset, std::size_t size, u8* data) = 0; - virtual void CopyBlock(const TBuffer& src, const TBuffer& dst, std::size_t src_offset, + virtual void CopyBlock(const OwnerBuffer& src, const OwnerBuffer& dst, std::size_t src_offset, std::size_t dst_offset, std::size_t size) = 0; virtual BufferInfo ConstBufferUpload(const void* raw_pointer, std::size_t size) { @@ -221,9 +219,8 @@ private: return std::make_shared<MapIntervalBase>(start, end, gpu_addr); } - MapInterval MapAddress(const TBuffer& block, const GPUVAddr gpu_addr, const VAddr cpu_addr, + MapInterval MapAddress(const OwnerBuffer& block, const GPUVAddr gpu_addr, const VAddr cpu_addr, const std::size_t size) { - std::vector<MapInterval> overlaps = GetMapsInRange(cpu_addr, size); if (overlaps.empty()) { auto& memory_manager = system.GPU().MemoryManager(); @@ -272,7 +269,7 @@ private: return new_map; } - void UpdateBlock(const TBuffer& block, VAddr start, VAddr end, + void UpdateBlock(const OwnerBuffer& block, VAddr start, VAddr end, std::vector<MapInterval>& overlaps) { const IntervalType base_interval{start, end}; IntervalSet interval_set{}; @@ -313,7 +310,7 @@ private: void FlushMap(MapInterval map) { std::size_t size = map->GetEnd() - map->GetStart(); - TBuffer block = blocks[map->GetStart() >> block_page_bits]; + OwnerBuffer block = blocks[map->GetStart() >> 
block_page_bits]; staging_buffer.resize(size); DownloadBlockData(block, block->GetOffset(map->GetStart()), size, staging_buffer.data()); system.Memory().WriteBlockUnsafe(map->GetStart(), staging_buffer.data(), size); @@ -328,7 +325,7 @@ private: buffer_ptr += size; buffer_offset += size; - return {&stream_buffer_handle, uploaded_offset}; + return {stream_buffer_handle, uploaded_offset}; } void AlignBuffer(std::size_t alignment) { @@ -338,11 +335,11 @@ private: buffer_offset = offset_aligned; } - TBuffer EnlargeBlock(TBuffer buffer) { + OwnerBuffer EnlargeBlock(OwnerBuffer buffer) { const std::size_t old_size = buffer->GetSize(); const std::size_t new_size = old_size + block_page_size; const VAddr cpu_addr = buffer->GetCpuAddr(); - TBuffer new_buffer = CreateBlock(cpu_addr, new_size); + OwnerBuffer new_buffer = CreateBlock(cpu_addr, new_size); CopyBlock(buffer, new_buffer, 0, 0, old_size); buffer->SetEpoch(epoch); pending_destruction.push_back(buffer); @@ -356,14 +353,14 @@ private: return new_buffer; } - TBuffer MergeBlocks(TBuffer first, TBuffer second) { + OwnerBuffer MergeBlocks(OwnerBuffer first, OwnerBuffer second) { const std::size_t size_1 = first->GetSize(); const std::size_t size_2 = second->GetSize(); const VAddr first_addr = first->GetCpuAddr(); const VAddr second_addr = second->GetCpuAddr(); const VAddr new_addr = std::min(first_addr, second_addr); const std::size_t new_size = size_1 + size_2; - TBuffer new_buffer = CreateBlock(new_addr, new_size); + OwnerBuffer new_buffer = CreateBlock(new_addr, new_size); CopyBlock(first, new_buffer, 0, new_buffer->GetOffset(first_addr), size_1); CopyBlock(second, new_buffer, 0, new_buffer->GetOffset(second_addr), size_2); first->SetEpoch(epoch); @@ -380,8 +377,8 @@ private: return new_buffer; } - TBuffer GetBlock(const VAddr cpu_addr, const std::size_t size) { - TBuffer found{}; + OwnerBuffer GetBlock(const VAddr cpu_addr, const std::size_t size) { + OwnerBuffer found; const VAddr cpu_addr_end = cpu_addr + size - 1; u64 page_start = cpu_addr >> block_page_bits; const u64 page_end = cpu_addr_end >> block_page_bits; @@ -457,7 +454,7 @@ private: Core::System& system; std::unique_ptr<StreamBuffer> stream_buffer; - TBufferType stream_buffer_handle{}; + BufferType stream_buffer_handle{}; bool invalidated = false; @@ -475,9 +472,9 @@ private: static constexpr u64 block_page_bits = 21; static constexpr u64 block_page_size = 1ULL << block_page_bits; - std::unordered_map<u64, TBuffer> blocks; + std::unordered_map<u64, OwnerBuffer> blocks; - std::list<TBuffer> pending_destruction; + std::list<OwnerBuffer> pending_destruction; u64 epoch = 0; u64 modified_ticks = 0; diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h index 2977a7d81..5cf6a4cc3 100644 --- a/src/video_core/engines/maxwell_3d.h +++ b/src/video_core/engines/maxwell_3d.h @@ -303,6 +303,10 @@ public: return (type == Type::SignedNorm) || (type == Type::UnsignedNorm); } + bool IsConstant() const { + return constant; + } + bool IsValid() const { return size != Size::Invalid; } diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h index c66c66f6c..7231597d4 100644 --- a/src/video_core/engines/shader_bytecode.h +++ b/src/video_core/engines/shader_bytecode.h @@ -1006,6 +1006,12 @@ union Instruction { } stg; union { + BitField<23, 3, AtomicOp> operation; + BitField<48, 1, u64> extended; + BitField<20, 3, GlobalAtomicType> type; + } red; + + union { BitField<52, 4, AtomicOp> operation; BitField<49, 3, GlobalAtomicType> type; 
BitField<28, 20, s64> offset; @@ -1501,7 +1507,7 @@ union Instruction { TextureType GetTextureType() const { // The TLDS instruction has a weird encoding for the texture type. - if (texture_info >= 0 && texture_info <= 1) { + if (texture_info <= 1) { return TextureType::Texture1D; } if (texture_info == 2 || texture_info == 8 || texture_info == 12 || @@ -1787,6 +1793,7 @@ public: ST_S, ST, // Store in generic memory STG, // Store in global memory + RED, // Reduction operation ATOM, // Atomic operation on global memory ATOMS, // Atomic operation on shared memory AL2P, // Transforms attribute memory into physical memory @@ -1871,7 +1878,8 @@ public: ICMP_R, ICMP_CR, ICMP_IMM, - FCMP_R, + FCMP_RR, + FCMP_RC, MUFU, // Multi-Function Operator RRO_C, // Range Reduction Operator RRO_R, @@ -2096,6 +2104,7 @@ private: INST("1110111101010---", Id::ST_L, Type::Memory, "ST_L"), INST("101-------------", Id::ST, Type::Memory, "ST"), INST("1110111011011---", Id::STG, Type::Memory, "STG"), + INST("1110101111111---", Id::RED, Type::Memory, "RED"), INST("11101101--------", Id::ATOM, Type::Memory, "ATOM"), INST("11101100--------", Id::ATOMS, Type::Memory, "ATOMS"), INST("1110111110100---", Id::AL2P, Type::Memory, "AL2P"), @@ -2179,7 +2188,8 @@ private: INST("0101110100100---", Id::HSETP2_R, Type::HalfSetPredicate, "HSETP2_R"), INST("0111111-0-------", Id::HSETP2_IMM, Type::HalfSetPredicate, "HSETP2_IMM"), INST("0101110100011---", Id::HSET2_R, Type::HalfSet, "HSET2_R"), - INST("010110111010----", Id::FCMP_R, Type::Arithmetic, "FCMP_R"), + INST("010110111010----", Id::FCMP_RR, Type::Arithmetic, "FCMP_RR"), + INST("010010111010----", Id::FCMP_RC, Type::Arithmetic, "FCMP_RC"), INST("0101000010000---", Id::MUFU, Type::Arithmetic, "MUFU"), INST("0100110010010---", Id::RRO_C, Type::Arithmetic, "RRO_C"), INST("0101110010010---", Id::RRO_R, Type::Arithmetic, "RRO_R"), diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp index cc434faf7..20e73a37e 100644 --- a/src/video_core/gpu_asynch.cpp +++ b/src/video_core/gpu_asynch.cpp @@ -12,8 +12,9 @@ namespace VideoCommon { GPUAsynch::GPUAsynch(Core::System& system, std::unique_ptr<VideoCore::RendererBase>&& renderer_, std::unique_ptr<Core::Frontend::GraphicsContext>&& context) - : GPU(system, std::move(renderer_), true), gpu_thread{system}, gpu_context(std::move(context)), - cpu_context(renderer->GetRenderWindow().CreateSharedContext()) {} + : GPU(system, std::move(renderer_), true), gpu_thread{system}, + cpu_context(renderer->GetRenderWindow().CreateSharedContext()), + gpu_context(std::move(context)) {} GPUAsynch::~GPUAsynch() = default; diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp index a3389d0d2..fd49bc2a9 100644 --- a/src/video_core/memory_manager.cpp +++ b/src/video_core/memory_manager.cpp @@ -6,8 +6,8 @@ #include "common/assert.h" #include "common/logging/log.h" #include "core/core.h" +#include "core/hle/kernel/memory/page_table.h" #include "core/hle/kernel/process.h" -#include "core/hle/kernel/vm_manager.h" #include "core/memory.h" #include "video_core/gpu.h" #include "video_core/memory_manager.h" @@ -17,10 +17,7 @@ namespace Tegra { MemoryManager::MemoryManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer) : rasterizer{rasterizer}, system{system} { - std::fill(page_table.pointers.begin(), page_table.pointers.end(), nullptr); - std::fill(page_table.attributes.begin(), page_table.attributes.end(), - Common::PageType::Unmapped); - page_table.Resize(address_space_width); + 
page_table.Resize(address_space_width, page_bits, false); // Initialize the map with a single free region covering the entire managed space. VirtualMemoryArea initial_vma; @@ -55,9 +52,9 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) { MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr); ASSERT(system.CurrentProcess() - ->VMManager() - .SetMemoryAttribute(cpu_addr, size, Kernel::MemoryAttribute::DeviceMapped, - Kernel::MemoryAttribute::DeviceMapped) + ->PageTable() + .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared, + Kernel::Memory::MemoryAttribute::DeviceShared) .IsSuccess()); return gpu_addr; @@ -70,9 +67,9 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr); ASSERT(system.CurrentProcess() - ->VMManager() - .SetMemoryAttribute(cpu_addr, size, Kernel::MemoryAttribute::DeviceMapped, - Kernel::MemoryAttribute::DeviceMapped) + ->PageTable() + .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared, + Kernel::Memory::MemoryAttribute::DeviceShared) .IsSuccess()); return gpu_addr; } @@ -89,9 +86,10 @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) { UnmapRange(gpu_addr, aligned_size); ASSERT(system.CurrentProcess() - ->VMManager() - .SetMemoryAttribute(cpu_addr.value(), size, Kernel::MemoryAttribute::DeviceMapped, - Kernel::MemoryAttribute::None) + ->PageTable() + .SetMemoryAttribute(cpu_addr.value(), size, + Kernel::Memory::MemoryAttribute::DeviceShared, + Kernel::Memory::MemoryAttribute::None) .IsSuccess()); return gpu_addr; @@ -147,16 +145,8 @@ T MemoryManager::Read(GPUVAddr addr) const { return value; } - switch (page_table.attributes[addr >> page_bits]) { - case Common::PageType::Unmapped: - LOG_ERROR(HW_GPU, "Unmapped Read{} @ 0x{:08X}", sizeof(T) * 8, addr); - return 0; - case Common::PageType::Memory: - ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", addr); - break; - default: - UNREACHABLE(); - } + UNREACHABLE(); + return {}; } @@ -173,17 +163,7 @@ void MemoryManager::Write(GPUVAddr addr, T data) { return; } - switch (page_table.attributes[addr >> page_bits]) { - case Common::PageType::Unmapped: - LOG_ERROR(HW_GPU, "Unmapped Write{} 0x{:08X} @ 0x{:016X}", sizeof(data) * 8, - static_cast<u32>(data), addr); - return; - case Common::PageType::Memory: - ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", addr); - break; - default: - UNREACHABLE(); - } + UNREACHABLE(); } template u8 MemoryManager::Read<u8>(GPUVAddr addr) const; @@ -249,18 +229,11 @@ void MemoryManager::ReadBlock(GPUVAddr src_addr, void* dest_buffer, const std::s const std::size_t copy_amount{ std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; - switch (page_table.attributes[page_index]) { - case Common::PageType::Memory: { - const VAddr src_addr{page_table.backing_addr[page_index] + page_offset}; - // Flush must happen on the rasterizer interface, such that memory is always synchronous - // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu. 
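
The comment preserved across this hunk records the invariant behind the change: a CPU-side read of GPU-visible memory must flush that range through the rasterizer first, so the bytes are current even in asynchronous-GPU mode (skipping it caused the Dead Cells title-menu regression the comment cites). Sketched with stand-in interfaces, assuming nothing about the real rasterizer beyond a FlushRegion entry point:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct RasterizerStub {
        // Stub: a real implementation writes back any GPU caches for the range.
        void FlushRegion(std::uint64_t addr, std::size_t size) {}
    };

    void ReadGpuVisible(RasterizerStub& rasterizer, std::uint64_t vaddr,
                        const std::uint8_t* host_ptr, void* dest, std::size_t size) {
        rasterizer.FlushRegion(vaddr, size); // synchronize before touching the bytes
        std::memcpy(dest, host_ptr, size);
    }
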
- rasterizer.FlushRegion(src_addr, copy_amount); - memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount); - break; - } - default: - UNREACHABLE(); - } + const VAddr src_addr{page_table.backing_addr[page_index] + page_offset}; + // Flush must happen on the rasterizer interface, such that memory is always synchronous + // when it is read (even when in asynchronous GPU mode). Fixes Dead Cells title menu. + rasterizer.FlushRegion(src_addr, copy_amount); + memory.ReadBlockUnsafe(src_addr, dest_buffer, copy_amount); page_index++; page_offset = 0; @@ -305,18 +278,11 @@ void MemoryManager::WriteBlock(GPUVAddr dest_addr, const void* src_buffer, const const std::size_t copy_amount{ std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; - switch (page_table.attributes[page_index]) { - case Common::PageType::Memory: { - const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset}; - // Invalidate must happen on the rasterizer interface, such that memory is always - // synchronous when it is written (even when in asynchronous GPU mode). - rasterizer.InvalidateRegion(dest_addr, copy_amount); - memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount); - break; - } - default: - UNREACHABLE(); - } + const VAddr dest_addr{page_table.backing_addr[page_index] + page_offset}; + // Invalidate must happen on the rasterizer interface, such that memory is always + // synchronous when it is written (even when in asynchronous GPU mode). + rasterizer.InvalidateRegion(dest_addr, copy_amount); + memory.WriteBlockUnsafe(dest_addr, src_buffer, copy_amount); page_index++; page_offset = 0; @@ -362,8 +328,8 @@ void MemoryManager::CopyBlockUnsafe(GPUVAddr dest_addr, GPUVAddr src_addr, const bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) { const VAddr addr = page_table.backing_addr[gpu_addr >> page_bits]; - const std::size_t page = (addr & Memory::PAGE_MASK) + size; - return page <= Memory::PAGE_SIZE; + const std::size_t page = (addr & Core::Memory::PAGE_MASK) + size; + return page <= Core::Memory::PAGE_SIZE; } void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageType type, @@ -375,12 +341,13 @@ void MemoryManager::MapPages(GPUVAddr base, u64 size, u8* memory, Common::PageTy ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", base + page_table.pointers.size()); - std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type); - if (memory == nullptr) { - std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, memory); - std::fill(page_table.backing_addr.begin() + base, page_table.backing_addr.begin() + end, - backing_addr); + while (base != end) { + page_table.pointers[base] = nullptr; + page_table.backing_addr[base] = 0; + + base += 1; + } } else { while (base != end) { page_table.pointers[base] = memory; diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h index 0d9468535..0ddd52d5a 100644 --- a/src/video_core/memory_manager.h +++ b/src/video_core/memory_manager.h @@ -179,7 +179,7 @@ private: /// End of address space, based on address space in bits. 
static constexpr GPUVAddr address_space_end{1ULL << address_space_width}; - Common::BackingPageTable page_table{page_bits}; + Common::PageTable page_table; VMAMap vma_map; VideoCore::RasterizerInterface& rasterizer; diff --git a/src/video_core/rasterizer_accelerated.cpp b/src/video_core/rasterizer_accelerated.cpp index d01db97da..53622ca05 100644 --- a/src/video_core/rasterizer_accelerated.cpp +++ b/src/video_core/rasterizer_accelerated.cpp @@ -23,15 +23,15 @@ constexpr auto RangeFromInterval(Map& map, const Interval& interval) { } // Anonymous namespace -RasterizerAccelerated::RasterizerAccelerated(Memory::Memory& cpu_memory_) +RasterizerAccelerated::RasterizerAccelerated(Core::Memory::Memory& cpu_memory_) : cpu_memory{cpu_memory_} {} RasterizerAccelerated::~RasterizerAccelerated() = default; void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) { std::lock_guard lock{pages_mutex}; - const u64 page_start{addr >> Memory::PAGE_BITS}; - const u64 page_end{(addr + size + Memory::PAGE_SIZE - 1) >> Memory::PAGE_BITS}; + const u64 page_start{addr >> Core::Memory::PAGE_BITS}; + const u64 page_end{(addr + size + Core::Memory::PAGE_SIZE - 1) >> Core::Memory::PAGE_BITS}; // Interval maps will erase segments if count reaches 0, so if delta is negative we have to // subtract after iterating @@ -44,8 +44,8 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del const auto interval = pair.first & pages_interval; const int count = pair.second; - const VAddr interval_start_addr = boost::icl::first(interval) << Memory::PAGE_BITS; - const VAddr interval_end_addr = boost::icl::last_next(interval) << Memory::PAGE_BITS; + const VAddr interval_start_addr = boost::icl::first(interval) << Core::Memory::PAGE_BITS; + const VAddr interval_end_addr = boost::icl::last_next(interval) << Core::Memory::PAGE_BITS; const u64 interval_size = interval_end_addr - interval_start_addr; if (delta > 0 && count == delta) { diff --git a/src/video_core/rasterizer_accelerated.h b/src/video_core/rasterizer_accelerated.h index 315798e7c..91866d7dd 100644 --- a/src/video_core/rasterizer_accelerated.h +++ b/src/video_core/rasterizer_accelerated.h @@ -11,7 +11,7 @@ #include "common/common_types.h" #include "video_core/rasterizer_interface.h" -namespace Memory { +namespace Core::Memory { class Memory; } @@ -20,7 +20,7 @@ namespace VideoCore { /// Implements the shared part in GPU accelerated rasterizers in RasterizerInterface. 
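
UpdatePagesCachedCount above keeps the same arithmetic under the new Core::Memory constants: a byte range is converted to the half-open page range covering it, rounding the end upward. A self-contained sketch of that computation (page size assumed to match Core::Memory's 4 KiB pages):

    #include <cstdint>

    constexpr std::uint64_t PAGE_BITS = 12;
    constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;

    struct PageSpan {
        std::uint64_t first; // first page touched
        std::uint64_t end;   // one past the last page touched
    };

    constexpr PageSpan PagesCovering(std::uint64_t addr, std::uint64_t size) {
        return {addr >> PAGE_BITS, (addr + size + PAGE_SIZE - 1) >> PAGE_BITS};
    }

    // A one-byte access at 0x1000 touches exactly one page.
    static_assert(PagesCovering(0x1000, 1).end == PagesCovering(0x1000, 1).first + 1, "");
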
class RasterizerAccelerated : public RasterizerInterface { public: - explicit RasterizerAccelerated(Memory::Memory& cpu_memory_); + explicit RasterizerAccelerated(Core::Memory::Memory& cpu_memory_); ~RasterizerAccelerated() override; void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) override; @@ -30,7 +30,7 @@ private: CachedPageMap cached_pages; std::mutex pages_mutex; - Memory::Memory& cpu_memory; + Core::Memory::Memory& cpu_memory; }; } // namespace VideoCore diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp index 4eb37a96c..cb5792407 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp @@ -55,33 +55,31 @@ void OGLBufferCache::WriteBarrier() { glMemoryBarrier(GL_ALL_BARRIER_BITS); } -const GLuint* OGLBufferCache::ToHandle(const Buffer& buffer) { +GLuint OGLBufferCache::ToHandle(const Buffer& buffer) { return buffer->GetHandle(); } -const GLuint* OGLBufferCache::GetEmptyBuffer(std::size_t) { - static const GLuint null_buffer = 0; - return &null_buffer; +GLuint OGLBufferCache::GetEmptyBuffer(std::size_t) { + return 0; } void OGLBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, const u8* data) { - glNamedBufferSubData(*buffer->GetHandle(), static_cast<GLintptr>(offset), + glNamedBufferSubData(buffer->GetHandle(), static_cast<GLintptr>(offset), static_cast<GLsizeiptr>(size), data); } void OGLBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, u8* data) { MICROPROFILE_SCOPE(OpenGL_Buffer_Download); - glGetNamedBufferSubData(*buffer->GetHandle(), static_cast<GLintptr>(offset), + glGetNamedBufferSubData(buffer->GetHandle(), static_cast<GLintptr>(offset), static_cast<GLsizeiptr>(size), data); } void OGLBufferCache::CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset, std::size_t dst_offset, std::size_t size) { - glCopyNamedBufferSubData(*src->GetHandle(), *dst->GetHandle(), - static_cast<GLintptr>(src_offset), static_cast<GLintptr>(dst_offset), - static_cast<GLsizeiptr>(size)); + glCopyNamedBufferSubData(src->GetHandle(), dst->GetHandle(), static_cast<GLintptr>(src_offset), + static_cast<GLintptr>(dst_offset), static_cast<GLsizeiptr>(size)); } OGLBufferCache::BufferInfo OGLBufferCache::ConstBufferUpload(const void* raw_pointer, @@ -89,7 +87,7 @@ OGLBufferCache::BufferInfo OGLBufferCache::ConstBufferUpload(const void* raw_poi DEBUG_ASSERT(cbuf_cursor < std::size(cbufs)); const GLuint& cbuf = cbufs[cbuf_cursor++]; glNamedBufferSubData(cbuf, 0, static_cast<GLsizeiptr>(size), raw_pointer); - return {&cbuf, 0}; + return {cbuf, 0}; } } // namespace OpenGL diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h index d94a11252..a74817857 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.h +++ b/src/video_core/renderer_opengl/gl_buffer_cache.h @@ -34,12 +34,12 @@ public: explicit CachedBufferBlock(VAddr cpu_addr, const std::size_t size); ~CachedBufferBlock(); - const GLuint* GetHandle() const { - return &gl_buffer.handle; + GLuint GetHandle() const { + return gl_buffer.handle; } private: - OGLBuffer gl_buffer{}; + OGLBuffer gl_buffer; }; class OGLBufferCache final : public GenericBufferCache { @@ -48,7 +48,7 @@ public: const Device& device, std::size_t stream_size); ~OGLBufferCache(); - const GLuint* GetEmptyBuffer(std::size_t) override; + GLuint GetEmptyBuffer(std::size_t) override; void Acquire() 
noexcept { cbuf_cursor = 0; @@ -57,9 +57,9 @@ public: protected: Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override; - void WriteBarrier() override; + GLuint ToHandle(const Buffer& buffer) override; - const GLuint* ToHandle(const Buffer& buffer) override; + void WriteBarrier() override; void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, const u8* data) override; diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp index c286502ba..d83dca25a 100644 --- a/src/video_core/renderer_opengl/gl_device.cpp +++ b/src/video_core/renderer_opengl/gl_device.cpp @@ -87,7 +87,7 @@ u32 Extract(u32& base, u32& num, u32 amount, std::optional<GLenum> limit = {}) { std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> BuildBaseBindings() noexcept { std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> bindings; - static std::array<std::size_t, 5> stage_swizzle = {0, 1, 2, 3, 4}; + static constexpr std::array<std::size_t, 5> stage_swizzle{0, 1, 2, 3, 4}; const u32 total_ubos = GetInteger<u32>(GL_MAX_UNIFORM_BUFFER_BINDINGS); const u32 total_ssbos = GetInteger<u32>(GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS); const u32 total_samplers = GetInteger<u32>(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS); diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp index f12e9f55f..d7ba57aca 100644 --- a/src/video_core/renderer_opengl/gl_query_cache.cpp +++ b/src/video_core/renderer_opengl/gl_query_cache.cpp @@ -94,9 +94,9 @@ CachedQuery::CachedQuery(CachedQuery&& rhs) noexcept : VideoCommon::CachedQueryBase<HostCounter>(std::move(rhs)), cache{rhs.cache}, type{rhs.type} {} CachedQuery& CachedQuery::operator=(CachedQuery&& rhs) noexcept { - VideoCommon::CachedQueryBase<HostCounter>::operator=(std::move(rhs)); cache = rhs.cache; type = rhs.type; + CachedQueryBase<HostCounter>::operator=(std::move(rhs)); return *this; } diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index f31d960c7..175374f0d 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp @@ -140,8 +140,8 @@ void RasterizerOpenGL::SetupVertexFormat() { const auto attrib = gpu.regs.vertex_attrib_format[index]; const auto gl_index = static_cast<GLuint>(index); - // Ignore invalid attributes. - if (!attrib.IsValid()) { + // Disable constant attributes. + if (attrib.IsConstant()) { glDisableVertexAttribArray(gl_index); continue; } @@ -188,10 +188,8 @@ void RasterizerOpenGL::SetupVertexBuffer() { ASSERT(end > start); const u64 size = end - start + 1; const auto [vertex_buffer, vertex_buffer_offset] = buffer_cache.UploadMemory(start, size); - - // Bind the vertex array to the buffer at the current offset. 
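A recurring theme in the gl_buffer_cache changes above is replacing `const GLuint*` with plain `GLuint`: an OpenGL buffer name is a small integer, so returning it by value removes an indirection and cannot dangle. A minimal sketch of the pattern (the class below is an illustrative stand-in, not the real CachedBufferBlock):

#include <cstdint>

using GLuint = std::uint32_t; // stand-in for the real typedef provided by glad

class ExampleBufferBlock {
public:
    explicit ExampleBufferBlock(GLuint handle_) : handle{handle_} {}

    // By value: callers get a copy of the 4-byte name instead of a pointer into
    // cache-owned storage that could dangle once the block is evicted.
    GLuint GetHandle() const {
        return handle;
    }

private:
    GLuint handle = 0;
};

int main() {
    const ExampleBufferBlock block{42};
    return block.GetHandle() == 42 ? 0 : 1;
}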
- vertex_array_pushbuffer.SetVertexBuffer(static_cast<GLuint>(index), vertex_buffer, - vertex_buffer_offset, vertex_array.stride); + glBindVertexBuffer(static_cast<GLuint>(index), vertex_buffer, vertex_buffer_offset, + vertex_array.stride); } } @@ -222,7 +220,7 @@ GLintptr RasterizerOpenGL::SetupIndexBuffer() { const auto& regs = system.GPU().Maxwell3D().regs; const std::size_t size = CalculateIndexBufferSize(); const auto [buffer, offset] = buffer_cache.UploadMemory(regs.index_array.IndexStart(), size); - vertex_array_pushbuffer.SetIndexBuffer(buffer); + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer); return offset; } @@ -524,7 +522,6 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { // Prepare vertex array format. SetupVertexFormat(); - vertex_array_pushbuffer.Setup(); // Upload vertex and index data. SetupVertexBuffer(); @@ -534,17 +531,13 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { index_buffer_offset = SetupIndexBuffer(); } - // Prepare packed bindings. - bind_ubo_pushbuffer.Setup(); - bind_ssbo_pushbuffer.Setup(); - // Setup emulation uniform buffer. GLShader::MaxwellUniformData ubo; ubo.SetFromRegs(gpu); const auto [buffer, offset] = buffer_cache.UploadHostMemory(&ubo, sizeof(ubo), device.GetUniformBufferAlignment()); - bind_ubo_pushbuffer.Push(EmulationUniformBlockBinding, buffer, offset, - static_cast<GLsizeiptr>(sizeof(ubo))); + glBindBufferRange(GL_UNIFORM_BUFFER, EmulationUniformBlockBinding, buffer, offset, + static_cast<GLsizeiptr>(sizeof(ubo))); // Setup shaders and their used resources. texture_cache.GuardSamplers(true); @@ -557,11 +550,6 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) { // Signal the buffer cache that we are not going to upload more things. buffer_cache.Unmap(); - // Now that we are no longer uploading data, we can safely bind the buffers to OpenGL. 
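The push-buffer removal above works because UploadMemory/UploadHostMemory now return a concrete GLuint rather than a pointer that only became meaningful after Unmap(), so ranges can be bound with glBindBufferRange on the spot instead of being queued and replayed. A sketch under that assumption (ExampleBufferCache and the binding constant are illustrative stand-ins):

#include <cstddef>
#include <utility>

#include <glad/glad.h> // assumes the project's GL loader

constexpr GLuint EmulationUniformBlockBinding = 0; // illustrative slot

class ExampleBufferCache {
public:
    // Stub: a real cache would copy `data` into its stream buffer and return the
    // final GL buffer name plus the offset where the data was placed.
    std::pair<GLuint, GLintptr> UploadHostMemory(const void* /*data*/, std::size_t /*size*/) {
        return {0, 0};
    }
};

void BindEmulationUbo(ExampleBufferCache& cache, const void* ubo, std::size_t size) {
    const auto [buffer, offset] = cache.UploadHostMemory(ubo, size);
    // With a concrete GLuint in hand, the range can be bound immediately; there is
    // no longer a pointer that only becomes valid after the cache is unmapped.
    glBindBufferRange(GL_UNIFORM_BUFFER, EmulationUniformBlockBinding, buffer, offset,
                      static_cast<GLsizeiptr>(size));
}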
- vertex_array_pushbuffer.Bind(); - bind_ubo_pushbuffer.Bind(); - bind_ssbo_pushbuffer.Bind(); - program_manager.BindGraphicsPipeline(); if (texture_cache.TextureBarrier()) { @@ -630,17 +618,11 @@ void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) { (Maxwell::MaxConstBufferSize + device.GetUniformBufferAlignment()); buffer_cache.Map(buffer_size); - bind_ubo_pushbuffer.Setup(); - bind_ssbo_pushbuffer.Setup(); - SetupComputeConstBuffers(kernel); SetupComputeGlobalMemory(kernel); buffer_cache.Unmap(); - bind_ubo_pushbuffer.Bind(); - bind_ssbo_pushbuffer.Bind(); - const auto& launch_desc = system.GPU().KeplerCompute().launch_description; glDispatchCompute(launch_desc.grid_dim_x, launch_desc.grid_dim_y, launch_desc.grid_dim_z); ++num_queued_commands; @@ -771,8 +753,8 @@ void RasterizerOpenGL::SetupConstBuffer(u32 binding, const Tegra::Engines::Const const ConstBufferEntry& entry) { if (!buffer.enabled) { // Set values to zero to unbind buffers - bind_ubo_pushbuffer.Push(binding, buffer_cache.GetEmptyBuffer(sizeof(float)), 0, - sizeof(float)); + glBindBufferRange(GL_UNIFORM_BUFFER, binding, buffer_cache.GetEmptyBuffer(sizeof(float)), 0, + sizeof(float)); return; } @@ -783,7 +765,7 @@ void RasterizerOpenGL::SetupConstBuffer(u32 binding, const Tegra::Engines::Const const auto alignment = device.GetUniformBufferAlignment(); const auto [cbuf, offset] = buffer_cache.UploadMemory(buffer.address, size, alignment, false, device.HasFastBufferSubData()); - bind_ubo_pushbuffer.Push(binding, cbuf, offset, size); + glBindBufferRange(GL_UNIFORM_BUFFER, binding, cbuf, offset, size); } void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, const Shader& shader) { @@ -819,7 +801,8 @@ void RasterizerOpenGL::SetupGlobalMemory(u32 binding, const GlobalMemoryEntry& e const auto alignment{device.GetShaderStorageBufferAlignment()}; const auto [ssbo, buffer_offset] = buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.IsWritten()); - bind_ssbo_pushbuffer.Push(binding, ssbo, buffer_offset, static_cast<GLsizeiptr>(size)); + glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding, ssbo, buffer_offset, + static_cast<GLsizeiptr>(size)); } void RasterizerOpenGL::SetupDrawTextures(std::size_t stage_index, const Shader& shader) { @@ -1432,7 +1415,7 @@ void RasterizerOpenGL::EndTransformFeedback() { const GPUVAddr gpu_addr = binding.Address(); const std::size_t size = binding.buffer_size; const auto [dest_buffer, offset] = buffer_cache.UploadMemory(gpu_addr, size, 4, true); - glCopyNamedBufferSubData(handle, *dest_buffer, 0, offset, static_cast<GLsizeiptr>(size)); + glCopyNamedBufferSubData(handle, dest_buffer, 0, offset, static_cast<GLsizeiptr>(size)); } } diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 435da4425..caea174d2 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h @@ -231,9 +231,7 @@ private: static constexpr std::size_t STREAM_BUFFER_SIZE = 128 * 1024 * 1024; OGLBufferCache buffer_cache; - VertexArrayPushBuffer vertex_array_pushbuffer{state_tracker}; - BindBuffersRangePushBuffer bind_ubo_pushbuffer{GL_UNIFORM_BUFFER}; - BindBuffersRangePushBuffer bind_ssbo_pushbuffer{GL_SHADER_STORAGE_BUFFER}; + GLint vertex_binding = 0; std::array<OGLBuffer, Tegra::Engines::Maxwell3D::Regs::NumTransformFeedbackBuffers> transform_feedback_buffers; diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp index 
1f1f01313..22242cce9 100644 --- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp +++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp @@ -835,7 +835,8 @@ private: void DeclareConstantBuffers() { u32 binding = device.GetBaseBindings(stage).uniform_buffer; - for (const auto& [index, cbuf] : ir.GetConstantBuffers()) { + for (const auto& buffers : ir.GetConstantBuffers()) { + const auto index = buffers.first; code.AddLine("layout (std140, binding = {}) uniform {} {{", binding++, GetConstBufferBlock(index)); code.AddLine(" uvec4 {}[{}];", GetConstBuffer(index), MAX_CONSTBUFFER_ELEMENTS); @@ -1144,6 +1145,7 @@ private: return {"gl_FragCoord"s + GetSwizzle(element), Type::Float}; default: UNREACHABLE(); + return {"0", Type::Int}; } case Attribute::Index::FrontColor: return {"gl_Color"s + GetSwizzle(element), Type::Float}; @@ -1821,13 +1823,15 @@ private: Expression HMergeH0(Operation operation) { const std::string dest = VisitOperand(operation, 0).AsUint(); const std::string src = VisitOperand(operation, 1).AsUint(); - return {fmt::format("bitfieldInsert({}, {}, 0, 16)", dest, src), Type::Uint}; + return {fmt::format("vec2(unpackHalf2x16({}).x, unpackHalf2x16({}).y)", src, dest), + Type::HalfFloat}; } Expression HMergeH1(Operation operation) { const std::string dest = VisitOperand(operation, 0).AsUint(); const std::string src = VisitOperand(operation, 1).AsUint(); - return {fmt::format("bitfieldInsert({}, {}, 16, 16)", dest, src), Type::Uint}; + return {fmt::format("vec2(unpackHalf2x16({}).x, unpackHalf2x16({}).y)", dest, src), + Type::HalfFloat}; } Expression HPack2(Operation operation) { @@ -2117,8 +2121,14 @@ private: return {}; } return {fmt::format("atomic{}({}, {})", opname, Visit(operation[0]).GetCode(), - Visit(operation[1]).As(type)), - type}; + Visit(operation[1]).AsUint()), + Type::Uint}; + } + + template <const std::string_view& opname, Type type> + Expression Reduce(Operation operation) { + code.AddLine("{};", Atomic<opname, type>(operation).GetCode()); + return {}; } Expression Branch(Operation operation) { @@ -2477,6 +2487,20 @@ private: &GLSLDecompiler::Atomic<Func::Or, Type::Int>, &GLSLDecompiler::Atomic<Func::Xor, Type::Int>, + &GLSLDecompiler::Reduce<Func::Add, Type::Uint>, + &GLSLDecompiler::Reduce<Func::Min, Type::Uint>, + &GLSLDecompiler::Reduce<Func::Max, Type::Uint>, + &GLSLDecompiler::Reduce<Func::And, Type::Uint>, + &GLSLDecompiler::Reduce<Func::Or, Type::Uint>, + &GLSLDecompiler::Reduce<Func::Xor, Type::Uint>, + + &GLSLDecompiler::Reduce<Func::Add, Type::Int>, + &GLSLDecompiler::Reduce<Func::Min, Type::Int>, + &GLSLDecompiler::Reduce<Func::Max, Type::Int>, + &GLSLDecompiler::Reduce<Func::And, Type::Int>, + &GLSLDecompiler::Reduce<Func::Or, Type::Int>, + &GLSLDecompiler::Reduce<Func::Xor, Type::Int>, + &GLSLDecompiler::Branch, &GLSLDecompiler::BranchIndirect, &GLSLDecompiler::PushFlowStack, diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp index 0b4d999d7..2729d1265 100644 --- a/src/video_core/renderer_opengl/gl_texture_cache.cpp +++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp @@ -417,7 +417,7 @@ void CachedSurfaceView::Attach(GLenum attachment, GLenum target) const { switch (params.target) { case SurfaceTarget::Texture2DArray: - glFramebufferTexture(target, attachment, GetTexture(), params.base_level); + glFramebufferTexture(target, attachment, GetTexture(), 0); break; default: UNIMPLEMENTED(); diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h 
b/src/video_core/renderer_opengl/maxwell_to_gl.h index 89f0e04ef..2c0c77c28 100644 --- a/src/video_core/renderer_opengl/maxwell_to_gl.h +++ b/src/video_core/renderer_opengl/maxwell_to_gl.h @@ -191,6 +191,7 @@ inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode, case Tegra::Texture::TextureMipmapFilter::Linear: return GL_LINEAR_MIPMAP_LINEAR; } + break; } case Tegra::Texture::TextureFilter::Nearest: { switch (mip_filter_mode) { @@ -201,6 +202,7 @@ inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode, case Tegra::Texture::TextureMipmapFilter::Linear: return GL_NEAREST_MIPMAP_LINEAR; } + break; } } LOG_ERROR(Render_OpenGL, "Unimplemented texture filter mode={}", static_cast<u32>(filter_mode)); diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index f1a28cc21..b2a179746 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp @@ -315,8 +315,8 @@ public: RendererOpenGL::RendererOpenGL(Core::Frontend::EmuWindow& emu_window, Core::System& system, Core::Frontend::GraphicsContext& context) - : VideoCore::RendererBase{emu_window}, emu_window{emu_window}, system{system}, - frame_mailbox{}, context{context}, has_debug_tool{HasDebugTool()} {} + : RendererBase{emu_window}, emu_window{emu_window}, system{system}, context{context}, + has_debug_tool{HasDebugTool()} {} RendererOpenGL::~RendererOpenGL() = default; diff --git a/src/video_core/renderer_opengl/utils.cpp b/src/video_core/renderer_opengl/utils.cpp index b751086fa..6d7bb16b2 100644 --- a/src/video_core/renderer_opengl/utils.cpp +++ b/src/video_core/renderer_opengl/utils.cpp @@ -14,68 +14,6 @@ namespace OpenGL { -struct VertexArrayPushBuffer::Entry { - GLuint binding_index{}; - const GLuint* buffer{}; - GLintptr offset{}; - GLsizei stride{}; -}; - -VertexArrayPushBuffer::VertexArrayPushBuffer(StateTracker& state_tracker) - : state_tracker{state_tracker} {} - -VertexArrayPushBuffer::~VertexArrayPushBuffer() = default; - -void VertexArrayPushBuffer::Setup() { - index_buffer = nullptr; - vertex_buffers.clear(); -} - -void VertexArrayPushBuffer::SetIndexBuffer(const GLuint* buffer) { - index_buffer = buffer; -} - -void VertexArrayPushBuffer::SetVertexBuffer(GLuint binding_index, const GLuint* buffer, - GLintptr offset, GLsizei stride) { - vertex_buffers.push_back(Entry{binding_index, buffer, offset, stride}); -} - -void VertexArrayPushBuffer::Bind() { - if (index_buffer) { - state_tracker.BindIndexBuffer(*index_buffer); - } - - for (const auto& entry : vertex_buffers) { - glBindVertexBuffer(entry.binding_index, *entry.buffer, entry.offset, entry.stride); - } -} - -struct BindBuffersRangePushBuffer::Entry { - GLuint binding; - const GLuint* buffer; - GLintptr offset; - GLsizeiptr size; -}; - -BindBuffersRangePushBuffer::BindBuffersRangePushBuffer(GLenum target) : target{target} {} - -BindBuffersRangePushBuffer::~BindBuffersRangePushBuffer() = default; - -void BindBuffersRangePushBuffer::Setup() { - entries.clear(); -} - -void BindBuffersRangePushBuffer::Push(GLuint binding, const GLuint* buffer, GLintptr offset, - GLsizeiptr size) { - entries.push_back(Entry{binding, buffer, offset, size}); -} - -void BindBuffersRangePushBuffer::Bind() { - for (const Entry& entry : entries) { - glBindBufferRange(target, entry.binding, *entry.buffer, entry.offset, entry.size); - } -} - void LabelGLObject(GLenum identifier, GLuint handle, VAddr addr, std::string_view extra_info) { if 
(!GLAD_GL_KHR_debug) { // We don't need to throw an error as this is just for debugging diff --git a/src/video_core/renderer_opengl/utils.h b/src/video_core/renderer_opengl/utils.h index 47ee3177b..9c09ee12c 100644 --- a/src/video_core/renderer_opengl/utils.h +++ b/src/video_core/renderer_opengl/utils.h @@ -11,49 +11,6 @@ namespace OpenGL { -class StateTracker; - -class VertexArrayPushBuffer final { -public: - explicit VertexArrayPushBuffer(StateTracker& state_tracker); - ~VertexArrayPushBuffer(); - - void Setup(); - - void SetIndexBuffer(const GLuint* buffer); - - void SetVertexBuffer(GLuint binding_index, const GLuint* buffer, GLintptr offset, - GLsizei stride); - - void Bind(); - -private: - struct Entry; - - StateTracker& state_tracker; - - const GLuint* index_buffer{}; - std::vector<Entry> vertex_buffers; -}; - -class BindBuffersRangePushBuffer final { -public: - explicit BindBuffersRangePushBuffer(GLenum target); - ~BindBuffersRangePushBuffer(); - - void Setup(); - - void Push(GLuint binding, const GLuint* buffer, GLintptr offset, GLsizeiptr size); - - void Bind(); - -private: - struct Entry; - - GLenum target; - std::vector<Entry> entries; -}; - void LabelGLObject(GLenum identifier, GLuint handle, VAddr addr, std::string_view extra_info = {}); } // namespace OpenGL diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp index 143478863..8681b821f 100644 --- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp +++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp @@ -360,6 +360,7 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib default: break; } + break; case Maxwell::VertexAttribute::Type::UnsignedInt: switch (size) { case Maxwell::VertexAttribute::Size::Size_8: @@ -370,6 +371,14 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib return VK_FORMAT_R8G8B8_UINT; case Maxwell::VertexAttribute::Size::Size_8_8_8_8: return VK_FORMAT_R8G8B8A8_UINT; + case Maxwell::VertexAttribute::Size::Size_16: + return VK_FORMAT_R16_UINT; + case Maxwell::VertexAttribute::Size::Size_16_16: + return VK_FORMAT_R16G16_UINT; + case Maxwell::VertexAttribute::Size::Size_16_16_16: + return VK_FORMAT_R16G16B16_UINT; + case Maxwell::VertexAttribute::Size::Size_16_16_16_16: + return VK_FORMAT_R16G16B16A16_UINT; case Maxwell::VertexAttribute::Size::Size_32: return VK_FORMAT_R32_UINT; case Maxwell::VertexAttribute::Size::Size_32_32: @@ -381,6 +390,7 @@ VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttrib default: break; } + break; case Maxwell::VertexAttribute::Type::UnsignedScaled: switch (size) { case Maxwell::VertexAttribute::Size::Size_8: diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp index dd590c38b..04532f8f8 100644 --- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp +++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp @@ -42,7 +42,7 @@ #include <vulkan/vulkan_win32.h> #endif -#ifdef __linux__ +#if !defined(_WIN32) && !defined(__APPLE__) #include <X11/Xlib.h> #include <vulkan/vulkan_wayland.h> #include <vulkan/vulkan_xlib.h> @@ -119,7 +119,7 @@ vk::Instance CreateInstance(Common::DynamicLibrary& library, vk::InstanceDispatc extensions.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME); break; #endif -#ifdef __linux__ +#if !defined(_WIN32) && !defined(__APPLE__) case Core::Frontend::WindowSystemType::X11: extensions.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME); break; @@ -345,7 +345,7 @@ 
bool RendererVulkan::CreateSurface() { } } #endif -#ifdef __linux__ +#if !defined(_WIN32) && !defined(__APPLE__) if (window_info.type == Core::Frontend::WindowSystemType::X11) { const VkXlibSurfaceCreateInfoKHR xlib_ci{ VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR, nullptr, 0, diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp index 21644a7e7..fbd406f2b 100644 --- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp +++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp @@ -535,7 +535,9 @@ void VKBlitScreen::CreateGraphicsPipeline() { viewport_state_ci.pNext = nullptr; viewport_state_ci.flags = 0; viewport_state_ci.viewportCount = 1; + viewport_state_ci.pViewports = nullptr; viewport_state_ci.scissorCount = 1; + viewport_state_ci.pScissors = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_ci; rasterization_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp index 0d167afbd..81e1de2be 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp @@ -74,18 +74,18 @@ Buffer VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) { return std::make_shared<CachedBufferBlock>(device, memory_manager, cpu_addr, size); } -const VkBuffer* VKBufferCache::ToHandle(const Buffer& buffer) { +VkBuffer VKBufferCache::ToHandle(const Buffer& buffer) { return buffer->GetHandle(); } -const VkBuffer* VKBufferCache::GetEmptyBuffer(std::size_t size) { +VkBuffer VKBufferCache::GetEmptyBuffer(std::size_t size) { size = std::max(size, std::size_t(4)); const auto& empty = staging_pool.GetUnusedBuffer(size, false); scheduler.RequestOutsideRenderPassOperationContext(); scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf) { cmdbuf.FillBuffer(buffer, 0, size, 0); }); - return empty.handle.address(); + return *empty.handle; } void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, @@ -94,7 +94,7 @@ void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, st std::memcpy(staging.commit->Map(size), data, size); scheduler.RequestOutsideRenderPassOperationContext(); - scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset, + scheduler.Record([staging = *staging.handle, buffer = buffer->GetHandle(), offset, size](vk::CommandBuffer cmdbuf) { cmdbuf.CopyBuffer(staging, buffer, VkBufferCopy{0, offset, size}); @@ -117,7 +117,7 @@ void VKBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset, u8* data) { const auto& staging = staging_pool.GetUnusedBuffer(size, true); scheduler.RequestOutsideRenderPassOperationContext(); - scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset, + scheduler.Record([staging = *staging.handle, buffer = buffer->GetHandle(), offset, size](vk::CommandBuffer cmdbuf) { VkBufferMemoryBarrier barrier; barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; @@ -144,7 +144,7 @@ void VKBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset, void VKBufferCache::CopyBlock(const Buffer& src, const Buffer& dst, std::size_t src_offset, std::size_t dst_offset, std::size_t size) { scheduler.RequestOutsideRenderPassOperationContext(); - scheduler.Record([src_buffer = *src->GetHandle(), dst_buffer = *dst->GetHandle(), src_offset, + scheduler.Record([src_buffer = 
src->GetHandle(), dst_buffer = dst->GetHandle(), src_offset, dst_offset, size](vk::CommandBuffer cmdbuf) { cmdbuf.CopyBuffer(src_buffer, dst_buffer, VkBufferCopy{src_offset, dst_offset, size}); diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h index d3c23da98..3cd2e2774 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.h +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h @@ -33,8 +33,8 @@ public: VAddr cpu_addr, std::size_t size); ~CachedBufferBlock(); - const VkBuffer* GetHandle() const { - return buffer.handle.address(); + VkBuffer GetHandle() const { + return *buffer.handle; } private: @@ -50,15 +50,15 @@ public: VKScheduler& scheduler, VKStagingBufferPool& staging_pool); ~VKBufferCache(); - const VkBuffer* GetEmptyBuffer(std::size_t size) override; + VkBuffer GetEmptyBuffer(std::size_t size) override; protected: + VkBuffer ToHandle(const Buffer& buffer) override; + void WriteBarrier() override {} Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override; - const VkBuffer* ToHandle(const Buffer& buffer) override; - void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, const u8* data) override; diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp index 9d92305f4..878a78755 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp +++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp @@ -343,13 +343,13 @@ QuadArrayPass::QuadArrayPass(const VKDevice& device, VKScheduler& scheduler, QuadArrayPass::~QuadArrayPass() = default; -std::pair<const VkBuffer*, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) { +std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) { const u32 num_triangle_vertices = num_vertices * 6 / 4; const std::size_t staging_size = num_triangle_vertices * sizeof(u32); auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); update_descriptor_queue.Acquire(); - update_descriptor_queue.AddBuffer(buffer.handle.address(), 0, staging_size); + update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence()); scheduler.RequestOutsideRenderPassOperationContext(); @@ -377,7 +377,7 @@ std::pair<const VkBuffer*, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertice cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, {barrier}, {}); }); - return {buffer.handle.address(), 0}; + return {*buffer.handle, 0}; } Uint8Pass::Uint8Pass(const VKDevice& device, VKScheduler& scheduler, @@ -391,14 +391,14 @@ Uint8Pass::Uint8Pass(const VKDevice& device, VKScheduler& scheduler, Uint8Pass::~Uint8Pass() = default; -std::pair<const VkBuffer*, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer, - u64 src_offset) { +std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer, + u64 src_offset) { const auto staging_size = static_cast<u32>(num_vertices * sizeof(u16)); auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); update_descriptor_queue.Acquire(); - update_descriptor_queue.AddBuffer(&src_buffer, src_offset, num_vertices); - update_descriptor_queue.AddBuffer(buffer.handle.address(), 0, staging_size); + update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices); + update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); const auto set = 
CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence()); scheduler.RequestOutsideRenderPassOperationContext(); @@ -422,7 +422,7 @@ std::pair<const VkBuffer*, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer s cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {}); }); - return {buffer.handle.address(), 0}; + return {*buffer.handle, 0}; } } // namespace Vulkan diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h index c62516bff..ec80c8683 100644 --- a/src/video_core/renderer_vulkan/vk_compute_pass.h +++ b/src/video_core/renderer_vulkan/vk_compute_pass.h @@ -50,7 +50,7 @@ public: VKUpdateDescriptorQueue& update_descriptor_queue); ~QuadArrayPass(); - std::pair<const VkBuffer*, VkDeviceSize> Assemble(u32 num_vertices, u32 first); + std::pair<VkBuffer, VkDeviceSize> Assemble(u32 num_vertices, u32 first); private: VKScheduler& scheduler; @@ -65,7 +65,7 @@ public: VKUpdateDescriptorQueue& update_descriptor_queue); ~Uint8Pass(); - std::pair<const VkBuffer*, u64> Assemble(u32 num_vertices, VkBuffer src_buffer, u64 src_offset); + std::pair<VkBuffer, u64> Assemble(u32 num_vertices, VkBuffer src_buffer, u64 src_offset); private: VKScheduler& scheduler; diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h index 35ee54d30..5b6858e9b 100644 --- a/src/video_core/renderer_vulkan/vk_memory_manager.h +++ b/src/video_core/renderer_vulkan/vk_memory_manager.h @@ -32,7 +32,7 @@ public: * memory. When passing false, it will try to allocate device local memory. * @returns A memory commit. */ - VKMemoryCommit Commit(const VkMemoryRequirements& reqs, bool host_visible); + VKMemoryCommit Commit(const VkMemoryRequirements& requirements, bool host_visible); /// Commits memory required by the buffer and binds it. VKMemoryCommit Commit(const vk::Buffer& buffer, bool host_visible); diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp index 33cbc0bb6..4ca0febb8 100644 --- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp +++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp @@ -62,13 +62,16 @@ constexpr auto ComputeShaderIndex = static_cast<std::size_t>(Tegra::Engines::Sha VkViewport GetViewportState(const VKDevice& device, const Maxwell& regs, std::size_t index) { const auto& src = regs.viewport_transform[index]; + const float width = src.scale_x * 2.0f; + const float height = src.scale_y * 2.0f; + VkViewport viewport; viewport.x = src.translate_x - src.scale_x; viewport.y = src.translate_y - src.scale_y; - viewport.width = src.scale_x * 2.0f; - viewport.height = src.scale_y * 2.0f; + viewport.width = width != 0.0f ? width : 1.0f; + viewport.height = height != 0.0f ? height : 1.0f; - const float reduce_z = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne; + const float reduce_z = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 
1.0f : 0.0f; viewport.minDepth = src.translate_z - src.scale_z * reduce_z; viewport.maxDepth = src.translate_z + src.scale_z; if (!device.IsExtDepthRangeUnrestrictedSupported()) { @@ -134,13 +137,13 @@ Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry class BufferBindings final { public: - void AddVertexBinding(const VkBuffer* buffer, VkDeviceSize offset) { - vertex.buffer_ptrs[vertex.num_buffers] = buffer; + void AddVertexBinding(VkBuffer buffer, VkDeviceSize offset) { + vertex.buffers[vertex.num_buffers] = buffer; vertex.offsets[vertex.num_buffers] = offset; ++vertex.num_buffers; } - void SetIndexBinding(const VkBuffer* buffer, VkDeviceSize offset, VkIndexType type) { + void SetIndexBinding(VkBuffer buffer, VkDeviceSize offset, VkIndexType type) { index.buffer = buffer; index.offset = offset; index.type = type; @@ -224,19 +227,19 @@ private: // Some of these fields are intentionally left uninitialized to avoid initializing them twice. struct { std::size_t num_buffers = 0; - std::array<const VkBuffer*, Maxwell::NumVertexArrays> buffer_ptrs; + std::array<VkBuffer, Maxwell::NumVertexArrays> buffers; std::array<VkDeviceSize, Maxwell::NumVertexArrays> offsets; } vertex; struct { - const VkBuffer* buffer = nullptr; + VkBuffer buffer = nullptr; VkDeviceSize offset; VkIndexType type; } index; template <std::size_t N> void BindStatic(VKScheduler& scheduler) const { - if (index.buffer != nullptr) { + if (index.buffer) { BindStatic<N, true>(scheduler); } else { BindStatic<N, false>(scheduler); @@ -251,18 +254,14 @@ private: } std::array<VkBuffer, N> buffers; - std::transform(vertex.buffer_ptrs.begin(), vertex.buffer_ptrs.begin() + N, buffers.begin(), - [](const auto ptr) { return *ptr; }); - std::array<VkDeviceSize, N> offsets; + std::copy(vertex.buffers.begin(), vertex.buffers.begin() + N, buffers.begin()); std::copy(vertex.offsets.begin(), vertex.offsets.begin() + N, offsets.begin()); if constexpr (is_indexed) { // Indexed draw - scheduler.Record([buffers, offsets, index_buffer = *index.buffer, - index_offset = index.offset, - index_type = index.type](vk::CommandBuffer cmdbuf) { - cmdbuf.BindIndexBuffer(index_buffer, index_offset, index_type); + scheduler.Record([buffers, offsets, index = index](vk::CommandBuffer cmdbuf) { + cmdbuf.BindIndexBuffer(index.buffer, index.offset, index.type); cmdbuf.BindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data()); }); } else { @@ -787,7 +786,7 @@ void RasterizerVulkan::BeginTransformFeedback() { const std::size_t size = binding.buffer_size; const auto [buffer, offset] = buffer_cache.UploadMemory(gpu_addr, size, 4, true); - scheduler.Record([buffer = *buffer, offset = offset, size](vk::CommandBuffer cmdbuf) { + scheduler.Record([buffer = buffer, offset = offset, size](vk::CommandBuffer cmdbuf) { cmdbuf.BindTransformFeedbackBuffersEXT(0, 1, &buffer, &offset, &size); cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr); }); @@ -867,7 +866,7 @@ void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawPar auto format = regs.index_array.format; const bool is_uint8 = format == Maxwell::IndexFormat::UnsignedByte; if (is_uint8 && !device.IsExtIndexTypeUint8Supported()) { - std::tie(buffer, offset) = uint8_pass.Assemble(params.num_vertices, *buffer, offset); + std::tie(buffer, offset) = uint8_pass.Assemble(params.num_vertices, buffer, offset); format = Maxwell::IndexFormat::UnsignedShort; } @@ -1004,8 +1003,8 @@ void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAdd const 
auto size = memory_manager.Read<u32>(address + 8);
 
     if (size == 0) {
-        // Sometimes global memory pointers don't have a proper size. Upload a dummy entry because
-        // Vulkan doesn't like empty buffers.
+        // Sometimes global memory pointers don't have a proper size. Upload a dummy entry
+        // because Vulkan doesn't like empty buffers.
         constexpr std::size_t dummy_size = 4;
         const auto buffer = buffer_cache.GetEmptyBuffer(dummy_size);
         update_descriptor_queue.AddBuffer(buffer, 0, dummy_size);
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index 62e4ca488..aaa138f52 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -1938,11 +1938,8 @@ private:
         return {};
     }
 
-    template <Id (Module::*func)(Id, Id, Id, Id, Id), Type result_type,
-              Type value_type = result_type>
+    template <Id (Module::*func)(Id, Id, Id, Id, Id)>
     Expression Atomic(Operation operation) {
-        const Id type_def = GetTypeDefinition(result_type);
-
         Id pointer;
         if (const auto smem = std::get_if<SmemNode>(&*operation[0])) {
             pointer = GetSharedMemoryPointer(*smem);
@@ -1950,15 +1947,19 @@ private:
             pointer = GetGlobalMemoryPointer(*gmem);
         } else {
             UNREACHABLE();
-            return {Constant(type_def, 0), result_type};
+            return {v_float_zero, Type::Float};
         }
-
-        const Id value = As(Visit(operation[1]), value_type);
-
         const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
-        const Id semantics = Constant(type_def, 0);
+        const Id semantics = Constant(t_uint, 0);
+        const Id value = AsUint(Visit(operation[1]));
+
+        return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint};
+    }
 
-        return {(this->*func)(type_def, pointer, scope, semantics, value), result_type};
+    template <Id (Module::*func)(Id, Id, Id, Id, Id)>
+    Expression Reduce(Operation operation) {
+        Atomic<func>(operation);
+        return {};
     }
 
     Expression Branch(Operation operation) {
@@ -2547,21 +2548,35 @@ private:
         &SPIRVDecompiler::AtomicImageXor,
         &SPIRVDecompiler::AtomicImageExchange,
 
-        &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange, Type::Uint>,
-        &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd, Type::Uint>,
-        &SPIRVDecompiler::Atomic<&Module::OpAtomicUMin, Type::Uint>,
-        &SPIRVDecompiler::Atomic<&Module::OpAtomicUMax, Type::Uint>,
-        &SPIRVDecompiler::Atomic<&Module::OpAtomicAnd, Type::Uint>,
-        &SPIRVDecompiler::Atomic<&Module::OpAtomicOr, Type::Uint>,
-        &SPIRVDecompiler::Atomic<&Module::OpAtomicXor, Type::Uint>,
-
-        &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange, Type::Int>,
-        &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd, Type::Int>,
-        &SPIRVDecompiler::Atomic<&Module::OpAtomicSMin, Type::Int>,
-        &SPIRVDecompiler::Atomic<&Module::OpAtomicSMax, Type::Int>,
-        &SPIRVDecompiler::Atomic<&Module::OpAtomicAnd, Type::Int>,
-        &SPIRVDecompiler::Atomic<&Module::OpAtomicOr, Type::Int>,
-        &SPIRVDecompiler::Atomic<&Module::OpAtomicXor, Type::Int>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicUMin>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicUMax>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicAnd>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicOr>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicXor>,
+
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicSMin>,
+        &SPIRVDecompiler::Atomic<&Module::OpAtomicSMax>,
+
&SPIRVDecompiler::Atomic<&Module::OpAtomicAnd>, + &SPIRVDecompiler::Atomic<&Module::OpAtomicOr>, + &SPIRVDecompiler::Atomic<&Module::OpAtomicXor>, + + &SPIRVDecompiler::Reduce<&Module::OpAtomicIAdd>, + &SPIRVDecompiler::Reduce<&Module::OpAtomicUMin>, + &SPIRVDecompiler::Reduce<&Module::OpAtomicUMax>, + &SPIRVDecompiler::Reduce<&Module::OpAtomicAnd>, + &SPIRVDecompiler::Reduce<&Module::OpAtomicOr>, + &SPIRVDecompiler::Reduce<&Module::OpAtomicXor>, + + &SPIRVDecompiler::Reduce<&Module::OpAtomicIAdd>, + &SPIRVDecompiler::Reduce<&Module::OpAtomicSMin>, + &SPIRVDecompiler::Reduce<&Module::OpAtomicSMax>, + &SPIRVDecompiler::Reduce<&Module::OpAtomicAnd>, + &SPIRVDecompiler::Reduce<&Module::OpAtomicOr>, + &SPIRVDecompiler::Reduce<&Module::OpAtomicXor>, &SPIRVDecompiler::Branch, &SPIRVDecompiler::BranchIndirect, diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp index 4bfec0077..681ecde98 100644 --- a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp +++ b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp @@ -35,12 +35,13 @@ void VKUpdateDescriptorQueue::Send(VkDescriptorUpdateTemplateKHR update_template payload.clear(); } + // TODO(Rodrigo): Rework to write the payload directly const auto payload_start = payload.data() + payload.size(); for (const auto& entry : entries) { if (const auto image = std::get_if<VkDescriptorImageInfo>(&entry)) { payload.push_back(*image); - } else if (const auto buffer = std::get_if<Buffer>(&entry)) { - payload.emplace_back(*buffer->buffer, buffer->offset, buffer->size); + } else if (const auto buffer = std::get_if<VkDescriptorBufferInfo>(&entry)) { + payload.push_back(*buffer); } else if (const auto texel = std::get_if<VkBufferView>(&entry)) { payload.push_back(*texel); } else { diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h index a9e3d5dba..6ba2c9997 100644 --- a/src/video_core/renderer_vulkan/vk_update_descriptor.h +++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h @@ -18,12 +18,11 @@ class VKScheduler; class DescriptorUpdateEntry { public: - explicit DescriptorUpdateEntry() : image{} {} + explicit DescriptorUpdateEntry() {} DescriptorUpdateEntry(VkDescriptorImageInfo image) : image{image} {} - DescriptorUpdateEntry(VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size) - : buffer{buffer, offset, size} {} + DescriptorUpdateEntry(VkDescriptorBufferInfo buffer) : buffer{buffer} {} DescriptorUpdateEntry(VkBufferView texel_buffer) : texel_buffer{texel_buffer} {} @@ -54,8 +53,8 @@ public: entries.emplace_back(VkDescriptorImageInfo{{}, image_view, {}}); } - void AddBuffer(const VkBuffer* buffer, u64 offset, std::size_t size) { - entries.push_back(Buffer{buffer, offset, size}); + void AddBuffer(VkBuffer buffer, u64 offset, std::size_t size) { + entries.emplace_back(VkDescriptorBufferInfo{buffer, offset, size}); } void AddTexelBuffer(VkBufferView texel_buffer) { @@ -67,12 +66,7 @@ public: } private: - struct Buffer { - const VkBuffer* buffer = nullptr; - u64 offset = 0; - std::size_t size = 0; - }; - using Variant = std::variant<VkDescriptorImageInfo, Buffer, VkBufferView>; + using Variant = std::variant<VkDescriptorImageInfo, VkDescriptorBufferInfo, VkBufferView>; const VKDevice& device; VKScheduler& scheduler; diff --git a/src/video_core/shader/control_flow.cpp b/src/video_core/shader/control_flow.cpp index 2e2711350..6d313963a 100644 --- a/src/video_core/shader/control_flow.cpp +++ 
b/src/video_core/shader/control_flow.cpp @@ -484,17 +484,17 @@ bool TryInspectAddress(CFGRebuildState& state) { } case BlockCollision::Inside: { // This case is the tricky one: - // We need to Split the block in 2 sepparate blocks + // We need to split the block into 2 separate blocks const u32 end = state.block_info[block_index].end; BlockInfo& new_block = CreateBlockInfo(state, address, end); BlockInfo& current_block = state.block_info[block_index]; current_block.end = address - 1; - new_block.branch = current_block.branch; + new_block.branch = std::move(current_block.branch); BlockBranchInfo forward_branch = MakeBranchInfo<SingleBranch>(); const auto branch = std::get_if<SingleBranch>(forward_branch.get()); branch->address = address; branch->ignore = true; - current_block.branch = forward_branch; + current_block.branch = std::move(forward_branch); return true; } default: diff --git a/src/video_core/shader/decode/arithmetic.cpp b/src/video_core/shader/decode/arithmetic.cpp index 478394682..4db329fa5 100644 --- a/src/video_core/shader/decode/arithmetic.cpp +++ b/src/video_core/shader/decode/arithmetic.cpp @@ -136,7 +136,8 @@ u32 ShaderIR::DecodeArithmetic(NodeBlock& bb, u32 pc) { SetRegister(bb, instr.gpr0, value); break; } - case OpCode::Id::FCMP_R: { + case OpCode::Id::FCMP_RR: + case OpCode::Id::FCMP_RC: { UNIMPLEMENTED_IF(instr.fcmp.ftz == 0); Node op_c = GetRegister(instr.gpr39); Node comp = GetPredicateComparisonFloat(instr.fcmp.cond, std::move(op_c), Immediate(0.0f)); diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp index 0dd7a1196..85ee9aa5e 100644 --- a/src/video_core/shader/decode/image.cpp +++ b/src/video_core/shader/decode/image.cpp @@ -352,8 +352,10 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) { registry.ObtainBoundSampler(static_cast<u32>(instr.image.index.Value())); } else { const Node image_register = GetRegister(instr.gpr39); - const auto [base_image, buffer, offset] = TrackCbuf( - image_register, global_code, static_cast<s64>(global_code.size())); + const auto result = TrackCbuf(image_register, global_code, + static_cast<s64>(global_code.size())); + const auto buffer = std::get<1>(result); + const auto offset = std::get<2>(result); descriptor = registry.ObtainBindlessSampler(buffer, offset); } if (!descriptor) { @@ -497,9 +499,12 @@ Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType t Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::ImageType type) { const Node image_register = GetRegister(reg); - const auto [base_image, buffer, offset] = + const auto result = TrackCbuf(image_register, global_code, static_cast<s64>(global_code.size())); + const auto buffer = std::get<1>(result); + const auto offset = std::get<2>(result); + const auto it = std::find_if(std::begin(used_images), std::end(used_images), [buffer = buffer, offset = offset](const Image& entry) { diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp index b8f63922f..8112ead3e 100644 --- a/src/video_core/shader/decode/memory.cpp +++ b/src/video_core/shader/decode/memory.cpp @@ -3,7 +3,9 @@ // Refer to the license.txt file included. 
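Returning to the TryInspectAddress hunk above: when a branch target falls inside an already-scanned block, the block is split in two, the tail inherits the original branch, and the head gets an `ignore`d fall-through branch to the split point. A simplified sketch of that split, with stand-in types in place of the real BlockInfo/BlockBranchInfo structures:

#include <cstdint>
#include <memory>
#include <utility>

struct BranchInfo {
    std::uint32_t address = 0;
    bool ignore = false; // marks a fall-through rather than a real jump
};

struct BlockInfo {
    std::uint32_t start = 0;
    std::uint32_t end = 0;
    std::shared_ptr<BranchInfo> branch;
};

// Splits `block` at `address`, returning the new tail block [address, old end].
BlockInfo SplitBlock(BlockInfo& block, std::uint32_t address) {
    BlockInfo tail{address, block.end, std::move(block.branch)}; // tail keeps the old branch
    block.end = address - 1;
    auto fallthrough = std::make_shared<BranchInfo>();
    fallthrough->address = address;
    fallthrough->ignore = true; // execution simply continues into the tail
    block.branch = std::move(fallthrough);
    return tail;
}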
#include <algorithm> +#include <utility> #include <vector> + #include <fmt/format.h> #include "common/alignment.h" @@ -16,6 +18,7 @@ namespace VideoCommon::Shader { +using std::move; using Tegra::Shader::AtomicOp; using Tegra::Shader::AtomicType; using Tegra::Shader::Attribute; @@ -27,29 +30,26 @@ using Tegra::Shader::StoreType; namespace { -Node GetAtomOperation(AtomicOp op, bool is_signed, Node memory, Node data) { - const OperationCode operation_code = [op] { - switch (op) { - case AtomicOp::Add: - return OperationCode::AtomicIAdd; - case AtomicOp::Min: - return OperationCode::AtomicIMin; - case AtomicOp::Max: - return OperationCode::AtomicIMax; - case AtomicOp::And: - return OperationCode::AtomicIAnd; - case AtomicOp::Or: - return OperationCode::AtomicIOr; - case AtomicOp::Xor: - return OperationCode::AtomicIXor; - case AtomicOp::Exch: - return OperationCode::AtomicIExchange; - default: - UNIMPLEMENTED_MSG("op={}", static_cast<int>(op)); - return OperationCode::AtomicIAdd; - } - }(); - return SignedOperation(operation_code, is_signed, std::move(memory), std::move(data)); +OperationCode GetAtomOperation(AtomicOp op) { + switch (op) { + case AtomicOp::Add: + return OperationCode::AtomicIAdd; + case AtomicOp::Min: + return OperationCode::AtomicIMin; + case AtomicOp::Max: + return OperationCode::AtomicIMax; + case AtomicOp::And: + return OperationCode::AtomicIAnd; + case AtomicOp::Or: + return OperationCode::AtomicIOr; + case AtomicOp::Xor: + return OperationCode::AtomicIXor; + case AtomicOp::Exch: + return OperationCode::AtomicIExchange; + default: + UNIMPLEMENTED_MSG("op={}", static_cast<int>(op)); + return OperationCode::AtomicIAdd; + } } bool IsUnaligned(Tegra::Shader::UniformType uniform_type) { @@ -90,23 +90,22 @@ u32 GetMemorySize(Tegra::Shader::UniformType uniform_type) { Node ExtractUnaligned(Node value, Node address, u32 mask, u32 size) { Node offset = Operation(OperationCode::UBitwiseAnd, address, Immediate(mask)); - offset = Operation(OperationCode::ULogicalShiftLeft, std::move(offset), Immediate(3)); - return Operation(OperationCode::UBitfieldExtract, std::move(value), std::move(offset), - Immediate(size)); + offset = Operation(OperationCode::ULogicalShiftLeft, move(offset), Immediate(3)); + return Operation(OperationCode::UBitfieldExtract, move(value), move(offset), Immediate(size)); } Node InsertUnaligned(Node dest, Node value, Node address, u32 mask, u32 size) { - Node offset = Operation(OperationCode::UBitwiseAnd, std::move(address), Immediate(mask)); - offset = Operation(OperationCode::ULogicalShiftLeft, std::move(offset), Immediate(3)); - return Operation(OperationCode::UBitfieldInsert, std::move(dest), std::move(value), - std::move(offset), Immediate(size)); + Node offset = Operation(OperationCode::UBitwiseAnd, move(address), Immediate(mask)); + offset = Operation(OperationCode::ULogicalShiftLeft, move(offset), Immediate(3)); + return Operation(OperationCode::UBitfieldInsert, move(dest), move(value), move(offset), + Immediate(size)); } Node Sign16Extend(Node value) { Node sign = Operation(OperationCode::UBitwiseAnd, value, Immediate(1U << 15)); - Node is_sign = Operation(OperationCode::LogicalUEqual, std::move(sign), Immediate(1U << 15)); + Node is_sign = Operation(OperationCode::LogicalUEqual, move(sign), Immediate(1U << 15)); Node extend = Operation(OperationCode::Select, is_sign, Immediate(0xFFFF0000), Immediate(0)); - return Operation(OperationCode::UBitwiseOr, std::move(value), std::move(extend)); + return Operation(OperationCode::UBitwiseOr, move(value), 
move(extend)); } } // Anonymous namespace @@ -379,20 +378,36 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { if (IsUnaligned(type)) { const u32 mask = GetUnalignedMask(type); - value = InsertUnaligned(gmem, std::move(value), real_address, mask, size); + value = InsertUnaligned(gmem, move(value), real_address, mask, size); } bb.push_back(Operation(OperationCode::Assign, gmem, value)); } break; } + case OpCode::Id::RED: { + UNIMPLEMENTED_IF_MSG(instr.red.type != GlobalAtomicType::U32); + UNIMPLEMENTED_IF_MSG(instr.red.operation != AtomicOp::Add); + const auto [real_address, base_address, descriptor] = + TrackGlobalMemory(bb, instr, true, true); + if (!real_address || !base_address) { + // Tracking failed, skip atomic. + break; + } + Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor); + Node value = GetRegister(instr.gpr0); + bb.push_back(Operation(OperationCode::ReduceIAdd, move(gmem), move(value))); + break; + } case OpCode::Id::ATOM: { UNIMPLEMENTED_IF_MSG(instr.atom.operation == AtomicOp::Inc || instr.atom.operation == AtomicOp::Dec || instr.atom.operation == AtomicOp::SafeAdd, "operation={}", static_cast<int>(instr.atom.operation.Value())); UNIMPLEMENTED_IF_MSG(instr.atom.type == GlobalAtomicType::S64 || - instr.atom.type == GlobalAtomicType::U64, + instr.atom.type == GlobalAtomicType::U64 || + instr.atom.type == GlobalAtomicType::F16x2_FTZ_RN || + instr.atom.type == GlobalAtomicType::F32_FTZ_RN, "type={}", static_cast<int>(instr.atom.type.Value())); const auto [real_address, base_address, descriptor] = @@ -403,11 +418,11 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { } const bool is_signed = - instr.atoms.type == AtomicType::S32 || instr.atoms.type == AtomicType::S64; + instr.atom.type == GlobalAtomicType::S32 || instr.atom.type == GlobalAtomicType::S64; Node gmem = MakeNode<GmemNode>(real_address, base_address, descriptor); - Node value = GetAtomOperation(static_cast<AtomicOp>(instr.atom.operation), is_signed, gmem, - GetRegister(instr.gpr20)); - SetRegister(bb, instr.gpr0, std::move(value)); + SetRegister(bb, instr.gpr0, + SignedOperation(GetAtomOperation(instr.atom.operation), is_signed, gmem, + GetRegister(instr.gpr20))); break; } case OpCode::Id::ATOMS: { @@ -421,11 +436,10 @@ u32 ShaderIR::DecodeMemory(NodeBlock& bb, u32 pc) { instr.atoms.type == AtomicType::S32 || instr.atoms.type == AtomicType::S64; const s32 offset = instr.atoms.GetImmediateOffset(); Node address = GetRegister(instr.gpr8); - address = Operation(OperationCode::IAdd, std::move(address), Immediate(offset)); - Node value = - GetAtomOperation(static_cast<AtomicOp>(instr.atoms.operation), is_signed, - GetSharedMemory(std::move(address)), GetRegister(instr.gpr20)); - SetRegister(bb, instr.gpr0, std::move(value)); + address = Operation(OperationCode::IAdd, move(address), Immediate(offset)); + SetRegister(bb, instr.gpr0, + SignedOperation(GetAtomOperation(instr.atoms.operation), is_signed, + GetSharedMemory(move(address)), GetRegister(instr.gpr20))); break; } case OpCode::Id::AL2P: { diff --git a/src/video_core/shader/decode/shift.cpp b/src/video_core/shader/decode/shift.cpp index 3b391d3e6..d4ffa8014 100644 --- a/src/video_core/shader/decode/shift.cpp +++ b/src/video_core/shader/decode/shift.cpp @@ -23,7 +23,6 @@ Node IsFull(Node shift) { } Node Shift(OperationCode opcode, Node value, Node shift) { - Node is_full = Operation(OperationCode::LogicalIEqual, shift, Immediate(32)); Node shifted = Operation(opcode, move(value), shift); return Operation(OperationCode::Select, 
IsFull(move(shift)), Immediate(0), move(shifted)); } diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h index 5fcc9da60..3eee961f5 100644 --- a/src/video_core/shader/node.h +++ b/src/video_core/shader/node.h @@ -178,6 +178,20 @@ enum class OperationCode { AtomicIOr, /// (memory, int) -> int AtomicIXor, /// (memory, int) -> int + ReduceUAdd, /// (memory, uint) -> void + ReduceUMin, /// (memory, uint) -> void + ReduceUMax, /// (memory, uint) -> void + ReduceUAnd, /// (memory, uint) -> void + ReduceUOr, /// (memory, uint) -> void + ReduceUXor, /// (memory, uint) -> void + + ReduceIAdd, /// (memory, int) -> void + ReduceIMin, /// (memory, int) -> void + ReduceIMax, /// (memory, int) -> void + ReduceIAnd, /// (memory, int) -> void + ReduceIOr, /// (memory, int) -> void + ReduceIXor, /// (memory, int) -> void + Branch, /// (uint branch_target) -> void BranchIndirect, /// (uint branch_target) -> void PushFlowStack, /// (uint branch_target) -> void diff --git a/src/video_core/shader/shader_ir.cpp b/src/video_core/shader/shader_ir.cpp index 8852c8a1b..822674926 100644 --- a/src/video_core/shader/shader_ir.cpp +++ b/src/video_core/shader/shader_ir.cpp @@ -56,8 +56,7 @@ Node ShaderIR::GetConstBuffer(u64 index_, u64 offset_) { const auto index = static_cast<u32>(index_); const auto offset = static_cast<u32>(offset_); - const auto [entry, is_new] = used_cbufs.try_emplace(index); - entry->second.MarkAsUsed(offset); + used_cbufs.try_emplace(index).first->second.MarkAsUsed(offset); return MakeNode<CbufNode>(index, Immediate(offset)); } @@ -66,8 +65,7 @@ Node ShaderIR::GetConstBufferIndirect(u64 index_, u64 offset_, Node node) { const auto index = static_cast<u32>(index_); const auto offset = static_cast<u32>(offset_); - const auto [entry, is_new] = used_cbufs.try_emplace(index); - entry->second.MarkAsUsedIndirect(); + used_cbufs.try_emplace(index).first->second.MarkAsUsedIndirect(); Node final_offset = [&] { // Attempt to inline constant buffer without a variable offset. 
This is done to allow @@ -166,6 +164,7 @@ Node ShaderIR::ConvertIntegerSize(Node value, Register::Size size, bool is_signe std::move(value), Immediate(16)); value = SignedOperation(OperationCode::IArithmeticShiftRight, is_signed, NO_PRECISE, std::move(value), Immediate(16)); + return value; case Register::Size::Word: // Default - do nothing return value; diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp index 10739b37d..513e9bf49 100644 --- a/src/video_core/shader/track.cpp +++ b/src/video_core/shader/track.cpp @@ -27,8 +27,9 @@ std::pair<Node, s64> FindOperation(const NodeBlock& code, s64 cursor, if (const auto conditional = std::get_if<ConditionalNode>(&*node)) { const auto& conditional_code = conditional->GetCode(); - auto [found, internal_cursor] = FindOperation( + auto result = FindOperation( conditional_code, static_cast<s64>(conditional_code.size() - 1), operation_code); + auto& found = result.first; if (found) { return {std::move(found), cursor}; } @@ -75,12 +76,13 @@ std::tuple<Node, TrackSampler> ShaderIR::TrackBindlessSampler(Node tracked, cons s64 cursor) { if (const auto cbuf = std::get_if<CbufNode>(&*tracked)) { // Constant buffer found, test if it's an immediate - const auto offset = cbuf->GetOffset(); + const auto& offset = cbuf->GetOffset(); if (const auto immediate = std::get_if<ImmediateNode>(&*offset)) { auto track = MakeTrackSampler<BindlessSamplerNode>(cbuf->GetIndex(), immediate->GetValue()); return {tracked, track}; - } else if (const auto operation = std::get_if<OperationNode>(&*offset)) { + } + if (const auto operation = std::get_if<OperationNode>(&*offset)) { const u32 bound_buffer = registry.GetBoundBuffer(); if (bound_buffer != cbuf->GetIndex()) { return {}; @@ -93,12 +95,12 @@ std::tuple<Node, TrackSampler> ShaderIR::TrackBindlessSampler(Node tracked, cons const auto offset_inm = std::get_if<ImmediateNode>(&*base_offset); const auto& gpu_driver = registry.AccessGuestDriverProfile(); const u32 bindless_cv = NewCustomVariable(); - const Node op = + Node op = Operation(OperationCode::UDiv, gpr, Immediate(gpu_driver.GetTextureHandlerSize())); const Node cv_node = GetCustomVariable(bindless_cv); Node amend_op = Operation(OperationCode::Assign, cv_node, std::move(op)); - const std::size_t amend_index = DeclareAmend(amend_op); + const std::size_t amend_index = DeclareAmend(std::move(amend_op)); AmendNodeCv(amend_index, code[cursor]); // TODO Implement Bindless Index custom variable auto track = MakeTrackSampler<ArraySamplerNode>(cbuf->GetIndex(), @@ -141,7 +143,7 @@ std::tuple<Node, u32, u32> ShaderIR::TrackCbuf(Node tracked, const NodeBlock& co s64 cursor) const { if (const auto cbuf = std::get_if<CbufNode>(&*tracked)) { // Constant buffer found, test if it's an immediate - const auto offset = cbuf->GetOffset(); + const auto& offset = cbuf->GetOffset(); if (const auto immediate = std::get_if<ImmediateNode>(&*offset)) { return {tracked, cbuf->GetIndex(), immediate->GetValue()}; } @@ -186,8 +188,8 @@ std::tuple<Node, u32, u32> ShaderIR::TrackCbuf(Node tracked, const NodeBlock& co std::optional<u32> ShaderIR::TrackImmediate(Node tracked, const NodeBlock& code, s64 cursor) const { // Reduce the cursor in one to avoid infinite loops when the instruction sets the same register // that it uses as operand - const auto [found, found_cursor] = - TrackRegister(&std::get<GprNode>(*tracked), code, cursor - 1); + const auto result = TrackRegister(&std::get<GprNode>(*tracked), code, cursor - 1); + const auto& found = result.first; if (!found) { return 
diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp
index 7af0e792c..715f39d0d 100644
--- a/src/video_core/texture_cache/surface_base.cpp
+++ b/src/video_core/texture_cache/surface_base.cpp
@@ -248,8 +248,14 @@ void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
         // Use an extra temporal buffer
         auto& tmp_buffer = staging_cache.GetBuffer(1);
+        // Special case for 3D Texture Segments
+        const bool must_read_current_data =
+            params.block_depth > 0 && params.target == VideoCore::Surface::SurfaceTarget::Texture2D;
         tmp_buffer.resize(guest_memory_size);
         host_ptr = tmp_buffer.data();
+        if (must_read_current_data) {
+            memory_manager.ReadBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
+        }
 
         if (params.is_tiled) {
             ASSERT_MSG(params.block_width == 0, "Block width is defined as {}",
                        params.block_width);
diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h
index a39a8661b..c5ab21f56 100644
--- a/src/video_core/texture_cache/surface_base.h
+++ b/src/video_core/texture_cache/surface_base.h
@@ -72,9 +72,9 @@ public:
         return (cpu_addr < end) && (cpu_addr_end > start);
     }
 
-    bool IsInside(const GPUVAddr other_start, const GPUVAddr other_end) {
+    bool IsInside(const GPUVAddr other_start, const GPUVAddr other_end) const {
         const GPUVAddr gpu_addr_end = gpu_addr + guest_memory_size;
-        return (gpu_addr <= other_start && other_end <= gpu_addr_end);
+        return gpu_addr <= other_start && other_end <= gpu_addr_end;
     }
 
     // Use only when recycling a surface
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
index 6f3ef45be..0de499946 100644
--- a/src/video_core/texture_cache/surface_params.cpp
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -167,7 +167,6 @@ SurfaceParams SurfaceParams::CreateForImage(const FormatLookupTable& lookup_tabl
 
 SurfaceParams SurfaceParams::CreateForDepthBuffer(Core::System& system) {
     const auto& regs = system.GPU().Maxwell3D().regs;
-    regs.zeta_width, regs.zeta_height, regs.zeta.format, regs.zeta.memory_layout.type;
     SurfaceParams params;
     params.is_tiled = regs.zeta.memory_layout.type ==
                       Tegra::Engines::Maxwell3D::Regs::InvMemoryLayout::BlockLinear;
diff --git a/src/video_core/texture_cache/surface_view.cpp b/src/video_core/texture_cache/surface_view.cpp
index 57a1f5803..6b5f5984b 100644
--- a/src/video_core/texture_cache/surface_view.cpp
+++ b/src/video_core/texture_cache/surface_view.cpp
@@ -20,4 +20,8 @@ bool ViewParams::operator==(const ViewParams& rhs) const {
            std::tie(rhs.base_layer, rhs.num_layers, rhs.base_level, rhs.num_levels, rhs.target);
 }
 
+bool ViewParams::operator!=(const ViewParams& rhs) const {
+    return !operator==(rhs);
+}
+
 } // namespace VideoCommon
diff --git a/src/video_core/texture_cache/surface_view.h b/src/video_core/texture_cache/surface_view.h
index b17fd11a9..90a8bb0ae 100644
--- a/src/video_core/texture_cache/surface_view.h
+++ b/src/video_core/texture_cache/surface_view.h
@@ -21,6 +21,7 @@ struct ViewParams {
     std::size_t Hash() const;
 
     bool operator==(const ViewParams& rhs) const;
+    bool operator!=(const ViewParams& rhs) const;
 
     bool IsLayered() const {
         switch (target) {
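The FlushBuffer change above primes the staging buffer with the current guest bytes before flushing a 2D segment that lives inside a block-linear 3D texture (block_depth > 0), so the write-back cannot clobber neighbouring slices the segment does not own. A toy read-modify-write model of that step, with invented names (FlushSlice, a flat guest vector) and a linear layout for brevity:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Toy model of the must_read_current_data path; all names are invented
    // and the layout is linear, unlike the real block-linear surfaces.
    static void FlushSlice(std::vector<std::uint8_t>& guest,
                           const std::vector<std::uint8_t>& slice,
                           std::size_t slice_index, std::size_t slice_size) {
        std::vector<std::uint8_t> staging(guest.size());
        // Prime staging with what the guest currently holds (the
        // ReadBlockUnsafe call above); skipping this would write zeros over
        // every slice of the 3D texture except the one being flushed.
        std::memcpy(staging.data(), guest.data(), guest.size());
        // Copy the flushed slice into its place within the whole range.
        std::memcpy(staging.data() + slice_index * slice_size, slice.data(), slice_size);
        // Write the merged range back to guest memory.
        std::memcpy(guest.data(), staging.data(), staging.size());
    }

    int main() {
        std::vector<std::uint8_t> guest(4 * 16, 0xAA); // four 16-byte slices
        const std::vector<std::uint8_t> slice(16, 0x55);
        FlushSlice(guest, slice, 2, 16);
        // Neighbouring slices keep their bytes; only slice 2 changed.
        return (guest[0] == 0xAA && guest[32] == 0x55) ? 0 : 1;
    }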
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index cfc7fe6e9..69ca08fd1 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -509,7 +509,9 @@ private:
         }
         const auto& final_params = new_surface->GetSurfaceParams();
         if (cr_params.type != final_params.type) {
-            BufferCopy(current_surface, new_surface);
+            if (Settings::values.use_accurate_gpu_emulation) {
+                BufferCopy(current_surface, new_surface);
+            }
         } else {
             std::vector<CopyParams> bricks = current_surface->BreakDown(final_params);
             for (auto& brick : bricks) {
@@ -612,10 +614,10 @@ private:
      * textures within the GPU if possible. Falls back to LLE when it isn't possible to use any of
      * the HLE methods.
      *
-     * @param overlaps   The overlapping surfaces registered in the cache.
-     * @param params     The parameters on the new surface.
-     * @param gpu_addr   The starting address of the new surface.
-     * @param cache_addr The starting address of the new surface on physical memory.
+     * @param overlaps The overlapping surfaces registered in the cache.
+     * @param params   The parameters on the new surface.
+     * @param gpu_addr The starting address of the new surface.
+     * @param cpu_addr The starting address of the new surface on physical memory.
      */
     std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps,
                                                                const SurfaceParams& params,
@@ -645,7 +647,8 @@ private:
                     break;
                 }
                 const u32 offset = static_cast<u32>(surface->GetCpuAddr() - cpu_addr);
-                const auto [x, y, z] = params.GetBlockOffsetXYZ(offset);
+                const auto offsets = params.GetBlockOffsetXYZ(offset);
+                const auto z = std::get<2>(offsets);
                 modified |= surface->IsModified();
                 const CopyParams copy_params(0, 0, 0, 0, 0, z, 0, 0, params.width, params.height,
                                              1);
diff --git a/src/web_service/CMakeLists.txt b/src/web_service/CMakeLists.txt
index 01f2d129d..0c9bb0d55 100644
--- a/src/web_service/CMakeLists.txt
+++ b/src/web_service/CMakeLists.txt
@@ -8,9 +8,4 @@ add_library(web_service STATIC
 )
 
 create_target_directory_groups(web_service)
-
-get_directory_property(OPENSSL_LIBS
-    DIRECTORY ${PROJECT_SOURCE_DIR}/externals/libressl
-    DEFINITION OPENSSL_LIBS)
-target_compile_definitions(web_service PRIVATE -DCPPHTTPLIB_OPENSSL_SUPPORT)
-target_link_libraries(web_service PRIVATE common json-headers ${OPENSSL_LIBS} httplib lurlparser)
+target_link_libraries(web_service PRIVATE common json-headers httplib lurlparser)
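In the Manage3DSurfaces hunk above only the Z component of the block offset is consumed, so the structured binding [x, y, z] gives way to std::get<2> on the returned tuple, leaving x and y unnamed rather than unused. A sketch of the idiom; BlockOffsetXYZ and its divisors are made up, only the tuple access mirrors the real code:

    #include <cstdint>
    #include <tuple>

    // Hypothetical stand-in for SurfaceParams::GetBlockOffsetXYZ.
    static std::tuple<std::uint32_t, std::uint32_t, std::uint32_t> BlockOffsetXYZ(
        std::uint32_t offset) {
        return {offset % 16, (offset / 16) % 16, offset / 256};
    }

    int main() {
        const auto offsets = BlockOffsetXYZ(4096);
        // Only Z is needed; x and y are never named, so no unused-variable noise.
        const auto z = std::get<2>(offsets);
        return z == 16 ? 0 : 1;
    }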
diff --git a/src/web_service/web_backend.cpp b/src/web_service/web_backend.cpp
index 737ffe409..09d1651ac 100644
--- a/src/web_service/web_backend.cpp
+++ b/src/web_service/web_backend.cpp
@@ -43,7 +43,7 @@ struct Client::Impl {
         if (jwt.empty() && !allow_anonymous) {
             LOG_ERROR(WebService, "Credentials must be provided for authenticated requests");
             return Common::WebResult{Common::WebResult::Code::CredentialsMissing,
-                                     "Credentials needed"};
+                                     "Credentials needed", ""};
         }
 
         auto result = GenericRequest(method, path, data, accept, jwt);
@@ -81,12 +81,12 @@ struct Client::Impl {
             cli = std::make_unique<httplib::SSLClient>(parsedUrl.m_Host.c_str(), port);
         } else {
             LOG_ERROR(WebService, "Bad URL scheme {}", parsedUrl.m_Scheme);
-            return Common::WebResult{Common::WebResult::Code::InvalidURL, "Bad URL scheme"};
+            return Common::WebResult{Common::WebResult::Code::InvalidURL, "Bad URL scheme", ""};
         }
     }
     if (cli == nullptr) {
         LOG_ERROR(WebService, "Invalid URL {}", host + path);
-        return Common::WebResult{Common::WebResult::Code::InvalidURL, "Invalid URL"};
+        return Common::WebResult{Common::WebResult::Code::InvalidURL, "Invalid URL", ""};
     }
 
     cli->set_timeout_sec(TIMEOUT_SECONDS);
@@ -118,27 +118,27 @@ struct Client::Impl {
 
         if (!cli->send(request, response)) {
             LOG_ERROR(WebService, "{} to {} returned null", method, host + path);
-            return Common::WebResult{Common::WebResult::Code::LibError, "Null response"};
+            return Common::WebResult{Common::WebResult::Code::LibError, "Null response", ""};
         }
 
         if (response.status >= 400) {
             LOG_ERROR(WebService, "{} to {} returned error status code: {}", method, host + path,
                       response.status);
             return Common::WebResult{Common::WebResult::Code::HttpError,
-                                     std::to_string(response.status)};
+                                     std::to_string(response.status), ""};
         }
 
         auto content_type = response.headers.find("content-type");
 
         if (content_type == response.headers.end()) {
             LOG_ERROR(WebService, "{} to {} returned no content", method, host + path);
-            return Common::WebResult{Common::WebResult::Code::WrongContent, ""};
+            return Common::WebResult{Common::WebResult::Code::WrongContent, "", ""};
         }
 
         if (content_type->second.find(accept) == std::string::npos) {
             LOG_ERROR(WebService, "{} to {} returned wrong content: {}", method, host + path,
                       content_type->second);
-            return Common::WebResult{Common::WebResult::Code::WrongContent, "Wrong content"};
+            return Common::WebResult{Common::WebResult::Code::WrongContent, "Wrong content", ""};
         }
 
         return Common::WebResult{Common::WebResult::Code::Success, "", response.body};
     }
diff --git a/src/yuzu/debugger/profiler.cpp b/src/yuzu/debugger/profiler.cpp
index f594ef076..53049ffd6 100644
--- a/src/yuzu/debugger/profiler.cpp
+++ b/src/yuzu/debugger/profiler.cpp
@@ -51,7 +51,8 @@ MicroProfileDialog::MicroProfileDialog(QWidget* parent) : QWidget(parent, Qt::Di
     setWindowTitle(tr("MicroProfile"));
     resize(1000, 600);
     // Remove the "?" button from the titlebar and enable the maximize button
-    setWindowFlags(windowFlags() & ~Qt::WindowContextHelpButtonHint | Qt::WindowMaximizeButtonHint);
+    setWindowFlags((windowFlags() & ~Qt::WindowContextHelpButtonHint) |
+                   Qt::WindowMaximizeButtonHint);
 
 #if MICROPROFILE_ENABLED
diff --git a/src/yuzu/game_list_worker.cpp b/src/yuzu/game_list_worker.cpp
index da2c27aa2..2018150db 100644
--- a/src/yuzu/game_list_worker.cpp
+++ b/src/yuzu/game_list_worker.cpp
@@ -91,7 +91,8 @@ std::pair<std::vector<u8>, std::string> GetGameListCachedObject(
         return generator();
     }
 
-    if (file1.write(reinterpret_cast<const char*>(icon.data()), icon.size()) != icon.size()) {
+    if (file1.write(reinterpret_cast<const char*>(icon.data()), icon.size()) !=
+        s64(icon.size())) {
         LOG_ERROR(Frontend, "Failed to write data to cache file.");
         return generator();
     }
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index a2702ac3c..05baec7e1 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -1017,9 +1017,9 @@ void GMainWindow::BootGame(const QString& filename) {
     std::string title_name;
     const auto res = Core::System::GetInstance().GetGameName(title_name);
     if (res != Loader::ResultStatus::Success) {
-        const auto [nacp, icon_file] = FileSys::PatchManager(title_id).GetControlMetadata();
-        if (nacp != nullptr)
-            title_name = nacp->GetApplicationName();
+        const auto metadata = FileSys::PatchManager(title_id).GetControlMetadata();
+        if (metadata.first != nullptr)
+            title_name = metadata.first->GetApplicationName();
 
         if (title_name.empty())
             title_name = FileUtil::GetFilename(filename.toStdString());
@@ -1626,7 +1626,7 @@ void GMainWindow::OnMenuInstallToNAND() {
     }
 
     FileSys::InstallResult res;
-    if (index >= static_cast<size_t>(FileSys::TitleType::Application)) {
+    if (index >= static_cast<s32>(FileSys::TitleType::Application)) {
         res = Core::System::GetInstance()
                   .GetFileSystemController()
                   .GetUserNANDContents()