Diffstat (limited to 'src/core/hle/kernel'): 23 files changed, 181 insertions, 207 deletions
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 4d2a9b35d..b882eaa0f 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -24,7 +24,6 @@ namespace Kernel {
// Wake up num_to_wake (or all) threads in a vector.
void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
                                 s32 num_to_wake) {
-    auto& time_manager = system.Kernel().TimeManager();
    // Only process up to 'target' threads, unless 'target' is <= 0, in which case process
    // them all.
    std::size_t last = waiting_threads.size();
@@ -82,7 +81,7 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32
    do {
        current_value = monitor.ExclusiveRead32(current_core, address);
-        if (current_value != value) {
+        if (current_value != static_cast<u32>(value)) {
            return ERR_INVALID_STATE;
        }
        current_value++;
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index 35448b576..fb30b6f8b 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -8,7 +8,9 @@
#include "core/core.h"
#include "core/hle/kernel/errors.h"
#include "core/hle/kernel/handle_table.h"
+#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/process.h"
+#include "core/hle/kernel/scheduler.h"
#include "core/hle/kernel/thread.h"

namespace Kernel {
@@ -22,7 +24,7 @@ constexpr u16 GetGeneration(Handle handle) {
}
} // Anonymous namespace

-HandleTable::HandleTable() {
+HandleTable::HandleTable(KernelCore& kernel) : kernel{kernel} {
    Clear();
}

@@ -103,9 +105,9 @@ bool HandleTable::IsValid(Handle handle) const {

std::shared_ptr<Object> HandleTable::GetGeneric(Handle handle) const {
    if (handle == CurrentThread) {
-        return SharedFrom(GetCurrentThread());
+        return SharedFrom(kernel.CurrentScheduler().GetCurrentThread());
    } else if (handle == CurrentProcess) {
-        return SharedFrom(Core::System::GetInstance().CurrentProcess());
+        return SharedFrom(kernel.CurrentProcess());
    }

    if (!IsValid(handle)) {
diff --git a/src/core/hle/kernel/handle_table.h b/src/core/hle/kernel/handle_table.h
index 8029660ed..c9dab8cdd 100644
--- a/src/core/hle/kernel/handle_table.h
+++ b/src/core/hle/kernel/handle_table.h
@@ -14,6 +14,8 @@

namespace Kernel {

+class KernelCore;
+
enum KernelHandle : Handle {
    InvalidHandle = 0,
    CurrentThread = 0xFFFF8000,
@@ -48,7 +50,7 @@ public:
    /// This is the maximum limit of handles allowed per process in Horizon
    static constexpr std::size_t MAX_COUNT = 1024;

-    HandleTable();
+    explicit HandleTable(KernelCore& kernel);
    ~HandleTable();

    /**
@@ -134,6 +136,9 @@ private:

    /// Head of the free slots linked list.
    u16 next_free_slot = 0;
+
+    /// Underlying kernel instance that this handle table operates under.
+    KernelCore& kernel;
};

} // namespace Kernel
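The handle_table changes above replace the global Core::System and GetCurrentThread() accessors with a KernelCore& that is injected at construction and used to resolve the CurrentThread/CurrentProcess pseudo-handles. A minimal standalone sketch of that dependency-injection pattern, using simplified stand-in types rather than yuzu's real classes:

// Standalone sketch (not yuzu code): a table that resolves pseudo-handles
// through an injected kernel reference instead of a global accessor.
#include <cstdint>
#include <iostream>
#include <memory>
#include <unordered_map>

using Handle = std::uint32_t;
constexpr Handle CurrentThread = 0xFFFF8000;

struct Thread {
    int id = 42;
};

// Stand-in for KernelCore: owns the "current thread" lookup.
struct Kernel {
    std::shared_ptr<Thread> current_thread = std::make_shared<Thread>();
};

class HandleTable {
public:
    explicit HandleTable(Kernel& kernel) : kernel{kernel} {}

    std::shared_ptr<Thread> GetThread(Handle handle) const {
        if (handle == CurrentThread) {
            return kernel.current_thread; // resolved via the injected kernel, no global
        }
        const auto it = table.find(handle);
        return it != table.end() ? it->second : nullptr;
    }

private:
    std::unordered_map<Handle, std::shared_ptr<Thread>> table;
    Kernel& kernel; // mirrors the back-reference added in this commit
};

int main() {
    Kernel kernel;
    HandleTable handles{kernel};
    std::cout << handles.GetThread(CurrentThread)->id << '\n'; // prints 42
}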
diff --git a/src/core/hle/kernel/hle_ipc.cpp b/src/core/hle/kernel/hle_ipc.cpp
index 9277b5d08..81f85643b 100644
--- a/src/core/hle/kernel/hle_ipc.cpp
+++ b/src/core/hle/kernel/hle_ipc.cpp
@@ -293,13 +293,15 @@ std::vector<u8> HLERequestContext::ReadBuffer(std::size_t buffer_index) const {
                           BufferDescriptorA()[buffer_index].Size()};

    if (is_buffer_a) {
-        ASSERT_OR_EXECUTE_MSG(BufferDescriptorA().size() > buffer_index, { return buffer; },
-                              "BufferDescriptorA invalid buffer_index {}", buffer_index);
+        ASSERT_OR_EXECUTE_MSG(
+            BufferDescriptorA().size() > buffer_index, { return buffer; },
+            "BufferDescriptorA invalid buffer_index {}", buffer_index);
        buffer.resize(BufferDescriptorA()[buffer_index].Size());
        memory.ReadBlock(BufferDescriptorA()[buffer_index].Address(), buffer.data(), buffer.size());
    } else {
-        ASSERT_OR_EXECUTE_MSG(BufferDescriptorX().size() > buffer_index, { return buffer; },
-                              "BufferDescriptorX invalid buffer_index {}", buffer_index);
+        ASSERT_OR_EXECUTE_MSG(
+            BufferDescriptorX().size() > buffer_index, { return buffer; },
+            "BufferDescriptorX invalid buffer_index {}", buffer_index);
        buffer.resize(BufferDescriptorX()[buffer_index].Size());
        memory.ReadBlock(BufferDescriptorX()[buffer_index].Address(), buffer.data(), buffer.size());
    }
@@ -324,16 +326,16 @@ std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size,
    }

    if (is_buffer_b) {
-        ASSERT_OR_EXECUTE_MSG(BufferDescriptorB().size() > buffer_index &&
-                                  BufferDescriptorB()[buffer_index].Size() >= size,
-                              { return 0; }, "BufferDescriptorB is invalid, index={}, size={}",
-                              buffer_index, size);
+        ASSERT_OR_EXECUTE_MSG(
+            BufferDescriptorB().size() > buffer_index &&
+                BufferDescriptorB()[buffer_index].Size() >= size,
+            { return 0; }, "BufferDescriptorB is invalid, index={}, size={}", buffer_index, size);
        memory.WriteBlock(BufferDescriptorB()[buffer_index].Address(), buffer, size);
    } else {
-        ASSERT_OR_EXECUTE_MSG(BufferDescriptorC().size() > buffer_index &&
-                                  BufferDescriptorC()[buffer_index].Size() >= size,
-                              { return 0; }, "BufferDescriptorC is invalid, index={}, size={}",
-                              buffer_index, size);
+        ASSERT_OR_EXECUTE_MSG(
+            BufferDescriptorC().size() > buffer_index &&
+                BufferDescriptorC()[buffer_index].Size() >= size,
+            { return 0; }, "BufferDescriptorC is invalid, index={}, size={}", buffer_index, size);
        memory.WriteBlock(BufferDescriptorC()[buffer_index].Address(), buffer, size);
    }

@@ -344,12 +346,14 @@ std::size_t HLERequestContext::GetReadBufferSize(std::size_t buffer_index) const
    const bool is_buffer_a{BufferDescriptorA().size() > buffer_index &&
                           BufferDescriptorA()[buffer_index].Size()};
    if (is_buffer_a) {
-        ASSERT_OR_EXECUTE_MSG(BufferDescriptorA().size() > buffer_index, { return 0; },
-                              "BufferDescriptorA invalid buffer_index {}", buffer_index);
+        ASSERT_OR_EXECUTE_MSG(
+            BufferDescriptorA().size() > buffer_index, { return 0; },
+            "BufferDescriptorA invalid buffer_index {}", buffer_index);
        return BufferDescriptorA()[buffer_index].Size();
    } else {
-        ASSERT_OR_EXECUTE_MSG(BufferDescriptorX().size() > buffer_index, { return 0; },
-                              "BufferDescriptorX invalid buffer_index {}", buffer_index);
+        ASSERT_OR_EXECUTE_MSG(
+            BufferDescriptorX().size() > buffer_index, { return 0; },
+            "BufferDescriptorX invalid buffer_index {}", buffer_index);
        return BufferDescriptorX()[buffer_index].Size();
    }
}
@@ -358,12 +362,14 @@ std::size_t HLERequestContext::GetWriteBufferSize(std::size_t buffer_index) cons
    const bool is_buffer_b{BufferDescriptorB().size() > buffer_index &&
                           BufferDescriptorB()[buffer_index].Size()};
    if (is_buffer_b) {
-        ASSERT_OR_EXECUTE_MSG(BufferDescriptorB().size() > buffer_index, { return 0; },
-                              "BufferDescriptorB invalid buffer_index {}", buffer_index);
+        ASSERT_OR_EXECUTE_MSG(
+            BufferDescriptorB().size() > buffer_index, { return 0; },
+            "BufferDescriptorB invalid buffer_index {}", buffer_index);
        return BufferDescriptorB()[buffer_index].Size();
    } else {
-        ASSERT_OR_EXECUTE_MSG(BufferDescriptorC().size() > buffer_index, { return 0; },
-                              "BufferDescriptorC invalid buffer_index {}", buffer_index);
+        ASSERT_OR_EXECUTE_MSG(
+            BufferDescriptorC().size() > buffer_index, { return 0; },
+            "BufferDescriptorC invalid buffer_index {}", buffer_index);
        return BufferDescriptorC()[buffer_index].Size();
    }
    return 0;
diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index b31673928..f3277b766 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -13,6 +13,7 @@
#include <vector>
#include <boost/container/small_vector.hpp>
#include "common/common_types.h"
+#include "common/concepts.h"
#include "common/swap.h"
#include "core/hle/ipc.h"
#include "core/hle/kernel/object.h"
@@ -193,23 +194,24 @@ public:
    /* Helper function to write a buffer using the appropriate buffer descriptor
     *
-     * @tparam ContiguousContainer an arbitrary container that satisfies the
-     *                             ContiguousContainer concept in the C++ standard library.
+     * @tparam T an arbitrary container that satisfies the
+     *           ContiguousContainer concept in the C++ standard library or a trivially copyable type.
     *
-     * @param container    The container to write the data of into a buffer.
+     * @param data         The container/data to write into a buffer.
     * @param buffer_index The buffer in particular to write to.
     */
-    template <typename ContiguousContainer,
-              typename = std::enable_if_t<!std::is_pointer_v<ContiguousContainer>>>
-    std::size_t WriteBuffer(const ContiguousContainer& container,
-                            std::size_t buffer_index = 0) const {
-        using ContiguousType = typename ContiguousContainer::value_type;
-
-        static_assert(std::is_trivially_copyable_v<ContiguousType>,
-                      "Container to WriteBuffer must contain trivially copyable objects");
-
-        return WriteBuffer(std::data(container), std::size(container) * sizeof(ContiguousType),
-                           buffer_index);
+    template <typename T, typename = std::enable_if_t<!std::is_pointer_v<T>>>
+    std::size_t WriteBuffer(const T& data, std::size_t buffer_index = 0) const {
+        if constexpr (Common::IsSTLContainer<T>) {
+            using ContiguousType = typename T::value_type;
+            static_assert(std::is_trivially_copyable_v<ContiguousType>,
+                          "Container to WriteBuffer must contain trivially copyable objects");
+            return WriteBuffer(std::data(data), std::size(data) * sizeof(ContiguousType),
+                               buffer_index);
+        } else {
+            static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
+            return WriteBuffer(&data, sizeof(T), buffer_index);
+        }
    }

    /// Helper function to get the size of the input buffer
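The new WriteBuffer overload above dispatches on Common::IsSTLContainer<T> so a single helper accepts either a contiguous container or a single trivially copyable value. A self-contained sketch of the same if constexpr dispatch, with a hand-rolled container trait standing in for Common::IsSTLContainer and a stub backend in place of guest-memory writes:

// Standalone sketch (not the real HLERequestContext API).
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <type_traits>
#include <utility>
#include <vector>

// Minimal stand-in for the detection done by Common::IsSTLContainer.
template <typename T, typename = void>
constexpr bool IsContainer = false;
template <typename T>
constexpr bool IsContainer<T, std::void_t<typename T::value_type,
                                          decltype(std::data(std::declval<T&>())),
                                          decltype(std::size(std::declval<T&>()))>> = true;

std::size_t WriteBuffer(const void* data, std::size_t size) {
    // Real code would copy into guest memory; here we just report the size.
    std::cout << "writing " << size << " bytes\n";
    return size;
}

template <typename T, typename = std::enable_if_t<!std::is_pointer_v<T>>>
std::size_t WriteBuffer(const T& data) {
    if constexpr (IsContainer<T>) {
        using Element = typename T::value_type;
        static_assert(std::is_trivially_copyable_v<Element>,
                      "container elements must be trivially copyable");
        return WriteBuffer(std::data(data), std::size(data) * sizeof(Element));
    } else {
        static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
        return WriteBuffer(&data, sizeof(T));
    }
}

struct Reply {
    std::uint32_t code;
    std::uint32_t flags;
};

int main() {
    std::vector<std::uint16_t> samples{1, 2, 3};
    WriteBuffer(samples);     // container path: 6 bytes
    WriteBuffer(Reply{0, 1}); // value path: sizeof(Reply) bytes
}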
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 1f2af7a1b..f2b0fe2fd 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -50,7 +50,8 @@ namespace Kernel {
struct KernelCore::Impl {
    explicit Impl(Core::System& system, KernelCore& kernel)
-        : global_scheduler{kernel}, synchronization{system}, time_manager{system}, system{system} {}
+        : global_scheduler{kernel}, synchronization{system}, time_manager{system},
+          global_handle_table{kernel}, system{system} {}

    void SetMulticore(bool is_multicore) {
        this->is_multicore = is_multicore;
@@ -144,29 +145,32 @@ struct KernelCore::Impl {
    void InitializePreemption(KernelCore& kernel) {
        preemption_event = Core::Timing::CreateEvent(
-            "PreemptionCallback", [this, &kernel](u64 userdata, s64 cycles_late) {
+            "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
                {
                    SchedulerLock lock(kernel);
                    global_scheduler.PreemptThreads();
                }
-                s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
+                const auto time_interval = std::chrono::nanoseconds{
+                    Core::Timing::msToCycles(std::chrono::milliseconds(10))};
                system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
            });

-        s64 time_interval = Core::Timing::msToCycles(std::chrono::milliseconds(10));
+        const auto time_interval =
+            std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))};
        system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
    }

    void InitializeSuspendThreads() {
        for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
            std::string name = "Suspend Thread Id:" + std::to_string(i);
-            std::function<void(void*)> init_func =
-                system.GetCpuManager().GetSuspendThreadStartFunc();
+            std::function<void(void*)> init_func = Core::CpuManager::GetSuspendThreadStartFunc();
            void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
-            ThreadType type =
+            const auto type =
                static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_SUSPEND);
-            auto thread_res = Thread::Create(system, type, name, 0, 0, 0, static_cast<u32>(i), 0,
-                                             nullptr, std::move(init_func), init_func_parameter);
+            auto thread_res =
+                Thread::Create(system, type, std::move(name), 0, 0, 0, static_cast<u32>(i), 0,
+                               nullptr, std::move(init_func), init_func_parameter);
+
            suspend_threads[i] = std::move(thread_res).Unwrap();
        }
    }
@@ -215,6 +219,7 @@ struct KernelCore::Impl {
                return static_cast<u32>(system.GetCpuManager().CurrentCore());
            }
        }
+        std::unique_lock lock{register_thread_mutex};
        const auto it = host_thread_ids.find(this_id);
        if (it == host_thread_ids.end()) {
            return Core::INVALID_HOST_THREAD_ID;
        }
@@ -307,7 +312,7 @@ struct KernelCore::Impl {

    // This is the kernel's handle table or supervisor handle table which
    // stores all the objects in place.
-    Kernel::HandleTable global_handle_table;
+    HandleTable global_handle_table;

    /// Map of named ports managed by the kernel, which can be retrieved using
    /// the ConnectToPort SVC.
@@ -320,7 +325,7 @@ struct KernelCore::Impl {
    std::unordered_map<std::thread::id, u32> host_thread_ids;
    u32 registered_thread_ids{Core::Hardware::NUM_CPU_CORES};
    std::bitset<Core::Hardware::NUM_CPU_CORES> registered_core_threads;
-    std::mutex register_thread_mutex;
+    mutable std::mutex register_thread_mutex;

    // Kernel memory management
    std::unique_ptr<Memory::MemoryManager> memory_manager;
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 49bd47e89..16285c3f0 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -9,6 +9,7 @@
#include <string>
#include <unordered_map>
#include <vector>
+#include "core/arm/cpu_interrupt_handler.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/memory/memory_types.h"
#include "core/hle/kernel/object.h"
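The kernel.cpp hunks above move core-timing callbacks to a (std::uintptr_t, std::chrono::nanoseconds) signature and build intervals as std::chrono::nanoseconds up front instead of raw cycle counts. A standalone sketch of that callback contract; the ScheduleEvent shown here is a stand-in that fires immediately, not the real Core::Timing API:

#include <chrono>
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>

using TimedCallback = std::function<void(std::uintptr_t user_data, std::chrono::nanoseconds late)>;

struct Event {
    std::string name;
    TimedCallback callback;
};

// A real scheduler would queue (deadline, user_data, event); here we fire immediately.
void ScheduleEvent(std::chrono::nanoseconds delay, const Event& event, std::uintptr_t user_data = 0) {
    std::cout << "firing '" << event.name << "' after " << delay.count() << " ns\n";
    event.callback(user_data, std::chrono::nanoseconds{0});
}

int main() {
    const Event preemption{"PreemptionCallback",
                           [](std::uintptr_t, std::chrono::nanoseconds) {
                               // preempt threads, then re-arm the event
                           }};
    // Intervals are expressed as nanoseconds up front, e.g. 10 ms:
    const auto interval =
        std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::milliseconds{10});
    ScheduleEvent(interval, preemption);
}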
diff --git a/src/core/hle/kernel/memory/address_space_info.cpp b/src/core/hle/kernel/memory/address_space_info.cpp
index a523a2502..e4288cab4 100644
--- a/src/core/hle/kernel/memory/address_space_info.cpp
+++ b/src/core/hle/kernel/memory/address_space_info.cpp
@@ -29,40 +29,39 @@ enum : u64 {

// clang-format off
constexpr std::array<AddressSpaceInfo, 13> AddressSpaceInfos{{
-   { 32 /*bit_width*/, Size_2_MB /*addr*/, Size_1_GB - Size_2_MB /*size*/, AddressSpaceInfo::Type::Is32Bit, },
-   { 32 /*bit_width*/, Size_1_GB /*addr*/, Size_4_GB - Size_1_GB /*size*/, AddressSpaceInfo::Type::Small64Bit, },
-   { 32 /*bit_width*/, Invalid /*addr*/, Size_1_GB /*size*/, AddressSpaceInfo::Type::Heap, },
-   { 32 /*bit_width*/, Invalid /*addr*/, Size_1_GB /*size*/, AddressSpaceInfo::Type::Alias, },
-   { 36 /*bit_width*/, Size_128_MB /*addr*/, Size_2_GB - Size_128_MB /*size*/, AddressSpaceInfo::Type::Is32Bit, },
-   { 36 /*bit_width*/, Size_2_GB /*addr*/, Size_64_GB - Size_2_GB /*size*/, AddressSpaceInfo::Type::Small64Bit, },
-   { 36 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Heap, },
-   { 36 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Alias, },
-   { 39 /*bit_width*/, Size_128_MB /*addr*/, Size_512_GB - Size_128_MB /*size*/, AddressSpaceInfo::Type::Large64Bit, },
-   { 39 /*bit_width*/, Invalid /*addr*/, Size_64_GB /*size*/, AddressSpaceInfo::Type::Is32Bit },
-   { 39 /*bit_width*/, Invalid /*addr*/, Size_6_GB /*size*/, AddressSpaceInfo::Type::Heap, },
-   { 39 /*bit_width*/, Invalid /*addr*/, Size_64_GB /*size*/, AddressSpaceInfo::Type::Alias, },
-   { 39 /*bit_width*/, Invalid /*addr*/, Size_2_GB /*size*/, AddressSpaceInfo::Type::Stack, },
+   { .bit_width = 32, .address = Size_2_MB  , .size = Size_1_GB - Size_2_MB   , .type = AddressSpaceInfo::Type::Is32Bit, },
+   { .bit_width = 32, .address = Size_1_GB  , .size = Size_4_GB - Size_1_GB   , .type = AddressSpaceInfo::Type::Small64Bit, },
+   { .bit_width = 32, .address = Invalid    , .size = Size_1_GB               , .type = AddressSpaceInfo::Type::Heap, },
+   { .bit_width = 32, .address = Invalid    , .size = Size_1_GB               , .type = AddressSpaceInfo::Type::Alias, },
+   { .bit_width = 36, .address = Size_128_MB, .size = Size_2_GB - Size_128_MB , .type = AddressSpaceInfo::Type::Is32Bit, },
+   { .bit_width = 36, .address = Size_2_GB  , .size = Size_64_GB - Size_2_GB  , .type = AddressSpaceInfo::Type::Small64Bit, },
+   { .bit_width = 36, .address = Invalid    , .size = Size_6_GB               , .type = AddressSpaceInfo::Type::Heap, },
+   { .bit_width = 36, .address = Invalid    , .size = Size_6_GB               , .type = AddressSpaceInfo::Type::Alias, },
+   { .bit_width = 39, .address = Size_128_MB, .size = Size_512_GB - Size_128_MB, .type = AddressSpaceInfo::Type::Large64Bit, },
+   { .bit_width = 39, .address = Invalid    , .size = Size_64_GB              , .type = AddressSpaceInfo::Type::Is32Bit },
+   { .bit_width = 39, .address = Invalid    , .size = Size_6_GB               , .type = AddressSpaceInfo::Type::Heap, },
+   { .bit_width = 39, .address = Invalid    , .size = Size_64_GB              , .type = AddressSpaceInfo::Type::Alias, },
+   { .bit_width = 39, .address = Invalid    , .size = Size_2_GB               , .type = AddressSpaceInfo::Type::Stack, },
}};
// clang-format on

constexpr bool IsAllowedIndexForAddress(std::size_t index) {
-    return index < std::size(AddressSpaceInfos) && AddressSpaceInfos[index].GetAddress() != Invalid;
+    return index < AddressSpaceInfos.size() && AddressSpaceInfos[index].address != Invalid;
}

-constexpr std::array<std::size_t, static_cast<std::size_t>(AddressSpaceInfo::Type::Count)>
-    AddressSpaceIndices32Bit{
-        0, 1, 0, 2, 0, 3,
-    };
+using IndexArray = std::array<std::size_t, static_cast<std::size_t>(AddressSpaceInfo::Type::Count)>;

-constexpr std::array<std::size_t, static_cast<std::size_t>(AddressSpaceInfo::Type::Count)>
-    AddressSpaceIndices36Bit{
-        4, 5, 4, 6, 4, 7,
-    };
+constexpr IndexArray AddressSpaceIndices32Bit{
+    0, 1, 0, 2, 0, 3,
+};
+
+constexpr IndexArray AddressSpaceIndices36Bit{
+    4, 5, 4, 6, 4, 7,
+};

-constexpr std::array<std::size_t, static_cast<std::size_t>(AddressSpaceInfo::Type::Count)>
-    AddressSpaceIndices39Bit{
-        9, 8, 8, 10, 12, 11,
-    };
+constexpr IndexArray AddressSpaceIndices39Bit{
+    9, 8, 8, 10, 12, 11,
+};

constexpr bool IsAllowed32BitType(AddressSpaceInfo::Type type) {
    return type < AddressSpaceInfo::Type::Count && type != AddressSpaceInfo::Type::Large64Bit &&
@@ -80,37 +79,37 @@ constexpr bool IsAllowed39BitType(AddressSpaceInfo::Type type) {

} // namespace

-u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, AddressSpaceInfo::Type type) {
+u64 AddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
    const std::size_t index{static_cast<std::size_t>(type)};
    switch (width) {
    case 32:
        ASSERT(IsAllowed32BitType(type));
        ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices32Bit[index]));
-        return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].GetAddress();
+        return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].address;
    case 36:
        ASSERT(IsAllowed36BitType(type));
        ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices36Bit[index]));
-        return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].GetAddress();
+        return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].address;
    case 39:
        ASSERT(IsAllowed39BitType(type));
        ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
-        return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].GetAddress();
+        return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
    }
    UNREACHABLE();
}

-std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, AddressSpaceInfo::Type type) {
+std::size_t AddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type) {
    const std::size_t index{static_cast<std::size_t>(type)};
    switch (width) {
    case 32:
        ASSERT(IsAllowed32BitType(type));
-        return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].GetSize();
+        return AddressSpaceInfos[AddressSpaceIndices32Bit[index]].size;
    case 36:
        ASSERT(IsAllowed36BitType(type));
-        return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].GetSize();
+        return AddressSpaceInfos[AddressSpaceIndices36Bit[index]].size;
    case 39:
        ASSERT(IsAllowed39BitType(type));
-        return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].GetSize();
+        return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
    }
    UNREACHABLE();
}
diff --git a/src/core/hle/kernel/memory/address_space_info.h b/src/core/hle/kernel/memory/address_space_info.h
index c479890be..a4e6e91e5 100644
--- a/src/core/hle/kernel/memory/address_space_info.h
+++ b/src/core/hle/kernel/memory/address_space_info.h
@@ -11,8 +11,7 @@

namespace Kernel::Memory {

-class AddressSpaceInfo final : NonCopyable {
-public:
+struct AddressSpaceInfo final {
    enum class Type : u32 {
        Is32Bit = 0,
        Small64Bit = 1,
@@ -23,31 +22,13 @@ public:
        Count,
    };

-private:
-    std::size_t bit_width{};
-    std::size_t addr{};
-    std::size_t size{};
-    Type type{};
-
-public:
    static u64 GetAddressSpaceStart(std::size_t width, Type type);
    static std::size_t GetAddressSpaceSize(std::size_t width, Type type);

-    constexpr AddressSpaceInfo(std::size_t bit_width, std::size_t addr, std::size_t size, Type type)
-        : bit_width{bit_width}, addr{addr}, size{size}, type{type} {}
-
-    constexpr std::size_t GetWidth() const {
-        return bit_width;
-    }
-    constexpr std::size_t GetAddress() const {
-        return addr;
-    }
-    constexpr std::size_t GetSize() const {
-        return size;
-    }
-    constexpr Type GetType() const {
-        return type;
-    }
+    const std::size_t bit_width{};
+    const std::size_t address{};
+    const std::size_t size{};
+    const Type type{};
};

} // namespace Kernel::Memory
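With AddressSpaceInfo turned into a plain aggregate above, the table can be written with C++20 designated initializers and callers read the fields directly instead of going through getters. A small standalone sketch of that style, assuming a C++20 compiler and using made-up size constants and a two-entry table:

#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>

struct AddressSpaceInfo {
    enum class Type : std::uint32_t { Is32Bit, Small64Bit, Large64Bit, Heap, Stack, Alias, Count };
    const std::size_t bit_width{};
    const std::size_t address{};
    const std::size_t size{};
    const Type type{};
};

constexpr std::size_t Size_1_GB = std::size_t{1} << 30;
constexpr std::size_t Size_2_MB = std::size_t{2} << 20;

constexpr std::array<AddressSpaceInfo, 2> infos{{
    {.bit_width = 32, .address = Size_2_MB, .size = Size_1_GB - Size_2_MB,
     .type = AddressSpaceInfo::Type::Is32Bit},
    {.bit_width = 39, .address = 0, .size = Size_1_GB, .type = AddressSpaceInfo::Type::Stack},
}};

int main() {
    // No GetAddress()/GetSize() wrappers needed any more.
    std::cout << infos[0].bit_width << "-bit region of " << infos[0].size << " bytes\n";
}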
diff --git a/src/core/hle/kernel/memory/memory_layout.h b/src/core/hle/kernel/memory/memory_layout.h
index 830c6f0d7..9b3d6267a 100644
--- a/src/core/hle/kernel/memory/memory_layout.h
+++ b/src/core/hle/kernel/memory/memory_layout.h
@@ -66,8 +66,6 @@ private:
    const MemoryRegion application;
    const MemoryRegion applet;
    const MemoryRegion system;
-
-    const PAddr start_address{};
};

} // namespace Kernel::Memory
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index 5d6aac00f..a3fadb533 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -604,7 +604,6 @@ ResultCode PageTable::MapPages(VAddr addr, const PageLinkedList& page_linked_lis
        if (const auto result{
                Operate(cur_addr, node.GetNumPages(), perm, OperationType::Map, node.GetAddress())};
            result.IsError()) {
-            const MemoryInfo info{block_manager->FindBlock(cur_addr).GetMemoryInfo()};
            const std::size_t num_pages{(addr - cur_addr) / PageSize};

            ASSERT(
@@ -852,11 +851,12 @@ ResultCode PageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
        return result;
    }

-    block_manager->UpdateLock(addr, size / PageSize,
-                              [](MemoryBlockManager::iterator block, MemoryPermission perm) {
-                                  block->ShareToDevice(perm);
-                              },
-                              perm);
+    block_manager->UpdateLock(
+        addr, size / PageSize,
+        [](MemoryBlockManager::iterator block, MemoryPermission perm) {
+            block->ShareToDevice(perm);
+        },
+        perm);

    return RESULT_SUCCESS;
}
@@ -874,11 +874,12 @@ ResultCode PageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
        return result;
    }

-    block_manager->UpdateLock(addr, size / PageSize,
-                              [](MemoryBlockManager::iterator block, MemoryPermission perm) {
-                                  block->UnshareToDevice(perm);
-                              },
-                              perm);
+    block_manager->UpdateLock(
+        addr, size / PageSize,
+        [](MemoryBlockManager::iterator block, MemoryPermission perm) {
+            block->UnshareToDevice(perm);
+        },
+        perm);

    return RESULT_SUCCESS;
}
diff --git a/src/core/hle/kernel/memory/system_control.cpp b/src/core/hle/kernel/memory/system_control.cpp
index 2f98e9c4c..11d204bc2 100644
--- a/src/core/hle/kernel/memory/system_control.cpp
+++ b/src/core/hle/kernel/memory/system_control.cpp
@@ -7,22 +7,15 @@
#include "core/hle/kernel/memory/system_control.h"

namespace Kernel::Memory::SystemControl {
-
-u64 GenerateRandomU64ForInit() {
-    static std::random_device device;
-    static std::mt19937 gen(device());
-    static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
-    return distribution(gen);
-}
-
+namespace {
template <typename F>
u64 GenerateUniformRange(u64 min, u64 max, F f) {
-    /* Handle the case where the difference is too large to represent. */
+    // Handle the case where the difference is too large to represent.
    if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
        return f();
    }

-    /* Iterate until we get a value in range. */
+    // Iterate until we get a value in range.
    const u64 range_size = ((max + 1) - min);
    const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
    while (true) {
@@ -32,6 +25,14 @@ u64 GenerateUniformRange(u64 min, u64 max, F f) {
    }
}

+u64 GenerateRandomU64ForInit() {
+    static std::random_device device;
+    static std::mt19937 gen(device());
+    static std::uniform_int_distribution<u64> distribution(1, std::numeric_limits<u64>::max());
+    return distribution(gen);
+}
+} // Anonymous namespace
+
u64 GenerateRandomRange(u64 min, u64 max) {
    return GenerateUniformRange(min, max, GenerateRandomU64ForInit);
}
diff --git a/src/core/hle/kernel/memory/system_control.h b/src/core/hle/kernel/memory/system_control.h
index 3fa93111d..19cab8cbc 100644
--- a/src/core/hle/kernel/memory/system_control.h
+++ b/src/core/hle/kernel/memory/system_control.h
@@ -8,11 +8,6 @@

namespace Kernel::Memory::SystemControl {

-u64 GenerateRandomU64ForInit();
-
-template <typename F>
-u64 GenerateUniformRange(u64 min, u64 max, F f);
-
u64 GenerateRandomRange(u64 min, u64 max);

} // namespace Kernel::Memory::SystemControl
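GenerateUniformRange above keeps its rejection-sampling loop: any raw draw at or above the largest multiple of the range size is discarded, so the final modulo cannot bias any bucket. A standalone sketch of the same idea using <random> directly:

#include <cstdint>
#include <iostream>
#include <limits>
#include <random>

template <typename F>
std::uint64_t UniformRange(std::uint64_t min, std::uint64_t max, F&& raw) {
    // Full-width request: every raw draw is already uniform.
    if (min == std::numeric_limits<std::uint64_t>::min() &&
        max == std::numeric_limits<std::uint64_t>::max()) {
        return raw();
    }
    const std::uint64_t range_size = (max + 1) - min;
    const std::uint64_t effective_max =
        (std::numeric_limits<std::uint64_t>::max() / range_size) * range_size;
    while (true) {
        if (const std::uint64_t value = raw(); value < effective_max) {
            return min + (value % range_size);
        }
        // value fell into the biased tail; draw again
    }
}

int main() {
    std::mt19937_64 gen{std::random_device{}()};
    const auto value = UniformRange(1000, 1999, [&] { return gen(); });
    std::cout << value << '\n'; // uniform in [1000, 1999]
}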
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index c6fcb56ad..ff9d9248b 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -408,7 +408,7 @@ void Process::LoadModule(CodeSet code_set, VAddr base_addr) {
Process::Process(Core::System& system)
    : SynchronizationObject{system.Kernel()}, page_table{std::make_unique<Memory::PageTable>(
                                                  system)},
-      address_arbiter{system}, mutex{system}, system{system} {}
+      handle_table{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {}

Process::~Process() = default;
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 9dabe3568..f45cb5674 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -382,12 +382,6 @@ private:
    /// List of threads waiting for a condition variable
    std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> cond_var_threads;

-    /// System context
-    Core::System& system;
-
-    /// Name of this process
-    std::string name;
-
    /// Address of the top of the main thread's stack
    VAddr main_thread_stack_top{};

@@ -399,6 +393,12 @@ private:

    /// Process total image size
    std::size_t image_size{};
+
+    /// Name of this process
+    std::string name;
+
+    /// System context
+    Core::System& system;
};

} // namespace Kernel
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 7b929781c..5cbd3b912 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -131,7 +131,8 @@ u32 GlobalScheduler::SelectThreads() {
    u32 cores_needing_context_switch{};
    for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
        Scheduler& sched = kernel.Scheduler(core);
-        ASSERT(top_threads[core] == nullptr || top_threads[core]->GetProcessorID() == core);
+        ASSERT(top_threads[core] == nullptr ||
+               static_cast<u32>(top_threads[core]->GetProcessorID()) == core);
        if (update_thread(top_threads[core], sched)) {
            cores_needing_context_switch |= (1ul << core);
        }
@@ -663,32 +664,26 @@ void Scheduler::Reload() {
}

void Scheduler::SwitchContextStep2() {
-    Thread* previous_thread = current_thread_prev.get();
-    Thread* new_thread = selected_thread.get();
-
    // Load context of new thread
-    Process* const previous_process =
-        previous_thread != nullptr ? previous_thread->GetOwnerProcess() : nullptr;
-
-    if (new_thread) {
-        ASSERT_MSG(new_thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
+    if (selected_thread) {
+        ASSERT_MSG(selected_thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable,
                   "Thread must be runnable.");

        // Cancel any outstanding wakeup events for this thread
-        new_thread->SetIsRunning(true);
-        new_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
-        new_thread->SetWasRunning(false);
+        selected_thread->SetIsRunning(true);
+        selected_thread->last_running_ticks = system.CoreTiming().GetCPUTicks();
+        selected_thread->SetWasRunning(false);

        auto* const thread_owner_process = current_thread->GetOwnerProcess();
        if (thread_owner_process != nullptr) {
            system.Kernel().MakeCurrentProcess(thread_owner_process);
        }
-        if (!new_thread->IsHLEThread()) {
-            Core::ARM_Interface& cpu_core = new_thread->ArmInterface();
-            cpu_core.LoadContext(new_thread->GetContext32());
-            cpu_core.LoadContext(new_thread->GetContext64());
-            cpu_core.SetTlsAddress(new_thread->GetTLSAddress());
-            cpu_core.SetTPIDR_EL0(new_thread->GetTPIDR_EL0());
+        if (!selected_thread->IsHLEThread()) {
+            Core::ARM_Interface& cpu_core = selected_thread->ArmInterface();
+            cpu_core.LoadContext(selected_thread->GetContext32());
+            cpu_core.LoadContext(selected_thread->GetContext64());
+            cpu_core.SetTlsAddress(selected_thread->GetTLSAddress());
+            cpu_core.SetTPIDR_EL0(selected_thread->GetTPIDR_EL0());
            cpu_core.ChangeProcessorID(this->core_id);
            cpu_core.ClearExclusiveState();
        }
@@ -761,7 +756,11 @@ void Scheduler::SwitchToCurrent() {
            current_thread = selected_thread;
            is_context_switch_pending = false;
        }
-        while (!is_context_switch_pending) {
+        const auto is_switch_pending = [this] {
+            std::scoped_lock lock{guard};
+            return is_context_switch_pending;
+        };
+        do {
            if (current_thread != nullptr && !current_thread->IsHLEThread()) {
                current_thread->context_guard.lock();
                if (!current_thread->IsRunnable()) {
@@ -780,7 +779,7 @@ void Scheduler::SwitchToCurrent() {
                next_context = &idle_thread->GetHostContext();
            }
            Common::Fiber::YieldTo(switch_fiber, *next_context);
-        }
+        } while (!is_switch_pending());
    }
}

@@ -802,7 +801,7 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {

void Scheduler::Initialize() {
    std::string name = "Idle Thread Id:" + std::to_string(core_id);
-    std::function<void(void*)> init_func = system.GetCpuManager().GetIdleThreadStartFunc();
+    std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
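Scheduler::SwitchToCurrent() above is restructured into a do/while whose exit condition re-reads is_context_switch_pending through a small lambda that takes the scheduler guard. A standalone sketch of that check-under-lock loop shape, with a trivial flag in place of the real scheduler state:

#include <iostream>
#include <mutex>

struct Core {
    std::mutex guard;
    bool switch_pending = false;

    void RunUntilSwitch() {
        const auto pending = [this] {
            std::scoped_lock lock{guard}; // read shared state only while holding the guard
            return switch_pending;
        };
        do {
            std::cout << "running current thread\n";
            std::scoped_lock lock{guard};
            switch_pending = true; // normally set by another thread requesting a switch
        } while (!pending());      // lock from the body is released before this check
    }
};

int main() {
    Core{}.RunUntilSwitch();
}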
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index b9cad3f4a..b6f04dcea 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -289,7 +289,7 @@
private:

class SchedulerLock {
public:
-    explicit SchedulerLock(KernelCore& kernel);
+    [[nodiscard]] explicit SchedulerLock(KernelCore& kernel);
    ~SchedulerLock();

protected:
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 7b23a6889..7e6391c6c 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -33,8 +33,10 @@ ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kern
                                                                std::string name) {
    std::shared_ptr<ServerSession> session{std::make_shared<ServerSession>(kernel)};
-    session->request_event = Core::Timing::CreateEvent(
-        name, [session](u64 userdata, s64 cycles_late) { session->CompleteSyncRequest(); });
+    session->request_event =
+        Core::Timing::CreateEvent(name, [session](std::uintptr_t, std::chrono::nanoseconds) {
+            session->CompleteSyncRequest();
+        });
    session->name = std::move(name);
    session->parent = std::move(parent);

@@ -184,8 +186,8 @@ ResultCode ServerSession::CompleteSyncRequest() {

ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread,
                                            Core::Memory::Memory& memory) {
-    ResultCode result = QueueSyncRequest(std::move(thread), memory);
-    const u64 delay = kernel.IsMulticore() ? 0U : 20000U;
+    const ResultCode result = QueueSyncRequest(std::move(thread), memory);
+    const auto delay = std::chrono::nanoseconds{kernel.IsMulticore() ? 0 : 20000};
    Core::System::GetInstance().CoreTiming().ScheduleEvent(delay, request_event, {});
    return result;
}
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 5db19dcf3..01ae57053 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -458,9 +458,7 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
        return ERR_OUT_OF_RANGE;
    }

-    auto* const thread = system.CurrentScheduler().GetCurrentThread();
    auto& kernel = system.Kernel();
-    using ObjectPtr = Thread::ThreadSynchronizationObjects::value_type;
    Thread::ThreadSynchronizationObjects objects(handle_count);
    const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();

@@ -1750,9 +1748,9 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_
    // Only process up to 'target' threads, unless 'target' is less equal 0, in which case process
    // them all.
    std::size_t last = waiting_threads.size();
-    if (target > 0)
+    if (target > 0) {
        last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
-    auto& time_manager = kernel.TimeManager();
+    }

    for (std::size_t index = 0; index < last; ++index) {
        auto& thread = waiting_threads[index];
@@ -1763,7 +1761,6 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_

        const std::size_t current_core = system.CurrentCoreIndex();
        auto& monitor = system.Monitor();
-        auto& memory = system.Memory();

        // Atomically read the value of the mutex.
        u32 mutex_val = 0;
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
index 851b702a5..8b875d853 100644
--- a/src/core/hle/kernel/synchronization.cpp
+++ b/src/core/hle/kernel/synchronization.cpp
@@ -19,7 +19,6 @@ Synchronization::Synchronization(Core::System& system) : system{system} {}
void Synchronization::SignalObject(SynchronizationObject& obj) const {
    auto& kernel = system.Kernel();
    SchedulerLock lock(kernel);
-    auto& time_manager = kernel.TimeManager();
    if (obj.IsSignaled()) {
        for (auto thread : obj.GetWaitingThreads()) {
            if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 2b1092697..d132aba34 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -13,16 +13,8 @@
#include "common/logging/log.h"
#include "common/thread_queue_list.h"
#include "core/arm/arm_interface.h"
-#ifdef ARCHITECTURE_x86_64
-#include "core/arm/dynarmic/arm_dynarmic_32.h"
-#include "core/arm/dynarmic/arm_dynarmic_64.h"
-#endif
-#include "core/arm/cpu_interrupt_handler.h"
-#include "core/arm/exclusive_monitor.h"
#include "core/arm/unicorn/arm_unicorn.h"
#include "core/core.h"
-#include "core/core_timing.h"
-#include "core/core_timing_util.h"
#include "core/cpu_manager.h"
#include "core/hardware_properties.h"
#include "core/hle/kernel/errors.h"
@@ -36,6 +28,11 @@
#include "core/hle/result.h"
#include "core/memory.h"

+#ifdef ARCHITECTURE_x86_64
+#include "core/arm/dynarmic/arm_dynarmic_32.h"
+#include "core/arm/dynarmic/arm_dynarmic_64.h"
+#endif
+
namespace Kernel {

bool Thread::ShouldWait(const Thread* thread) const {
@@ -158,7 +155,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
                                                  std::string name, VAddr entry_point, u32 priority,
                                                  u64 arg, s32 processor_id, VAddr stack_top,
                                                  Process* owner_process) {
-    std::function<void(void*)> init_func = system.GetCpuManager().GetGuestThreadStartFunc();
+    std::function<void(void*)> init_func = Core::CpuManager::GetGuestThreadStartFunc();
    void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
    return Create(system, type_flags, name, entry_point, priority, arg, processor_id, stack_top,
                  owner_process, std::move(init_func), init_func_parameter);
@@ -540,13 +537,4 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
    return RESULT_SUCCESS;
}

-////////////////////////////////////////////////////////////////////////////////////////////////////
-
-/**
- * Gets the current thread
- */
-Thread* GetCurrentThread() {
-    return Core::System::GetInstance().CurrentScheduler().GetCurrentThread();
-}
-
} // namespace Kernel
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index c0342c462..8daf79fac 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -583,8 +583,6 @@ private:

    void SetCurrentPriority(u32 new_priority);

-    void AdjustSchedulingOnAffinity(u64 old_affinity_mask, s32 old_core);
-
    Common::SpinLock context_guard{};
    ThreadContext32 context_32{};
    ThreadContext64 context_64{};
@@ -680,9 +678,4 @@ private:
    std::string name;
};

-/**
- * Gets the current thread
- */
-Thread* GetCurrentThread();
-
} // namespace Kernel
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 941305e8e..95f2446c9 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -16,14 +16,14 @@ namespace Kernel {
TimeManager::TimeManager(Core::System& system_) : system{system_} {
    time_manager_event_type = Core::Timing::CreateEvent(
-        "Kernel::TimeManagerCallback", [this](u64 thread_handle, [[maybe_unused]] s64 cycles_late) {
-            SchedulerLock lock(system.Kernel());
-            Handle proper_handle = static_cast<Handle>(thread_handle);
+        "Kernel::TimeManagerCallback",
+        [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
+            const SchedulerLock lock(system.Kernel());
+            const auto proper_handle = static_cast<Handle>(thread_handle);
            if (cancelled_events[proper_handle]) {
                return;
            }
-            std::shared_ptr<Thread> thread =
-                this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
+            auto thread = this->system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
            thread->OnWakeUp();
        });
}
@@ -34,7 +34,8 @@ void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64
        ASSERT(timetask);
        ASSERT(timetask->GetStatus() != ThreadStatus::Ready);
        ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex);
-        system.CoreTiming().ScheduleEvent(nanoseconds, time_manager_event_type, event_handle);
+        system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
+                                          time_manager_event_type, event_handle);
    } else {
        event_handle = InvalidHandle;
    }
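As in the TimeManager callback above, timer user data is now a std::uintptr_t, so the thread handle is packed when the event is scheduled and recovered with a static_cast inside the callback. A standalone sketch of that round trip; the ScheduleEvent here is a stand-in that fires immediately, not the real core-timing queue:

#include <chrono>
#include <cstdint>
#include <functional>
#include <iostream>

using Handle = std::uint32_t;
using Callback = std::function<void(std::uintptr_t user_data, std::chrono::nanoseconds late)>;

void ScheduleEvent(std::chrono::nanoseconds delay, const Callback& cb, std::uintptr_t user_data) {
    // A real core-timing queue would fire this after `delay`; fire inline here.
    cb(user_data, std::chrono::nanoseconds{0});
}

int main() {
    const Callback wake = [](std::uintptr_t user_data, std::chrono::nanoseconds) {
        const auto handle = static_cast<Handle>(user_data); // recover the packed handle
        std::cout << "waking thread handle 0x" << std::hex << handle << '\n';
    };
    const Handle thread_handle = 0xABCD;
    ScheduleEvent(std::chrono::nanoseconds{1000}, wake, static_cast<std::uintptr_t>(thread_handle));
}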