Diffstat (limited to 'src/core')
62 files changed, 1904 insertions, 855 deletions
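The kernel hunks below (k_address_arbiter, k_condition_variable, k_light_condition_variable, k_light_lock) all move from manual SetState/Wakeup bookkeeping to a KThreadQueue-based wait: a thread calls BeginWait on a queue, a signaller calls EndWait, and timeout or termination funnels through the queue's CancelWait override, which detaches the thread from the primitive's tracking structure before delegating to the base handler. The following is a minimal, self-contained sketch of that pattern; the types are simplified stand-ins for illustration, not the yuzu classes.

#include <cassert>
#include <cstdio>
#include <list>

struct Thread;

// Base wait queue: ending or cancelling a wait funnels through one place.
class ThreadQueue {
public:
    virtual ~ThreadQueue() = default;

    // Called when a waiting thread is woken with a result or its wait is
    // cancelled (timeout/termination). Derived queues first detach the
    // thread from whatever container tracks it, then call this.
    virtual void CancelWait(Thread* t, int wait_result, bool cancel_timer_task);

    // Waking a waiter normally reuses the same cleanup path.
    void EndWait(Thread* t, int wait_result);
};

struct Thread {
    ThreadQueue* wait_queue = nullptr;
    int wait_result = 0; // stand-in for ResultCode
    bool waiting = false;

    void BeginWait(ThreadQueue* q) {
        wait_queue = q;
        waiting = true;
    }
};

void ThreadQueue::CancelWait(Thread* t, int wait_result, bool /*cancel_timer_task*/) {
    t->wait_result = wait_result;
    t->waiting = false;
    t->wait_queue = nullptr;
}

void ThreadQueue::EndWait(Thread* t, int wait_result) {
    CancelWait(t, wait_result, /*cancel_timer_task=*/true);
}

// A queue for one primitive: detaches a cancelled waiter from the
// primitive's own wait list before delegating to the base handler.
class ThreadQueueForCondVar final : public ThreadQueue {
public:
    explicit ThreadQueueForCondVar(std::list<Thread*>* wait_list) : m_wait_list(wait_list) {}

    void CancelWait(Thread* t, int wait_result, bool cancel_timer_task) override {
        m_wait_list->remove(t); // detach from the primitive's tracking
        ThreadQueue::CancelWait(t, wait_result, cancel_timer_task);
    }

private:
    std::list<Thread*>* m_wait_list;
};

int main() {
    std::list<Thread*> wait_list;
    ThreadQueueForCondVar queue(&wait_list);

    Thread t;
    wait_list.push_back(&t);
    t.BeginWait(&queue);

    // A timeout cancels the wait: one call cleans up both the wait list and
    // the thread's wait state, instead of scattered erase/Wakeup logic.
    queue.CancelWait(&t, /*wait_result=*/-1, /*cancel_timer_task=*/true);
    assert(!t.waiting && wait_list.empty());
    std::printf("wait cancelled with result %d\n", t.wait_result);
    return 0;
}

Centralizing cancellation in CancelWait is what allows the diff to delete the duplicated "remove from tree" and "UnscheduleTimeEvent" blocks that previously followed every sleep.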
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index eee8e2ccd..698c4f912 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -179,12 +179,15 @@ add_library(core STATIC hle/kernel/k_client_port.h hle/kernel/k_client_session.cpp hle/kernel/k_client_session.h + hle/kernel/k_code_memory.cpp + hle/kernel/k_code_memory.h hle/kernel/k_condition_variable.cpp hle/kernel/k_condition_variable.h hle/kernel/k_event.cpp hle/kernel/k_event.h hle/kernel/k_handle_table.cpp hle/kernel/k_handle_table.h + hle/kernel/k_light_condition_variable.cpp hle/kernel/k_light_condition_variable.h hle/kernel/k_light_lock.cpp hle/kernel/k_light_lock.h @@ -237,6 +240,7 @@ add_library(core STATIC hle/kernel/k_system_control.h hle/kernel/k_thread.cpp hle/kernel/k_thread.h + hle/kernel/k_thread_queue.cpp hle/kernel/k_thread_queue.h hle/kernel/k_trace.h hle/kernel/k_transfer_memory.cpp @@ -261,8 +265,6 @@ add_library(core STATIC hle/kernel/svc_wrap.h hle/kernel/time_manager.cpp hle/kernel/time_manager.h - hle/lock.cpp - hle/lock.h hle/result.h hle/service/acc/acc.cpp hle/service/acc/acc.h @@ -408,6 +410,8 @@ add_library(core STATIC hle/service/glue/glue.h hle/service/glue/glue_manager.cpp hle/service/glue/glue_manager.h + hle/service/glue/notif.cpp + hle/service/glue/notif.h hle/service/grc/grc.cpp hle/service/grc/grc.h hle/service/hid/hid.cpp diff --git a/src/core/core.cpp b/src/core/core.cpp index 473ab9f81..aa96f709b 100644 --- a/src/core/core.cpp +++ b/src/core/core.cpp @@ -521,12 +521,6 @@ const ARM_Interface& System::CurrentArmInterface() const { return impl->kernel.CurrentPhysicalCore().ArmInterface(); } -std::size_t System::CurrentCoreIndex() const { - std::size_t core = impl->kernel.GetCurrentHostThreadID(); - ASSERT(core < Core::Hardware::NUM_CPU_CORES); - return core; -} - Kernel::PhysicalCore& System::CurrentPhysicalCore() { return impl->kernel.CurrentPhysicalCore(); } diff --git a/src/core/core.h b/src/core/core.h index 645e5c241..52ff90359 100644 --- a/src/core/core.h +++ b/src/core/core.h @@ -208,9 +208,6 @@ public: /// Gets an ARM interface to the CPU core that is currently running [[nodiscard]] const ARM_Interface& CurrentArmInterface() const; - /// Gets the index of the currently running CPU core - [[nodiscard]] std::size_t CurrentCoreIndex() const; - /// Gets the physical core for the CPU core that is currently running [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore(); diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp index 5d43c6e5d..cbcc54891 100644 --- a/src/core/cpu_manager.cpp +++ b/src/core/cpu_manager.cpp @@ -117,17 +117,18 @@ void CpuManager::MultiCoreRunGuestLoop() { physical_core = &kernel.CurrentPhysicalCore(); } system.ExitDynarmicProfile(); - physical_core->ArmInterface().ClearExclusiveState(); - kernel.CurrentScheduler()->RescheduleCurrentCore(); + { + Kernel::KScopedDisableDispatch dd(kernel); + physical_core->ArmInterface().ClearExclusiveState(); + } } } void CpuManager::MultiCoreRunIdleThread() { auto& kernel = system.Kernel(); while (true) { - auto& physical_core = kernel.CurrentPhysicalCore(); - physical_core.Idle(); - kernel.CurrentScheduler()->RescheduleCurrentCore(); + Kernel::KScopedDisableDispatch dd(kernel); + kernel.CurrentPhysicalCore().Idle(); } } @@ -135,12 +136,12 @@ void CpuManager::MultiCoreRunSuspendThread() { auto& kernel = system.Kernel(); kernel.CurrentScheduler()->OnThreadStart(); while (true) { - auto core = kernel.GetCurrentHostThreadID(); + auto core = kernel.CurrentPhysicalCoreIndex(); auto& scheduler = 
*kernel.CurrentScheduler(); Kernel::KThread* current_thread = scheduler.GetCurrentThread(); Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context); ASSERT(scheduler.ContextSwitchPending()); - ASSERT(core == kernel.GetCurrentHostThreadID()); + ASSERT(core == kernel.CurrentPhysicalCoreIndex()); scheduler.RescheduleCurrentCore(); } } @@ -346,13 +347,9 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) { sc_sync_first_use = false; } - // Abort if emulation was killed before the session really starts - if (!system.IsPoweredOn()) { - return; - } - + // Emulation was stopped if (stop_token.stop_requested()) { - break; + return; } auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread(); diff --git a/src/core/hid/emulated_console.cpp b/src/core/hid/emulated_console.cpp index 6744c6846..08f8af551 100644 --- a/src/core/hid/emulated_console.cpp +++ b/src/core/hid/emulated_console.cpp @@ -66,9 +66,10 @@ void EmulatedConsole::ReloadInput() { motion_devices = Common::Input::CreateDevice<Common::Input::InputDevice>(motion_params); if (motion_devices) { - Common::Input::InputCallback motion_callback{ - [this](Common::Input::CallbackStatus callback) { SetMotion(callback); }}; - motion_devices->SetCallback(motion_callback); + motion_devices->SetCallback({ + .on_change = + [this](const Common::Input::CallbackStatus& callback) { SetMotion(callback); }, + }); } // Unique index for identifying touch device source @@ -78,9 +79,12 @@ void EmulatedConsole::ReloadInput() { if (!touch_device) { continue; } - Common::Input::InputCallback touch_callback{ - [this, index](Common::Input::CallbackStatus callback) { SetTouch(callback, index); }}; - touch_device->SetCallback(touch_callback); + touch_device->SetCallback({ + .on_change = + [this, index](const Common::Input::CallbackStatus& callback) { + SetTouch(callback, index); + }, + }); index++; } } @@ -127,7 +131,7 @@ void EmulatedConsole::SetMotionParam(Common::ParamPackage param) { ReloadInput(); } -void EmulatedConsole::SetMotion(Common::Input::CallbackStatus callback) { +void EmulatedConsole::SetMotion(const Common::Input::CallbackStatus& callback) { std::lock_guard lock{mutex}; auto& raw_status = console.motion_values.raw_status; auto& emulated = console.motion_values.emulated; @@ -165,8 +169,7 @@ void EmulatedConsole::SetMotion(Common::Input::CallbackStatus callback) { TriggerOnChange(ConsoleTriggerType::Motion); } -void EmulatedConsole::SetTouch(Common::Input::CallbackStatus callback, - [[maybe_unused]] std::size_t index) { +void EmulatedConsole::SetTouch(const Common::Input::CallbackStatus& callback, std::size_t index) { if (index >= console.touch_values.size()) { return; } diff --git a/src/core/hid/emulated_console.h b/src/core/hid/emulated_console.h index e682a76c2..707419102 100644 --- a/src/core/hid/emulated_console.h +++ b/src/core/hid/emulated_console.h @@ -157,14 +157,14 @@ private: * Updates the motion status of the console * @param callback A CallbackStatus containing gyro and accelerometer data */ - void SetMotion(Common::Input::CallbackStatus callback); + void SetMotion(const Common::Input::CallbackStatus& callback); /** * Updates the touch status of the console * @param callback A CallbackStatus containing the touch position * @param index Finger ID to be updated */ - void SetTouch(Common::Input::CallbackStatus callback, std::size_t index); + void SetTouch(const Common::Input::CallbackStatus& callback, std::size_t index); /** * Triggers a callback that something has changed 
on the console status diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp index fbb19f230..ff9d7a7e3 100644 --- a/src/core/hid/emulated_controller.cpp +++ b/src/core/hid/emulated_controller.cpp @@ -205,11 +205,12 @@ void EmulatedController::ReloadInput() { continue; } const auto uuid = Common::UUID{button_params[index].Get("guid", "")}; - Common::Input::InputCallback button_callback{ - [this, index, uuid](Common::Input::CallbackStatus callback) { - SetButton(callback, index, uuid); - }}; - button_devices[index]->SetCallback(button_callback); + button_devices[index]->SetCallback({ + .on_change = + [this, index, uuid](const Common::Input::CallbackStatus& callback) { + SetButton(callback, index, uuid); + }, + }); button_devices[index]->ForceUpdate(); } @@ -218,11 +219,12 @@ void EmulatedController::ReloadInput() { continue; } const auto uuid = Common::UUID{stick_params[index].Get("guid", "")}; - Common::Input::InputCallback stick_callback{ - [this, index, uuid](Common::Input::CallbackStatus callback) { - SetStick(callback, index, uuid); - }}; - stick_devices[index]->SetCallback(stick_callback); + stick_devices[index]->SetCallback({ + .on_change = + [this, index, uuid](const Common::Input::CallbackStatus& callback) { + SetStick(callback, index, uuid); + }, + }); stick_devices[index]->ForceUpdate(); } @@ -231,11 +233,12 @@ void EmulatedController::ReloadInput() { continue; } const auto uuid = Common::UUID{trigger_params[index].Get("guid", "")}; - Common::Input::InputCallback trigger_callback{ - [this, index, uuid](Common::Input::CallbackStatus callback) { - SetTrigger(callback, index, uuid); - }}; - trigger_devices[index]->SetCallback(trigger_callback); + trigger_devices[index]->SetCallback({ + .on_change = + [this, index, uuid](const Common::Input::CallbackStatus& callback) { + SetTrigger(callback, index, uuid); + }, + }); trigger_devices[index]->ForceUpdate(); } @@ -243,9 +246,12 @@ void EmulatedController::ReloadInput() { if (!battery_devices[index]) { continue; } - Common::Input::InputCallback battery_callback{ - [this, index](Common::Input::CallbackStatus callback) { SetBattery(callback, index); }}; - battery_devices[index]->SetCallback(battery_callback); + battery_devices[index]->SetCallback({ + .on_change = + [this, index](const Common::Input::CallbackStatus& callback) { + SetBattery(callback, index); + }, + }); battery_devices[index]->ForceUpdate(); } @@ -253,9 +259,12 @@ void EmulatedController::ReloadInput() { if (!motion_devices[index]) { continue; } - Common::Input::InputCallback motion_callback{ - [this, index](Common::Input::CallbackStatus callback) { SetMotion(callback, index); }}; - motion_devices[index]->SetCallback(motion_callback); + motion_devices[index]->SetCallback({ + .on_change = + [this, index](const Common::Input::CallbackStatus& callback) { + SetMotion(callback, index); + }, + }); motion_devices[index]->ForceUpdate(); } @@ -267,22 +276,24 @@ void EmulatedController::ReloadInput() { if (!tas_button_devices[index]) { continue; } - Common::Input::InputCallback button_callback{ - [this, index, tas_uuid](Common::Input::CallbackStatus callback) { - SetButton(callback, index, tas_uuid); - }}; - tas_button_devices[index]->SetCallback(button_callback); + tas_button_devices[index]->SetCallback({ + .on_change = + [this, index, tas_uuid](const Common::Input::CallbackStatus& callback) { + SetButton(callback, index, tas_uuid); + }, + }); } for (std::size_t index = 0; index < tas_stick_devices.size(); ++index) { if (!tas_stick_devices[index]) { 
continue; } - Common::Input::InputCallback stick_callback{ - [this, index, tas_uuid](Common::Input::CallbackStatus callback) { - SetStick(callback, index, tas_uuid); - }}; - tas_stick_devices[index]->SetCallback(stick_callback); + tas_stick_devices[index]->SetCallback({ + .on_change = + [this, index, tas_uuid](const Common::Input::CallbackStatus& callback) { + SetStick(callback, index, tas_uuid); + }, + }); } } @@ -440,7 +451,7 @@ void EmulatedController::SetButtonParam(std::size_t index, Common::ParamPackage if (index >= button_params.size()) { return; } - button_params[index] = param; + button_params[index] = std::move(param); ReloadInput(); } @@ -448,7 +459,7 @@ void EmulatedController::SetStickParam(std::size_t index, Common::ParamPackage p if (index >= stick_params.size()) { return; } - stick_params[index] = param; + stick_params[index] = std::move(param); ReloadInput(); } @@ -456,11 +467,11 @@ void EmulatedController::SetMotionParam(std::size_t index, Common::ParamPackage if (index >= motion_params.size()) { return; } - motion_params[index] = param; + motion_params[index] = std::move(param); ReloadInput(); } -void EmulatedController::SetButton(Common::Input::CallbackStatus callback, std::size_t index, +void EmulatedController::SetButton(const Common::Input::CallbackStatus& callback, std::size_t index, Common::UUID uuid) { if (index >= controller.button_values.size()) { return; @@ -600,7 +611,7 @@ void EmulatedController::SetButton(Common::Input::CallbackStatus callback, std:: TriggerOnChange(ControllerTriggerType::Button, true); } -void EmulatedController::SetStick(Common::Input::CallbackStatus callback, std::size_t index, +void EmulatedController::SetStick(const Common::Input::CallbackStatus& callback, std::size_t index, Common::UUID uuid) { if (index >= controller.stick_values.size()) { return; @@ -650,8 +661,8 @@ void EmulatedController::SetStick(Common::Input::CallbackStatus callback, std::s TriggerOnChange(ControllerTriggerType::Stick, true); } -void EmulatedController::SetTrigger(Common::Input::CallbackStatus callback, std::size_t index, - Common::UUID uuid) { +void EmulatedController::SetTrigger(const Common::Input::CallbackStatus& callback, + std::size_t index, Common::UUID uuid) { if (index >= controller.trigger_values.size()) { return; } @@ -659,7 +670,7 @@ void EmulatedController::SetTrigger(Common::Input::CallbackStatus callback, std: const auto trigger_value = TransformToTrigger(callback); // Only read trigger values that have the same uuid or are pressed once - if (controller.stick_values[index].uuid != uuid) { + if (controller.trigger_values[index].uuid != uuid) { if (!trigger_value.pressed.value) { return; } @@ -675,7 +686,7 @@ void EmulatedController::SetTrigger(Common::Input::CallbackStatus callback, std: return; } - const auto trigger = controller.trigger_values[index]; + const auto& trigger = controller.trigger_values[index]; switch (index) { case Settings::NativeTrigger::LTrigger: @@ -692,7 +703,8 @@ void EmulatedController::SetTrigger(Common::Input::CallbackStatus callback, std: TriggerOnChange(ControllerTriggerType::Trigger, true); } -void EmulatedController::SetMotion(Common::Input::CallbackStatus callback, std::size_t index) { +void EmulatedController::SetMotion(const Common::Input::CallbackStatus& callback, + std::size_t index) { if (index >= controller.motion_values.size()) { return; } @@ -730,7 +742,8 @@ void EmulatedController::SetMotion(Common::Input::CallbackStatus callback, std:: TriggerOnChange(ControllerTriggerType::Motion, true); } -void 
EmulatedController::SetBattery(Common::Input::CallbackStatus callback, std::size_t index) { +void EmulatedController::SetBattery(const Common::Input::CallbackStatus& callback, + std::size_t index) { if (index >= controller.battery_values.size()) { return; } @@ -830,23 +843,18 @@ bool EmulatedController::SetVibration(std::size_t device_index, VibrationValue v } bool EmulatedController::TestVibration(std::size_t device_index) { - if (device_index >= output_devices.size()) { - return false; - } - if (!output_devices[device_index]) { - return false; - } - - // Send a slight vibration to test for rumble support - constexpr Common::Input::VibrationStatus status = { + static constexpr VibrationValue test_vibration = { .low_amplitude = 0.001f, .low_frequency = 160.0f, .high_amplitude = 0.001f, .high_frequency = 320.0f, - .type = Common::Input::VibrationAmplificationType::Linear, }; - return output_devices[device_index]->SetVibration(status) == - Common::Input::VibrationError::None; + + // Send a slight vibration to test for rumble support + SetVibration(device_index, test_vibration); + + // Stop any vibration and return the result + return SetVibration(device_index, DEFAULT_VIBRATION_VALUE); } void EmulatedController::SetLedPattern() { @@ -1110,7 +1118,7 @@ void EmulatedController::TriggerOnChange(ControllerTriggerType type, bool is_npa int EmulatedController::SetCallback(ControllerUpdateCallback update_callback) { std::lock_guard lock{mutex}; - callback_list.insert_or_assign(last_callback_key, update_callback); + callback_list.insert_or_assign(last_callback_key, std::move(update_callback)); return last_callback_key++; } diff --git a/src/core/hid/emulated_controller.h b/src/core/hid/emulated_controller.h index 425b3e7c4..e42aafebc 100644 --- a/src/core/hid/emulated_controller.h +++ b/src/core/hid/emulated_controller.h @@ -328,35 +328,38 @@ private: * @param callback A CallbackStatus containing the button status * @param index Button ID of the to be updated */ - void SetButton(Common::Input::CallbackStatus callback, std::size_t index, Common::UUID uuid); + void SetButton(const Common::Input::CallbackStatus& callback, std::size_t index, + Common::UUID uuid); /** * Updates the analog stick status of the controller * @param callback A CallbackStatus containing the analog stick status * @param index stick ID of the to be updated */ - void SetStick(Common::Input::CallbackStatus callback, std::size_t index, Common::UUID uuid); + void SetStick(const Common::Input::CallbackStatus& callback, std::size_t index, + Common::UUID uuid); /** * Updates the trigger status of the controller * @param callback A CallbackStatus containing the trigger status * @param index trigger ID of the to be updated */ - void SetTrigger(Common::Input::CallbackStatus callback, std::size_t index, Common::UUID uuid); + void SetTrigger(const Common::Input::CallbackStatus& callback, std::size_t index, + Common::UUID uuid); /** * Updates the motion status of the controller * @param callback A CallbackStatus containing gyro and accelerometer data * @param index motion ID of the to be updated */ - void SetMotion(Common::Input::CallbackStatus callback, std::size_t index); + void SetMotion(const Common::Input::CallbackStatus& callback, std::size_t index); /** * Updates the battery status of the controller * @param callback A CallbackStatus containing the battery status * @param index Button ID of the to be updated */ - void SetBattery(Common::Input::CallbackStatus callback, std::size_t index); + void SetBattery(const 
Common::Input::CallbackStatus& callback, std::size_t index); /** * Triggers a callback that something has changed on the controller status diff --git a/src/core/hid/emulated_devices.cpp b/src/core/hid/emulated_devices.cpp index 874780ec2..708480f2d 100644 --- a/src/core/hid/emulated_devices.cpp +++ b/src/core/hid/emulated_devices.cpp @@ -70,50 +70,55 @@ void EmulatedDevices::ReloadInput() { if (!mouse_button_devices[index]) { continue; } - Common::Input::InputCallback button_callback{ - [this, index](Common::Input::CallbackStatus callback) { - SetMouseButton(callback, index); - }}; - mouse_button_devices[index]->SetCallback(button_callback); + mouse_button_devices[index]->SetCallback({ + .on_change = + [this, index](const Common::Input::CallbackStatus& callback) { + SetMouseButton(callback, index); + }, + }); } for (std::size_t index = 0; index < mouse_analog_devices.size(); ++index) { if (!mouse_analog_devices[index]) { continue; } - Common::Input::InputCallback button_callback{ - [this, index](Common::Input::CallbackStatus callback) { - SetMouseAnalog(callback, index); - }}; - mouse_analog_devices[index]->SetCallback(button_callback); + mouse_analog_devices[index]->SetCallback({ + .on_change = + [this, index](const Common::Input::CallbackStatus& callback) { + SetMouseAnalog(callback, index); + }, + }); } if (mouse_stick_device) { - Common::Input::InputCallback button_callback{ - [this](Common::Input::CallbackStatus callback) { SetMouseStick(callback); }}; - mouse_stick_device->SetCallback(button_callback); + mouse_stick_device->SetCallback({ + .on_change = + [this](const Common::Input::CallbackStatus& callback) { SetMouseStick(callback); }, + }); } for (std::size_t index = 0; index < keyboard_devices.size(); ++index) { if (!keyboard_devices[index]) { continue; } - Common::Input::InputCallback button_callback{ - [this, index](Common::Input::CallbackStatus callback) { - SetKeyboardButton(callback, index); - }}; - keyboard_devices[index]->SetCallback(button_callback); + keyboard_devices[index]->SetCallback({ + .on_change = + [this, index](const Common::Input::CallbackStatus& callback) { + SetKeyboardButton(callback, index); + }, + }); } for (std::size_t index = 0; index < keyboard_modifier_devices.size(); ++index) { if (!keyboard_modifier_devices[index]) { continue; } - Common::Input::InputCallback button_callback{ - [this, index](Common::Input::CallbackStatus callback) { - SetKeyboardModifier(callback, index); - }}; - keyboard_modifier_devices[index]->SetCallback(button_callback); + keyboard_modifier_devices[index]->SetCallback({ + .on_change = + [this, index](const Common::Input::CallbackStatus& callback) { + SetKeyboardModifier(callback, index); + }, + }); } } @@ -159,7 +164,8 @@ void EmulatedDevices::RestoreConfig() { ReloadFromSettings(); } -void EmulatedDevices::SetKeyboardButton(Common::Input::CallbackStatus callback, std::size_t index) { +void EmulatedDevices::SetKeyboardButton(const Common::Input::CallbackStatus& callback, + std::size_t index) { if (index >= device_status.keyboard_values.size()) { return; } @@ -216,7 +222,7 @@ void EmulatedDevices::UpdateKey(std::size_t key_index, bool status) { } } -void EmulatedDevices::SetKeyboardModifier(Common::Input::CallbackStatus callback, +void EmulatedDevices::SetKeyboardModifier(const Common::Input::CallbackStatus& callback, std::size_t index) { if (index >= device_status.keyboard_moddifier_values.size()) { return; @@ -286,7 +292,8 @@ void EmulatedDevices::SetKeyboardModifier(Common::Input::CallbackStatus callback 
TriggerOnChange(DeviceTriggerType::KeyboardModdifier); } -void EmulatedDevices::SetMouseButton(Common::Input::CallbackStatus callback, std::size_t index) { +void EmulatedDevices::SetMouseButton(const Common::Input::CallbackStatus& callback, + std::size_t index) { if (index >= device_status.mouse_button_values.size()) { return; } @@ -347,7 +354,8 @@ void EmulatedDevices::SetMouseButton(Common::Input::CallbackStatus callback, std TriggerOnChange(DeviceTriggerType::Mouse); } -void EmulatedDevices::SetMouseAnalog(Common::Input::CallbackStatus callback, std::size_t index) { +void EmulatedDevices::SetMouseAnalog(const Common::Input::CallbackStatus& callback, + std::size_t index) { if (index >= device_status.mouse_analog_values.size()) { return; } @@ -374,7 +382,7 @@ void EmulatedDevices::SetMouseAnalog(Common::Input::CallbackStatus callback, std TriggerOnChange(DeviceTriggerType::Mouse); } -void EmulatedDevices::SetMouseStick(Common::Input::CallbackStatus callback) { +void EmulatedDevices::SetMouseStick(const Common::Input::CallbackStatus& callback) { std::lock_guard lock{mutex}; const auto touch_value = TransformToTouch(callback); @@ -435,7 +443,7 @@ void EmulatedDevices::TriggerOnChange(DeviceTriggerType type) { int EmulatedDevices::SetCallback(InterfaceUpdateCallback update_callback) { std::lock_guard lock{mutex}; - callback_list.insert_or_assign(last_callback_key, update_callback); + callback_list.insert_or_assign(last_callback_key, std::move(update_callback)); return last_callback_key++; } diff --git a/src/core/hid/emulated_devices.h b/src/core/hid/emulated_devices.h index c72327681..790d3b411 100644 --- a/src/core/hid/emulated_devices.h +++ b/src/core/hid/emulated_devices.h @@ -156,35 +156,34 @@ private: * @param callback A CallbackStatus containing the key status * @param index key ID to be updated */ - void SetKeyboardButton(Common::Input::CallbackStatus callback, std::size_t index); + void SetKeyboardButton(const Common::Input::CallbackStatus& callback, std::size_t index); /** * Updates the keyboard status of the keyboard device * @param callback A CallbackStatus containing the modifier key status * @param index modifier key ID to be updated */ - void SetKeyboardModifier(Common::Input::CallbackStatus callback, std::size_t index); + void SetKeyboardModifier(const Common::Input::CallbackStatus& callback, std::size_t index); /** * Updates the mouse button status of the mouse device * @param callback A CallbackStatus containing the button status * @param index Button ID to be updated */ - void SetMouseButton(Common::Input::CallbackStatus callback, std::size_t index); + void SetMouseButton(const Common::Input::CallbackStatus& callback, std::size_t index); /** * Updates the mouse wheel status of the mouse device * @param callback A CallbackStatus containing the wheel status * @param index wheel ID to be updated */ - void SetMouseAnalog(Common::Input::CallbackStatus callback, std::size_t index); + void SetMouseAnalog(const Common::Input::CallbackStatus& callback, std::size_t index); /** * Updates the mouse position status of the mouse device * @param callback A CallbackStatus containing the position status - * @param index stick ID to be updated */ - void SetMouseStick(Common::Input::CallbackStatus callback); + void SetMouseStick(const Common::Input::CallbackStatus& callback); /** * Triggers a callback that something has changed on the device status diff --git a/src/core/hid/hid_core.cpp b/src/core/hid/hid_core.cpp index 0c3eb5a62..a1c3bbb57 100644 --- a/src/core/hid/hid_core.cpp +++ 
b/src/core/hid/hid_core.cpp @@ -145,6 +145,16 @@ NpadIdType HIDCore::GetFirstNpadId() const { return NpadIdType::Player1; } +NpadIdType HIDCore::GetFirstDisconnectedNpadId() const { + for (std::size_t player_index = 0; player_index < available_controllers; ++player_index) { + const auto* const controller = GetEmulatedControllerByIndex(player_index); + if (!controller->IsConnected()) { + return controller->GetNpadIdType(); + } + } + return NpadIdType::Player1; +} + void HIDCore::EnableAllControllerConfiguration() { player_1->EnableConfiguration(); player_2->EnableConfiguration(); diff --git a/src/core/hid/hid_core.h b/src/core/hid/hid_core.h index 2fb0f7e19..837f7de49 100644 --- a/src/core/hid/hid_core.h +++ b/src/core/hid/hid_core.h @@ -45,6 +45,9 @@ public: /// Returns the first connected npad id NpadIdType GetFirstNpadId() const; + /// Returns the first disconnected npad id + NpadIdType GetFirstDisconnectedNpadId() const; + /// Sets all emulated controllers into configuring mode. void EnableAllControllerConfiguration(); diff --git a/src/core/hid/hid_types.h b/src/core/hid/hid_types.h index 7c12f01fc..4eca68533 100644 --- a/src/core/hid/hid_types.h +++ b/src/core/hid/hid_types.h @@ -496,6 +496,13 @@ struct VibrationValue { }; static_assert(sizeof(VibrationValue) == 0x10, "VibrationValue has incorrect size."); +constexpr VibrationValue DEFAULT_VIBRATION_VALUE{ + .low_amplitude = 0.0f, + .low_frequency = 160.0f, + .high_amplitude = 0.0f, + .high_frequency = 320.0f, +}; + // This is nn::hid::VibrationDeviceInfo struct VibrationDeviceInfo { VibrationDeviceType type{}; diff --git a/src/core/hle/kernel/init/init_slab_setup.cpp b/src/core/hle/kernel/init/init_slab_setup.cpp index 8ff0f695d..36fc0944a 100644 --- a/src/core/hle/kernel/init/init_slab_setup.cpp +++ b/src/core/hle/kernel/init/init_slab_setup.cpp @@ -9,6 +9,7 @@ #include "core/core.h" #include "core/hardware_properties.h" #include "core/hle/kernel/init/init_slab_setup.h" +#include "core/hle/kernel/k_code_memory.h" #include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_memory_layout.h" #include "core/hle/kernel/k_memory_manager.h" @@ -32,6 +33,7 @@ namespace Kernel::Init { HANDLER(KPort, (SLAB_COUNT(KPort)), ##__VA_ARGS__) \ HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ##__VA_ARGS__) \ HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ##__VA_ARGS__) \ + HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ##__VA_ARGS__) \ HANDLER(KSession, (SLAB_COUNT(KSession)), ##__VA_ARGS__) \ HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ##__VA_ARGS__) diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp index 1b429bc1e..783c69858 100644 --- a/src/core/hle/kernel/k_address_arbiter.cpp +++ b/src/core/hle/kernel/k_address_arbiter.cpp @@ -8,6 +8,7 @@ #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/time_manager.h" @@ -28,7 +29,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) { bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) { auto& monitor = system.Monitor(); - const auto current_core = system.CurrentCoreIndex(); + const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. 
// TODO(bunnei): We should call CanAccessAtomic(..) here. @@ -58,7 +59,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) { auto& monitor = system.Monitor(); - const auto current_core = system.CurrentCoreIndex(); + const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. // TODO(bunnei): We should call CanAccessAtomic(..) here. @@ -85,6 +86,27 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 return true; } +class ThreadQueueImplForKAddressArbiter final : public KThreadQueue { +public: + explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t) + : KThreadQueue(kernel_), m_tree(t) {} + + void CancelWait(KThread* waiting_thread, ResultCode wait_result, + bool cancel_timer_task) override { + // If the thread is waiting on an address arbiter, remove it from the tree. + if (waiting_thread->IsWaitingForAddressArbiter()) { + m_tree->erase(m_tree->iterator_to(*waiting_thread)); + waiting_thread->ClearAddressArbiter(); + } + + // Invoke the base cancel wait handler. + KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task); + } + +private: + KAddressArbiter::ThreadTree* m_tree; +}; + } // namespace ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) { @@ -96,14 +118,14 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) { auto it = thread_tree.nfind_light({addr, -1}); while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { + // End the thread's wait. KThread* target_thread = std::addressof(*it); - target_thread->SetSyncedObject(nullptr, ResultSuccess); + target_thread->EndWait(ResultSuccess); ASSERT(target_thread->IsWaitingForAddressArbiter()); - target_thread->Wakeup(); + target_thread->ClearAddressArbiter(); it = thread_tree.erase(it); - target_thread->ClearAddressArbiter(); ++num_waiters; } } @@ -129,14 +151,14 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 auto it = thread_tree.nfind_light({addr, -1}); while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { + // End the thread's wait. KThread* target_thread = std::addressof(*it); - target_thread->SetSyncedObject(nullptr, ResultSuccess); + target_thread->EndWait(ResultSuccess); ASSERT(target_thread->IsWaitingForAddressArbiter()); - target_thread->Wakeup(); + target_thread->ClearAddressArbiter(); it = thread_tree.erase(it); - target_thread->ClearAddressArbiter(); ++num_waiters; } } @@ -197,14 +219,14 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { + // End the thread's wait. 
KThread* target_thread = std::addressof(*it); - target_thread->SetSyncedObject(nullptr, ResultSuccess); + target_thread->EndWait(ResultSuccess); ASSERT(target_thread->IsWaitingForAddressArbiter()); - target_thread->Wakeup(); + target_thread->ClearAddressArbiter(); it = thread_tree.erase(it); - target_thread->ClearAddressArbiter(); ++num_waiters; } } @@ -214,6 +236,7 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) { // Prepare to wait. KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); + ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree)); { KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; @@ -224,9 +247,6 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement return ResultTerminationRequested; } - // Set the synced object. - cur_thread->SetSyncedObject(nullptr, ResultTimedOut); - // Read the value from userspace. s32 user_value{}; bool succeeded{}; @@ -256,31 +276,20 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement // Set the arbiter. cur_thread->SetAddressArbiter(&thread_tree, addr); thread_tree.insert(*cur_thread); - cur_thread->SetState(ThreadState::Waiting); - cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration); - } - - // Cancel the timer wait. - kernel.TimeManager().UnscheduleTimeEvent(cur_thread); - // Remove from the address arbiter. - { - KScopedSchedulerLock sl(kernel); - - if (cur_thread->IsWaitingForAddressArbiter()) { - thread_tree.erase(thread_tree.iterator_to(*cur_thread)); - cur_thread->ClearAddressArbiter(); - } + // Wait for the thread to finish. + cur_thread->BeginWait(std::addressof(wait_queue)); + cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration); } // Get the result. - KSynchronizationObject* dummy{}; - return cur_thread->GetWaitResult(&dummy); + return cur_thread->GetWaitResult(); } ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) { // Prepare to wait. KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); + ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree)); { KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; @@ -291,9 +300,6 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) { return ResultTerminationRequested; } - // Set the synced object. - cur_thread->SetSyncedObject(nullptr, ResultTimedOut); - // Read the value from userspace. s32 user_value{}; if (!ReadFromUser(system, &user_value, addr)) { @@ -316,26 +322,14 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) { // Set the arbiter. cur_thread->SetAddressArbiter(&thread_tree, addr); thread_tree.insert(*cur_thread); - cur_thread->SetState(ThreadState::Waiting); - cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration); - } - - // Cancel the timer wait. - kernel.TimeManager().UnscheduleTimeEvent(cur_thread); - // Remove from the address arbiter. - { - KScopedSchedulerLock sl(kernel); - - if (cur_thread->IsWaitingForAddressArbiter()) { - thread_tree.erase(thread_tree.iterator_to(*cur_thread)); - cur_thread->ClearAddressArbiter(); - } + // Wait for the thread to finish. + cur_thread->BeginWait(std::addressof(wait_queue)); + cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration); } // Get the result. 
- KSynchronizationObject* dummy{}; - return cur_thread->GetWaitResult(&dummy); + return cur_thread->GetWaitResult(); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h index e4fcdbc67..165b76747 100644 --- a/src/core/hle/kernel/k_auto_object.h +++ b/src/core/hle/kernel/k_auto_object.h @@ -170,6 +170,10 @@ public: } } + const std::string& GetName() const { + return name; + } + private: void RegisterWithKernel(); void UnregisterWithKernel(); diff --git a/src/core/hle/kernel/k_class_token.cpp b/src/core/hle/kernel/k_class_token.cpp index 0be0027be..21e2fe494 100644 --- a/src/core/hle/kernel/k_class_token.cpp +++ b/src/core/hle/kernel/k_class_token.cpp @@ -6,6 +6,7 @@ #include "core/hle/kernel/k_class_token.h" #include "core/hle/kernel/k_client_port.h" #include "core/hle/kernel/k_client_session.h" +#include "core/hle/kernel/k_code_memory.h" #include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_port.h" #include "core/hle/kernel/k_process.h" @@ -48,7 +49,7 @@ static_assert(ClassToken<KWritableEvent> == 0b10001001'00000000); static_assert(ClassToken<KTransferMemory> == 0b10010001'00000000); // static_assert(ClassToken<KDeviceAddressSpace> == 0b01100001'00000000); // static_assert(ClassToken<KSessionRequest> == 0b10100001'00000000); -// static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000); +static_assert(ClassToken<KCodeMemory> == 0b11000001'00000000); // Ensure that the token hierarchy is correct. @@ -79,7 +80,7 @@ static_assert(ClassToken<KWritableEvent> == ((0b10001001 << 8) | ClassToken<KAut static_assert(ClassToken<KTransferMemory> == ((0b10010001 << 8) | ClassToken<KAutoObject>)); // static_assert(ClassToken<KDeviceAddressSpace> == ((0b01100001 << 8) | ClassToken<KAutoObject>)); // static_assert(ClassToken<KSessionRequest> == ((0b10100001 << 8) | ClassToken<KAutoObject>)); -// static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>)); +static_assert(ClassToken<KCodeMemory> == ((0b11000001 << 8) | ClassToken<KAutoObject>)); // Ensure that the token hierarchy reflects the class hierarchy. diff --git a/src/core/hle/kernel/k_code_memory.cpp b/src/core/hle/kernel/k_code_memory.cpp new file mode 100644 index 000000000..d69f7ffb7 --- /dev/null +++ b/src/core/hle/kernel/k_code_memory.cpp @@ -0,0 +1,146 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "common/common_types.h" +#include "core/device_memory.h" +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/k_code_memory.h" +#include "core/hle/kernel/k_light_lock.h" +#include "core/hle/kernel/k_memory_block.h" +#include "core/hle/kernel/k_page_linked_list.h" +#include "core/hle/kernel/k_page_table.h" +#include "core/hle/kernel/k_process.h" +#include "core/hle/kernel/slab_helpers.h" +#include "core/hle/kernel/svc_types.h" +#include "core/hle/result.h" + +namespace Kernel { + +KCodeMemory::KCodeMemory(KernelCore& kernel_) + : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {} + +ResultCode KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) { + // Set members. + m_owner = kernel.CurrentProcess(); + + // Get the owner page table. + auto& page_table = m_owner->PageTable(); + + // Construct the page group. + KMemoryInfo kBlockInfo = page_table.QueryInfo(addr); + m_page_group = KPageLinkedList(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages()); + + // Lock the memory. 
+ R_TRY(page_table.LockForCodeMemory(addr, size)) + + // Clear the memory. + for (const auto& block : m_page_group.Nodes()) { + std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize()); + } + + // Set remaining tracking members. + m_address = addr; + m_is_initialized = true; + m_is_owner_mapped = false; + m_is_mapped = false; + + // We succeeded. + return ResultSuccess; +} + +void KCodeMemory::Finalize() { + // Unlock. + if (!m_is_mapped && !m_is_owner_mapped) { + const size_t size = m_page_group.GetNumPages() * PageSize; + m_owner->PageTable().UnlockForCodeMemory(m_address, size); + } +} + +ResultCode KCodeMemory::Map(VAddr address, size_t size) { + // Validate the size. + R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize); + + // Lock ourselves. + KScopedLightLock lk(m_lock); + + // Ensure we're not already mapped. + R_UNLESS(!m_is_mapped, ResultInvalidState); + + // Map the memory. + R_TRY(kernel.CurrentProcess()->PageTable().MapPages( + address, m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite)); + + // Mark ourselves as mapped. + m_is_mapped = true; + + return ResultSuccess; +} + +ResultCode KCodeMemory::Unmap(VAddr address, size_t size) { + // Validate the size. + R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize); + + // Lock ourselves. + KScopedLightLock lk(m_lock); + + // Unmap the memory. + R_TRY(kernel.CurrentProcess()->PageTable().UnmapPages(address, m_page_group, + KMemoryState::CodeOut)); + + // Mark ourselves as unmapped. + m_is_mapped = false; + + return ResultSuccess; +} + +ResultCode KCodeMemory::MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm) { + // Validate the size. + R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize); + + // Lock ourselves. + KScopedLightLock lk(m_lock); + + // Ensure we're not already mapped. + R_UNLESS(!m_is_owner_mapped, ResultInvalidState); + + // Convert the memory permission. + KMemoryPermission k_perm{}; + switch (perm) { + case Svc::MemoryPermission::Read: + k_perm = KMemoryPermission::UserRead; + break; + case Svc::MemoryPermission::ReadExecute: + k_perm = KMemoryPermission::UserReadExecute; + break; + default: + break; + } + + // Map the memory. + R_TRY( + m_owner->PageTable().MapPages(address, m_page_group, KMemoryState::GeneratedCode, k_perm)); + + // Mark ourselves as mapped. + m_is_owner_mapped = true; + + return ResultSuccess; +} + +ResultCode KCodeMemory::UnmapFromOwner(VAddr address, size_t size) { + // Validate the size. + R_UNLESS(m_page_group.GetNumPages() == Common::DivideUp(size, PageSize), ResultInvalidSize); + + // Lock ourselves. + KScopedLightLock lk(m_lock); + + // Unmap the memory. + R_TRY(m_owner->PageTable().UnmapPages(address, m_page_group, KMemoryState::GeneratedCode)); + + // Mark ourselves as unmapped. + m_is_owner_mapped = false; + + return ResultSuccess; +} + +} // namespace Kernel
\ No newline at end of file diff --git a/src/core/hle/kernel/k_code_memory.h b/src/core/hle/kernel/k_code_memory.h new file mode 100644 index 000000000..e0ba19a53 --- /dev/null +++ b/src/core/hle/kernel/k_code_memory.h @@ -0,0 +1,66 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "common/common_types.h" +#include "core/device_memory.h" +#include "core/hle/kernel/k_auto_object.h" +#include "core/hle/kernel/k_light_lock.h" +#include "core/hle/kernel/k_page_linked_list.h" +#include "core/hle/kernel/k_process.h" +#include "core/hle/kernel/slab_helpers.h" +#include "core/hle/kernel/svc_types.h" +#include "core/hle/result.h" + +namespace Kernel { + +enum class CodeMemoryOperation : u32 { + Map = 0, + MapToOwner = 1, + Unmap = 2, + UnmapFromOwner = 3, +}; + +class KCodeMemory final + : public KAutoObjectWithSlabHeapAndContainer<KCodeMemory, KAutoObjectWithList> { + KERNEL_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject); + +public: + explicit KCodeMemory(KernelCore& kernel_); + + ResultCode Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size); + void Finalize(); + + ResultCode Map(VAddr address, size_t size); + ResultCode Unmap(VAddr address, size_t size); + ResultCode MapToOwner(VAddr address, size_t size, Svc::MemoryPermission perm); + ResultCode UnmapFromOwner(VAddr address, size_t size); + + bool IsInitialized() const { + return m_is_initialized; + } + static void PostDestroy([[maybe_unused]] uintptr_t arg) {} + + KProcess* GetOwner() const { + return m_owner; + } + VAddr GetSourceAddress() const { + return m_address; + } + size_t GetSize() const { + return m_is_initialized ? m_page_group.GetNumPages() * PageSize : 0; + } + +private: + KPageLinkedList m_page_group{}; + KProcess* m_owner{}; + VAddr m_address{}; + KLightLock m_lock; + bool m_is_initialized{}; + bool m_is_owner_mapped{}; + bool m_is_mapped{}; +}; + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp index 7fa9b8cc3..aadcc297a 100644 --- a/src/core/hle/kernel/k_condition_variable.cpp +++ b/src/core/hle/kernel/k_condition_variable.cpp @@ -11,6 +11,7 @@ #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/svc_common.h" #include "core/hle/kernel/svc_results.h" @@ -33,7 +34,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) { bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero, u32 new_orr_mask) { auto& monitor = system.Monitor(); - const auto current_core = system.CurrentCoreIndex(); + const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); // Load the value from the address. const auto expected = monitor.ExclusiveRead32(current_core, address); @@ -57,6 +58,48 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero return true; } +class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue { +public: + explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_) + : KThreadQueue(kernel_) {} + + void CancelWait(KThread* waiting_thread, ResultCode wait_result, + bool cancel_timer_task) override { + // Remove the thread as a waiter from its owner. 
+ waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread); + + // Invoke the base cancel wait handler. + KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task); + } +}; + +class ThreadQueueImplForKConditionVariableWaitConditionVariable final : public KThreadQueue { +private: + KConditionVariable::ThreadTree* m_tree; + +public: + explicit ThreadQueueImplForKConditionVariableWaitConditionVariable( + KernelCore& kernel_, KConditionVariable::ThreadTree* t) + : KThreadQueue(kernel_), m_tree(t) {} + + void CancelWait(KThread* waiting_thread, ResultCode wait_result, + bool cancel_timer_task) override { + // Remove the thread as a waiter from its owner. + if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) { + owner->RemoveWaiter(waiting_thread); + } + + // If the thread is waiting on a condvar, remove it from the tree. + if (waiting_thread->IsWaitingForConditionVariable()) { + m_tree->erase(m_tree->iterator_to(*waiting_thread)); + waiting_thread->ClearConditionVariable(); + } + + // Invoke the base cancel wait handler. + KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task); + } +}; + } // namespace KConditionVariable::KConditionVariable(Core::System& system_) @@ -78,84 +121,77 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) { // Determine the next tag. u32 next_value{}; - if (next_owner_thread) { + if (next_owner_thread != nullptr) { next_value = next_owner_thread->GetAddressKeyValue(); if (num_waiters > 1) { next_value |= Svc::HandleWaitMask; } - next_owner_thread->SetSyncedObject(nullptr, ResultSuccess); - next_owner_thread->Wakeup(); - } - - // Write the value to userspace. - if (!WriteToUser(system, addr, std::addressof(next_value))) { - if (next_owner_thread) { - next_owner_thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory); + // Write the value to userspace. + ResultCode result{ResultSuccess}; + if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] { + result = ResultSuccess; + } else { + result = ResultInvalidCurrentMemory; } - return ResultInvalidCurrentMemory; + // Signal the next owner thread. + next_owner_thread->EndWait(result); + return result; + } else { + // Just write the value to userspace. + R_UNLESS(WriteToUser(system, addr, std::addressof(next_value)), + ResultInvalidCurrentMemory); + + return ResultSuccess; } } - - return ResultSuccess; } ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) { KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); + ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel); // Wait for the address. + KThread* owner_thread{}; { - KScopedAutoObject<KThread> owner_thread; - ASSERT(owner_thread.IsNull()); - { - KScopedSchedulerLock sl(kernel); - cur_thread->SetSyncedObject(nullptr, ResultSuccess); + KScopedSchedulerLock sl(kernel); - // Check if the thread should terminate. - R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested); + // Check if the thread should terminate. + R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested); - { - // Read the tag from userspace. - u32 test_tag{}; - R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), - ResultInvalidCurrentMemory); - - // If the tag isn't the handle (with wait mask), we're done. - R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), ResultSuccess); - - // Get the lock owner thread. 
- owner_thread = - kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>( - handle); - R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle); - - // Update the lock. - cur_thread->SetAddressKey(addr, value); - owner_thread->AddWaiter(cur_thread); - cur_thread->SetState(ThreadState::Waiting); - cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); - cur_thread->SetMutexWaitAddressForDebugging(addr); - } - } - ASSERT(owner_thread.IsNotNull()); - } + // Read the tag from userspace. + u32 test_tag{}; + R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory); - // Remove the thread as a waiter from the lock owner. - { - KScopedSchedulerLock sl(kernel); - KThread* owner_thread = cur_thread->GetLockOwner(); - if (owner_thread != nullptr) { - owner_thread->RemoveWaiter(cur_thread); - } + // If the tag isn't the handle (with wait mask), we're done. + R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask)); + + // Get the lock owner thread. + owner_thread = kernel.CurrentProcess() + ->GetHandleTable() + .GetObjectWithoutPseudoHandle<KThread>(handle) + .ReleasePointerUnsafe(); + R_UNLESS(owner_thread != nullptr, ResultInvalidHandle); + + // Update the lock. + cur_thread->SetAddressKey(addr, value); + owner_thread->AddWaiter(cur_thread); + + // Begin waiting. + cur_thread->BeginWait(std::addressof(wait_queue)); + cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); + cur_thread->SetMutexWaitAddressForDebugging(addr); } + // Close our reference to the owner thread, now that the wait is over. + owner_thread->Close(); + // Get the wait result. - KSynchronizationObject* dummy{}; - return cur_thread->GetWaitResult(std::addressof(dummy)); + return cur_thread->GetWaitResult(); } -KThread* KConditionVariable::SignalImpl(KThread* thread) { +void KConditionVariable::SignalImpl(KThread* thread) { // Check pre-conditions. ASSERT(kernel.GlobalSchedulerContext().IsLocked()); @@ -169,18 +205,16 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) { // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. // TODO(bunnei): We should call CanAccessAtomic(..) here. can_access = true; - if (can_access) { + if (can_access) [[likely]] { UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag, Svc::HandleWaitMask); } } - KThread* thread_to_close = nullptr; - if (can_access) { + if (can_access) [[likely]] { if (prev_tag == Svc::InvalidHandle) { // If nobody held the lock previously, we're all good. - thread->SetSyncedObject(nullptr, ResultSuccess); - thread->Wakeup(); + thread->EndWait(ResultSuccess); } else { // Get the previous owner. KThread* owner_thread = kernel.CurrentProcess() @@ -189,33 +223,22 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) { static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask)) .ReleasePointerUnsafe(); - if (owner_thread) { + if (owner_thread) [[likely]] { // Add the thread as a waiter on the owner. owner_thread->AddWaiter(thread); - thread_to_close = owner_thread; + owner_thread->Close(); } else { // The lock was tagged with a thread that doesn't exist. - thread->SetSyncedObject(nullptr, ResultInvalidState); - thread->Wakeup(); + thread->EndWait(ResultInvalidState); } } } else { // If the address wasn't accessible, note so. 
- thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory); - thread->Wakeup(); + thread->EndWait(ResultInvalidCurrentMemory); } - - return thread_to_close; } void KConditionVariable::Signal(u64 cv_key, s32 count) { - // Prepare for signaling. - constexpr int MaxThreads = 16; - - KLinkedList<KThread> thread_list{kernel}; - std::array<KThread*, MaxThreads> thread_array; - s32 num_to_close{}; - // Perform signaling. s32 num_waiters{}; { @@ -226,14 +249,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { (it->GetConditionVariableKey() == cv_key)) { KThread* target_thread = std::addressof(*it); - if (KThread* thread = SignalImpl(target_thread); thread != nullptr) { - if (num_to_close < MaxThreads) { - thread_array[num_to_close++] = thread; - } else { - thread_list.push_back(*thread); - } - } - + this->SignalImpl(target_thread); it = thread_tree.erase(it); target_thread->ClearConditionVariable(); ++num_waiters; @@ -245,27 +261,16 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) { WriteToUser(system, cv_key, std::addressof(has_waiter_flag)); } } - - // Close threads in the array. - for (auto i = 0; i < num_to_close; ++i) { - thread_array[i]->Close(); - } - - // Close threads in the list. - for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) { - (*it).Close(); - } } ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) { // Prepare to wait. - KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); + KThread* cur_thread = GetCurrentThreadPointer(kernel); + ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue( + kernel, std::addressof(thread_tree)); { - KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; - - // Set the synced object. - cur_thread->SetSyncedObject(nullptr, ResultTimedOut); + KScopedSchedulerLockAndSleep slp(kernel, cur_thread, timeout); // Check that the thread isn't terminating. if (cur_thread->IsTerminationRequested()) { @@ -290,8 +295,7 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) } // Wake up the next owner. - next_owner_thread->SetSyncedObject(nullptr, ResultSuccess); - next_owner_thread->Wakeup(); + next_owner_thread->EndWait(ResultSuccess); } // Write to the cv key. @@ -308,40 +312,21 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) } } - // Update condition variable tracking. - { - cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value); - thread_tree.insert(*cur_thread); - } + // If timeout is zero, time out. + R_UNLESS(timeout != 0, ResultTimedOut); - // If the timeout is non-zero, set the thread as waiting. - if (timeout != 0) { - cur_thread->SetState(ThreadState::Waiting); - cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); - cur_thread->SetMutexWaitAddressForDebugging(addr); - } - } - - // Cancel the timer wait. - kernel.TimeManager().UnscheduleTimeEvent(cur_thread); - - // Remove from the condition variable. - { - KScopedSchedulerLock sl(kernel); - - if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) { - owner->RemoveWaiter(cur_thread); - } + // Update condition variable tracking. + cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value); + thread_tree.insert(*cur_thread); - if (cur_thread->IsWaitingForConditionVariable()) { - thread_tree.erase(thread_tree.iterator_to(*cur_thread)); - cur_thread->ClearConditionVariable(); - } + // Begin waiting. 
+ cur_thread->BeginWait(std::addressof(wait_queue)); + cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar); + cur_thread->SetMutexWaitAddressForDebugging(addr); } - // Get the result. - KSynchronizationObject* dummy{}; - return cur_thread->GetWaitResult(std::addressof(dummy)); + // Get the wait result. + return cur_thread->GetWaitResult(); } } // namespace Kernel diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h index 861dbd420..5e4815d08 100644 --- a/src/core/hle/kernel/k_condition_variable.h +++ b/src/core/hle/kernel/k_condition_variable.h @@ -34,7 +34,7 @@ public: [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout); private: - [[nodiscard]] KThread* SignalImpl(KThread* thread); + void SignalImpl(KThread* thread); ThreadTree thread_tree; diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp index e90fc0628..cf95f0852 100644 --- a/src/core/hle/kernel/k_handle_table.cpp +++ b/src/core/hle/kernel/k_handle_table.cpp @@ -13,6 +13,7 @@ ResultCode KHandleTable::Finalize() { // Get the table and clear our record of it. u16 saved_table_size = 0; { + KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); std::swap(m_table_size, saved_table_size); @@ -43,6 +44,7 @@ bool KHandleTable::Remove(Handle handle) { // Find the object and free the entry. KAutoObject* obj = nullptr; { + KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); if (this->IsValidHandle(handle)) { @@ -62,6 +64,7 @@ bool KHandleTable::Remove(Handle handle) { } ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) { + KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); // Never exceed our capacity. @@ -84,6 +87,7 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) { } ResultCode KHandleTable::Reserve(Handle* out_handle) { + KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); // Never exceed our capacity. @@ -94,6 +98,7 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) { } void KHandleTable::Unreserve(Handle handle) { + KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); // Unpack the handle. @@ -112,6 +117,7 @@ void KHandleTable::Unreserve(Handle handle) { } void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) { + KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); // Unpack the handle. diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h index 95ec905ae..4b114ec2f 100644 --- a/src/core/hle/kernel/k_handle_table.h +++ b/src/core/hle/kernel/k_handle_table.h @@ -68,6 +68,7 @@ public: template <typename T = KAutoObject> KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const { // Lock and look up in table. + KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); if constexpr (std::is_same_v<T, KAutoObject>) { @@ -122,6 +123,7 @@ public: size_t num_opened; { // Lock the table. + KScopedDisableDispatch dd(kernel); KScopedSpinLock lk(m_lock); for (num_opened = 0; num_opened < num_handles; num_opened++) { // Get the current handle. diff --git a/src/core/hle/kernel/k_light_condition_variable.cpp b/src/core/hle/kernel/k_light_condition_variable.cpp new file mode 100644 index 000000000..a8001fffc --- /dev/null +++ b/src/core/hle/kernel/k_light_condition_variable.cpp @@ -0,0 +1,80 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include "core/hle/kernel/k_light_condition_variable.h" +#include "core/hle/kernel/k_scheduler.h" +#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" +#include "core/hle/kernel/k_thread_queue.h" +#include "core/hle/kernel/svc_results.h" + +namespace Kernel { + +namespace { + +class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue { +public: + ThreadQueueImplForKLightConditionVariable(KernelCore& kernel_, KThread::WaiterList* wl, + bool term) + : KThreadQueue(kernel_), m_wait_list(wl), m_allow_terminating_thread(term) {} + + void CancelWait(KThread* waiting_thread, ResultCode wait_result, + bool cancel_timer_task) override { + // Only process waits if we're allowed to. + if (ResultTerminationRequested == wait_result && m_allow_terminating_thread) { + return; + } + + // Remove the thread from the waiting thread from the light condition variable. + m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread)); + + // Invoke the base cancel wait handler. + KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task); + } + +private: + KThread::WaiterList* m_wait_list; + bool m_allow_terminating_thread; +}; + +} // namespace + +void KLightConditionVariable::Wait(KLightLock* lock, s64 timeout, bool allow_terminating_thread) { + // Create thread queue. + KThread* owner = GetCurrentThreadPointer(kernel); + + ThreadQueueImplForKLightConditionVariable wait_queue(kernel, std::addressof(wait_list), + allow_terminating_thread); + + // Sleep the thread. + { + KScopedSchedulerLockAndSleep lk(kernel, owner, timeout); + + if (!allow_terminating_thread && owner->IsTerminationRequested()) { + lk.CancelSleep(); + return; + } + + lock->Unlock(); + + // Add the thread to the queue. + wait_list.push_back(*owner); + + // Begin waiting. + owner->BeginWait(std::addressof(wait_queue)); + } + + // Re-acquire the lock. + lock->Lock(); +} + +void KLightConditionVariable::Broadcast() { + KScopedSchedulerLock lk(kernel); + + // Signal all threads. + for (auto it = wait_list.begin(); it != wait_list.end(); it = wait_list.erase(it)) { + it->EndWait(ResultSuccess); + } +} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_light_condition_variable.h b/src/core/hle/kernel/k_light_condition_variable.h index fb0ad783a..5d6d7f128 100644 --- a/src/core/hle/kernel/k_light_condition_variable.h +++ b/src/core/hle/kernel/k_light_condition_variable.h @@ -2,72 +2,24 @@ // Licensed under GPLv2 or any later version // Refer to the license.txt file included. -// This file references various implementation details from Atmosphere, an open-source firmware for -// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. - #pragma once #include "common/common_types.h" -#include "core/hle/kernel/k_scheduler.h" -#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" -#include "core/hle/kernel/time_manager.h" +#include "core/hle/kernel/k_thread.h" namespace Kernel { + class KernelCore; +class KLightLock; class KLightConditionVariable { public: explicit KLightConditionVariable(KernelCore& kernel_) : kernel{kernel_} {} - void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true) { - WaitImpl(lock, timeout, allow_terminating_thread); - } - - void Broadcast() { - KScopedSchedulerLock lk{kernel}; - - // Signal all threads. 
- for (auto& thread : wait_list) { - thread.SetState(ThreadState::Runnable); - } - } + void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true); + void Broadcast(); private: - void WaitImpl(KLightLock* lock, s64 timeout, bool allow_terminating_thread) { - KThread* owner = GetCurrentThreadPointer(kernel); - - // Sleep the thread. - { - KScopedSchedulerLockAndSleep lk{kernel, owner, timeout}; - - if (!allow_terminating_thread && owner->IsTerminationRequested()) { - lk.CancelSleep(); - return; - } - - lock->Unlock(); - - // Set the thread as waiting. - GetCurrentThread(kernel).SetState(ThreadState::Waiting); - - // Add the thread to the queue. - wait_list.push_back(GetCurrentThread(kernel)); - } - - // Remove the thread from the wait list. - { - KScopedSchedulerLock sl{kernel}; - - wait_list.erase(wait_list.iterator_to(GetCurrentThread(kernel))); - } - - // Cancel the task that the sleep setup. - kernel.TimeManager().UnscheduleTimeEvent(owner); - - // Re-acquire the lock. - lock->Lock(); - } - KernelCore& kernel; KThread::WaiterList wait_list{}; }; diff --git a/src/core/hle/kernel/k_light_lock.cpp b/src/core/hle/kernel/k_light_lock.cpp index 0896e705f..4620342eb 100644 --- a/src/core/hle/kernel/k_light_lock.cpp +++ b/src/core/hle/kernel/k_light_lock.cpp @@ -5,44 +5,59 @@ #include "core/hle/kernel/k_light_lock.h" #include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/kernel.h" namespace Kernel { +namespace { + +class ThreadQueueImplForKLightLock final : public KThreadQueue { +public: + explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {} + + void CancelWait(KThread* waiting_thread, ResultCode wait_result, + bool cancel_timer_task) override { + // Remove the thread as a waiter from its owner. + if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) { + owner->RemoveWaiter(waiting_thread); + } + + // Invoke the base cancel wait handler. + KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task); + } +}; + +} // namespace + void KLightLock::Lock() { const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)); - const uintptr_t cur_thread_tag = (cur_thread | 1); while (true) { uintptr_t old_tag = tag.load(std::memory_order_relaxed); - while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1, + while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? 
cur_thread : (old_tag | 1), std::memory_order_acquire)) { - if ((old_tag | 1) == cur_thread_tag) { - return; - } } - if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) { + if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) { break; } - - LockSlowPath(old_tag | 1, cur_thread); } } void KLightLock::Unlock() { const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel)); + uintptr_t expected = cur_thread; - do { - if (expected != cur_thread) { - return UnlockSlowPath(cur_thread); - } - } while (!tag.compare_exchange_weak(expected, 0, std::memory_order_release)); + if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) { + this->UnlockSlowPath(cur_thread); + } } -void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { +bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread); + ThreadQueueImplForKLightLock wait_queue(kernel); // Pend the current thread waiting on the owner thread. { @@ -50,7 +65,7 @@ void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { // Ensure we actually have locking to do. if (tag.load(std::memory_order_relaxed) != _owner) { - return; + return false; } // Add the current thread as a waiter on the owner. @@ -58,22 +73,15 @@ void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) { cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag))); owner_thread->AddWaiter(cur_thread); - // Set thread states. - cur_thread->SetState(ThreadState::Waiting); + // Begin waiting to hold the lock. + cur_thread->BeginWait(std::addressof(wait_queue)); if (owner_thread->IsSuspended()) { owner_thread->ContinueIfHasKernelWaiters(); } } - // We're no longer waiting on the lock owner. - { - KScopedSchedulerLock sl{kernel}; - - if (KThread* owner_thread = cur_thread->GetLockOwner(); owner_thread != nullptr) { - owner_thread->RemoveWaiter(cur_thread); - } - } + return true; } void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) { @@ -81,22 +89,20 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) { // Unlock. { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl(kernel); // Get the next owner. - s32 num_waiters = 0; + s32 num_waiters; KThread* next_owner = owner_thread->RemoveWaiterByKey( std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag))); // Pass the lock to the next owner. uintptr_t next_tag = 0; if (next_owner != nullptr) { - next_tag = reinterpret_cast<uintptr_t>(next_owner); - if (num_waiters > 1) { - next_tag |= 0x1; - } + next_tag = + reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(num_waiters > 1); - next_owner->SetState(ThreadState::Runnable); + next_owner->EndWait(ResultSuccess); if (next_owner->IsSuspended()) { next_owner->ContinueIfHasKernelWaiters(); @@ -110,7 +116,7 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) { } // Write the new tag value. 
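+        // For reference, the tag is a tagged pointer: 0 when the lock is free,
+        // the owner KThread* when held uncontended, and (owner | 1) while other
+        // threads are waiting, so a value can be decomposed as, e.g.:
+        //
+        //     auto* owner      = reinterpret_cast<KThread*>(next_tag & ~uintptr_t{1});
+        //     bool has_waiters = (next_tag & 1) != 0;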
- tag.store(next_tag); + tag.store(next_tag, std::memory_order_release); } } diff --git a/src/core/hle/kernel/k_light_lock.h b/src/core/hle/kernel/k_light_lock.h index ad853661d..4163b8a85 100644 --- a/src/core/hle/kernel/k_light_lock.h +++ b/src/core/hle/kernel/k_light_lock.h @@ -20,7 +20,7 @@ public: void Unlock(); - void LockSlowPath(uintptr_t owner, uintptr_t cur_thread); + bool LockSlowPath(uintptr_t owner, uintptr_t cur_thread); void UnlockSlowPath(uintptr_t cur_thread); diff --git a/src/core/hle/kernel/k_memory_block.h b/src/core/hle/kernel/k_memory_block.h index a7fdb5fb8..fd491146f 100644 --- a/src/core/hle/kernel/k_memory_block.h +++ b/src/core/hle/kernel/k_memory_block.h @@ -131,6 +131,26 @@ enum class KMemoryPermission : u8 { UserMask = static_cast<u8>(Svc::MemoryPermission::Read | Svc::MemoryPermission::Write | Svc::MemoryPermission::Execute), + + KernelShift = 3, + + KernelRead = Read << KernelShift, + KernelWrite = Write << KernelShift, + KernelExecute = Execute << KernelShift, + + NotMapped = (1 << (2 * KernelShift)), + + KernelReadWrite = KernelRead | KernelWrite, + KernelReadExecute = KernelRead | KernelExecute, + + UserRead = Read | KernelRead, + UserWrite = Write | KernelWrite, + UserExecute = Execute, + + UserReadWrite = UserRead | UserWrite, + UserReadExecute = UserRead | UserExecute, + + IpcLockChangeMask = NotMapped | UserReadWrite }; DECLARE_ENUM_FLAG_OPERATORS(KMemoryPermission); diff --git a/src/core/hle/kernel/k_page_linked_list.h b/src/core/hle/kernel/k_page_linked_list.h index 3362fb236..0e2ae582a 100644 --- a/src/core/hle/kernel/k_page_linked_list.h +++ b/src/core/hle/kernel/k_page_linked_list.h @@ -27,6 +27,10 @@ public: return num_pages; } + constexpr std::size_t GetSize() const { + return GetNumPages() * PageSize; + } + private: u64 addr{}; std::size_t num_pages{}; diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp index 9bda5c5b2..99982e5a3 100644 --- a/src/core/hle/kernel/k_page_table.cpp +++ b/src/core/hle/kernel/k_page_table.cpp @@ -368,6 +368,33 @@ ResultCode KPageTable::UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, st return ResultSuccess; } +ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size, + KPageTable& src_page_table, VAddr src_addr) { + std::lock_guard lock{page_table_lock}; + + const std::size_t num_pages{size / PageSize}; + + // Check that the memory is mapped in the destination process. + size_t num_allocator_blocks; + R_TRY(CheckMemoryState(&num_allocator_blocks, dst_addr, size, KMemoryState::All, + KMemoryState::SharedCode, KMemoryPermission::UserReadWrite, + KMemoryPermission::UserReadWrite, KMemoryAttribute::All, + KMemoryAttribute::None)); + + // Check that the memory is mapped in the source process. + R_TRY(src_page_table.CheckMemoryState(src_addr, size, KMemoryState::FlagCanMapProcess, + KMemoryState::FlagCanMapProcess, KMemoryPermission::None, + KMemoryPermission::None, KMemoryAttribute::All, + KMemoryAttribute::None)); + + CASCADE_CODE(Operate(dst_addr, num_pages, KMemoryPermission::None, OperationType::Unmap)); + + // Apply the memory block update. 
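+    // Marking the range Free with no permissions and no attributes is what
+    // makes later queries observe the region as unmapped.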
+ block_manager->Update(dst_addr, num_pages, KMemoryState::Free, KMemoryPermission::None, + KMemoryAttribute::None); + + return ResultSuccess; +} void KPageTable::MapPhysicalMemory(KPageLinkedList& page_linked_list, VAddr start, VAddr end) { auto node{page_linked_list.Nodes().begin()}; PAddr map_addr{node->GetAddress()}; @@ -942,6 +969,60 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) return ResultSuccess; } +ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + KMemoryPermission new_perm = KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite; + + KMemoryPermission old_perm{}; + + if (const ResultCode result{CheckMemoryState( + nullptr, &old_perm, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, + KMemoryState::FlagCanCodeMemory, KMemoryPermission::Mask, + KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None)}; + result.IsError()) { + return result; + } + + new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; + + block_manager->UpdateLock( + addr, size / PageSize, + [](KMemoryBlockManager::iterator block, KMemoryPermission permission) { + block->ShareToDevice(permission); + }, + new_perm); + + return ResultSuccess; +} + +ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) { + std::lock_guard lock{page_table_lock}; + + KMemoryPermission new_perm = KMemoryPermission::UserReadWrite; + + KMemoryPermission old_perm{}; + + if (const ResultCode result{CheckMemoryState( + nullptr, &old_perm, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, + KMemoryState::FlagCanCodeMemory, KMemoryPermission::None, KMemoryPermission::None, + KMemoryAttribute::All, KMemoryAttribute::Locked)}; + result.IsError()) { + return result; + } + + new_perm = (new_perm != KMemoryPermission::None) ? new_perm : old_perm; + + block_manager->UpdateLock( + addr, size / PageSize, + [](KMemoryBlockManager::iterator block, KMemoryPermission permission) { + block->UnshareToDevice(permission); + }, + new_perm); + + return ResultSuccess; +} + ResultCode KPageTable::InitializeMemoryLayout(VAddr start, VAddr end) { block_manager = std::make_unique<KMemoryBlockManager>(start, end); @@ -1231,4 +1312,42 @@ ResultCode KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermissi return ResultSuccess; } +ResultCode KPageTable::CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size, + KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr) const { + // Get information about the first block. + const VAddr last_addr = addr + size - 1; + KMemoryBlockManager::const_iterator it{block_manager->FindIterator(addr)}; + KMemoryInfo info = it->GetMemoryInfo(); + + // If the start address isn't aligned, we need a block. + const size_t blocks_for_start_align = + (Common::AlignDown(addr, PageSize) != info.GetAddress()) ? 1 : 0; + + while (true) { + // Validate against the provided masks. + R_TRY(CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); + + // Break once we're done. + if (last_addr <= info.GetLastAddress()) { + break; + } + + // Advance our iterator. + it++; + info = it->GetMemoryInfo(); + } + + // If the end address isn't aligned, we need a block. + const size_t blocks_for_end_align = + (Common::AlignUp(addr + size, PageSize) != info.GetEndAddress()) ? 
1 : 0; + + if (out_blocks_needed != nullptr) { + *out_blocks_needed = blocks_for_start_align + blocks_for_end_align; + } + + return ResultSuccess; +} + } // namespace Kernel diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h index b7ec38f06..d784aa67e 100644 --- a/src/core/hle/kernel/k_page_table.h +++ b/src/core/hle/kernel/k_page_table.h @@ -33,6 +33,8 @@ public: KMemoryPermission perm); ResultCode MapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); ResultCode UnmapProcessCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size); + ResultCode UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table, + VAddr src_addr); ResultCode MapPhysicalMemory(VAddr addr, std::size_t size); ResultCode UnmapPhysicalMemory(VAddr addr, std::size_t size); ResultCode UnmapMemory(VAddr addr, std::size_t size); @@ -55,6 +57,8 @@ public: KMemoryPermission perm, PAddr map_addr = 0); ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size); ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size); + ResultCode LockForCodeMemory(VAddr addr, std::size_t size); + ResultCode UnlockForCodeMemory(VAddr addr, std::size_t size); Common::PageTable& PageTableImpl() { return page_table_impl; @@ -115,6 +119,10 @@ private: return CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); } + ResultCode CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size, + KMemoryState state_mask, KMemoryState state, + KMemoryPermission perm_mask, KMemoryPermission perm, + KMemoryAttribute attr_mask, KMemoryAttribute attr) const; std::recursive_mutex page_table_lock; std::unique_ptr<KMemoryBlockManager> block_manager; diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp index 1aad061e1..aee313995 100644 --- a/src/core/hle/kernel/k_process.cpp +++ b/src/core/hle/kernel/k_process.cpp @@ -28,7 +28,6 @@ #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/svc_results.h" -#include "core/hle/lock.h" #include "core/memory.h" namespace Kernel { @@ -60,6 +59,7 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority thread->GetContext64().cpu_registers[0] = 0; thread->GetContext32().cpu_registers[1] = thread_handle; thread->GetContext64().cpu_registers[1] = thread_handle; + thread->DisableDispatch(); auto& kernel = system.Kernel(); // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires @@ -227,12 +227,15 @@ void KProcess::PinCurrentThread() { const s32 core_id = GetCurrentCoreId(kernel); KThread* cur_thread = GetCurrentThreadPointer(kernel); - // Pin it. - PinThread(core_id, cur_thread); - cur_thread->Pin(); + // If the thread isn't terminated, pin it. + if (!cur_thread->IsTerminationRequested()) { + // Pin it. + PinThread(core_id, cur_thread); + cur_thread->Pin(); - // An update is needed. - KScheduler::SetSchedulerUpdateNeeded(kernel); + // An update is needed. + KScheduler::SetSchedulerUpdateNeeded(kernel); + } } void KProcess::UnpinCurrentThread() { @@ -250,6 +253,20 @@ void KProcess::UnpinCurrentThread() { KScheduler::SetSchedulerUpdateNeeded(kernel); } +void KProcess::UnpinThread(KThread* thread) { + ASSERT(kernel.GlobalSchedulerContext().IsLocked()); + + // Get the thread's core id. + const auto core_id = thread->GetActiveCore(); + + // Unpin it. + UnpinThread(core_id, thread); + thread->Unpin(); + + // An update is needed. 
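+    // (This only flags the update; the actual reschedule is deferred until the
+    // scheduler lock is released and EnableScheduling runs.)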
+ KScheduler::SetSchedulerUpdateNeeded(kernel); +} + ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address, [[maybe_unused]] size_t size) { // Lock ourselves, to prevent concurrent access. @@ -525,7 +542,6 @@ void KProcess::FreeTLSRegion(VAddr tls_address) { } void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) { - std::lock_guard lock{HLE::g_hle_lock}; const auto ReprotectSegment = [&](const CodeSet::Segment& segment, KMemoryPermission permission) { page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission); diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h index 8a8c1fcbb..cb93c7e24 100644 --- a/src/core/hle/kernel/k_process.h +++ b/src/core/hle/kernel/k_process.h @@ -347,6 +347,7 @@ public: void PinCurrentThread(); void UnpinCurrentThread(); + void UnpinThread(KThread* thread); KLightLock& GetStateLock() { return state_lock; diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index 6a7d80d03..277201de4 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp @@ -240,8 +240,8 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s3 // If the thread is runnable, we want to change its priority in the queue. if (thread->GetRawState() == ThreadState::Runnable) { - GetPriorityQueue(kernel).ChangePriority( - old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread); + GetPriorityQueue(kernel).ChangePriority(old_priority, + thread == kernel.GetCurrentEmuThread(), thread); IncrementScheduledCount(thread); SetSchedulerUpdateNeeded(kernel); } @@ -360,7 +360,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) { } bool KScheduler::CanSchedule(KernelCore& kernel) { - return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1; + return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() <= 1; } bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) { @@ -376,20 +376,30 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) { } void KScheduler::DisableScheduling(KernelCore& kernel) { - if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { - ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0); - scheduler->GetCurrentThread()->DisableDispatch(); + // If we are shutting down the kernel, none of this is relevant anymore. + if (kernel.IsShuttingDown()) { + return; } + + ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0); + GetCurrentThreadPointer(kernel)->DisableDispatch(); } void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { - if (auto* scheduler = kernel.CurrentScheduler(); scheduler) { - ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1); - if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) { - scheduler->GetCurrentThread()->EnableDispatch(); - } + // If we are shutting down the kernel, none of this is relevant anymore. 
+ if (kernel.IsShuttingDown()) { + return; + } + + auto* current_thread = GetCurrentThreadPointer(kernel); + + ASSERT(current_thread->GetDisableDispatchCount() >= 1); + + if (current_thread->GetDisableDispatchCount() > 1) { + current_thread->EnableDispatch(); + } else { + RescheduleCores(kernel, cores_needing_scheduling); } - RescheduleCores(kernel, cores_needing_scheduling); } u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { @@ -617,13 +627,17 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c state.highest_priority_thread = nullptr; } -KScheduler::~KScheduler() { +void KScheduler::Finalize() { if (idle_thread) { idle_thread->Close(); idle_thread = nullptr; } } +KScheduler::~KScheduler() { + ASSERT(!idle_thread); +} + KThread* KScheduler::GetCurrentThread() const { if (auto result = current_thread.load(); result) { return result; @@ -642,10 +656,12 @@ void KScheduler::RescheduleCurrentCore() { if (phys_core.IsInterrupted()) { phys_core.ClearInterrupt(); } + guard.Lock(); if (state.needs_scheduling.load()) { Schedule(); } else { + GetCurrentThread()->EnableDispatch(); guard.Unlock(); } } @@ -655,26 +671,33 @@ void KScheduler::OnThreadStart() { } void KScheduler::Unload(KThread* thread) { + ASSERT(thread); + LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr"); - if (thread) { - if (thread->IsCallingSvc()) { - thread->ClearIsCallingSvc(); - } - if (!thread->IsTerminationRequested()) { - prev_thread = thread; - - Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); - cpu_core.SaveContext(thread->GetContext32()); - cpu_core.SaveContext(thread->GetContext64()); - // Save the TPIDR_EL0 system register in case it was modified. - thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); - cpu_core.ClearExclusiveState(); - } else { - prev_thread = nullptr; - } - thread->context_guard.Unlock(); + if (thread->IsCallingSvc()) { + thread->ClearIsCallingSvc(); + } + + auto& physical_core = system.Kernel().PhysicalCore(core_id); + if (!physical_core.IsInitialized()) { + return; + } + + Core::ARM_Interface& cpu_core = physical_core.ArmInterface(); + cpu_core.SaveContext(thread->GetContext32()); + cpu_core.SaveContext(thread->GetContext64()); + // Save the TPIDR_EL0 system register in case it was modified. + thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); + cpu_core.ClearExclusiveState(); + + if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) { + prev_thread = thread; + } else { + prev_thread = nullptr; } + + thread->context_guard.Unlock(); } void KScheduler::Reload(KThread* thread) { @@ -683,11 +706,6 @@ void KScheduler::Reload(KThread* thread) { if (thread) { ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable."); - auto* const thread_owner_process = thread->GetOwnerProcess(); - if (thread_owner_process != nullptr) { - system.Kernel().MakeCurrentProcess(thread_owner_process); - } - Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); cpu_core.LoadContext(thread->GetContext32()); cpu_core.LoadContext(thread->GetContext64()); @@ -705,7 +723,7 @@ void KScheduler::SwitchContextStep2() { } void KScheduler::ScheduleImpl() { - KThread* previous_thread = current_thread.load(); + KThread* previous_thread = GetCurrentThread(); KThread* next_thread = state.highest_priority_thread; state.needs_scheduling = false; @@ -717,10 +735,15 @@ void KScheduler::ScheduleImpl() { // If we're not actually switching thread, there's nothing to do. 
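+    // (ScheduleImpl is entered with dispatch disabled by DisableScheduling, so
+    // the early-out below re-enables it before returning.)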
if (next_thread == current_thread.load()) { + previous_thread->EnableDispatch(); guard.Unlock(); return; } + if (next_thread->GetCurrentCore() != core_id) { + next_thread->SetCurrentCore(core_id); + } + current_thread.store(next_thread); KProcess* const previous_process = system.Kernel().CurrentProcess(); @@ -731,11 +754,7 @@ void KScheduler::ScheduleImpl() { Unload(previous_thread); std::shared_ptr<Common::Fiber>* old_context; - if (previous_thread != nullptr) { - old_context = &previous_thread->GetHostContext(); - } else { - old_context = &idle_thread->GetHostContext(); - } + old_context = &previous_thread->GetHostContext(); guard.Unlock(); Common::Fiber::YieldTo(*old_context, *switch_fiber); diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h index 7df288438..82fcd99e7 100644 --- a/src/core/hle/kernel/k_scheduler.h +++ b/src/core/hle/kernel/k_scheduler.h @@ -33,6 +33,8 @@ public: explicit KScheduler(Core::System& system_, s32 core_id_); ~KScheduler(); + void Finalize(); + /// Reschedules to the next available thread (call after current thread is suspended) void RescheduleCurrentCore(); diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h index c571f2992..93c47f1b1 100644 --- a/src/core/hle/kernel/k_scheduler_lock.h +++ b/src/core/hle/kernel/k_scheduler_lock.h @@ -23,6 +23,11 @@ public: } void Lock() { + // If we are shutting down the kernel, none of this is relevant anymore. + if (kernel.IsShuttingDown()) { + return; + } + if (IsLockedByCurrentThread()) { // If we already own the lock, we can just increment the count. ASSERT(lock_count > 0); @@ -43,6 +48,11 @@ public: } void Unlock() { + // If we are shutting down the kernel, none of this is relevant anymore. + if (kernel.IsShuttingDown()) { + return; + } + ASSERT(IsLockedByCurrentThread()); ASSERT(lock_count > 0); diff --git a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h index 61dc2858f..2995c492d 100644 --- a/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h +++ b/src/core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h @@ -8,6 +8,7 @@ #pragma once #include "common/common_types.h" +#include "core/hle/kernel/global_scheduler_context.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/time_manager.h" diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp index 2bd53ccbd..d4e4a6b06 100644 --- a/src/core/hle/kernel/k_server_session.cpp +++ b/src/core/hle/kernel/k_server_session.cpp @@ -175,8 +175,7 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) { { KScopedSchedulerLock lock(kernel); if (!context.IsThreadWaiting()) { - context.GetThread().Wakeup(); - context.GetThread().SetSyncedObject(nullptr, result); + context.GetThread().EndWait(result); } } diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp index f168b4f21..e4c5eb74f 100644 --- a/src/core/hle/kernel/k_synchronization_object.cpp +++ b/src/core/hle/kernel/k_synchronization_object.cpp @@ -8,11 +8,66 @@ #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/kernel.h" #include "core/hle/kernel/svc_results.h" namespace Kernel { +namespace { + +class ThreadQueueImplForKSynchronizationObjectWait 
final : public KThreadQueueWithoutEndWait { +public: + ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o, + KSynchronizationObject::ThreadListNode* n, s32 c) + : KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {} + + void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object, + ResultCode wait_result) override { + // Determine the sync index, and unlink all nodes. + s32 sync_index = -1; + for (auto i = 0; i < m_count; ++i) { + // Check if this is the signaled object. + if (m_objects[i] == signaled_object && sync_index == -1) { + sync_index = i; + } + + // Unlink the current node from the current object. + m_objects[i]->UnlinkNode(std::addressof(m_nodes[i])); + } + + // Set the waiting thread's sync index. + waiting_thread->SetSyncedIndex(sync_index); + + // Set the waiting thread as not cancellable. + waiting_thread->ClearCancellable(); + + // Invoke the base end wait handler. + KThreadQueue::EndWait(waiting_thread, wait_result); + } + + void CancelWait(KThread* waiting_thread, ResultCode wait_result, + bool cancel_timer_task) override { + // Remove all nodes from our list. + for (auto i = 0; i < m_count; ++i) { + m_objects[i]->UnlinkNode(std::addressof(m_nodes[i])); + } + + // Set the waiting thread as not cancellable. + waiting_thread->ClearCancellable(); + + // Invoke the base cancel wait handler. + KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task); + } + +private: + KSynchronizationObject** m_objects; + KSynchronizationObject::ThreadListNode* m_nodes; + s32 m_count; +}; + +} // namespace + void KSynchronizationObject::Finalize() { this->OnFinalizeSynchronizationObject(); KAutoObject::Finalize(); @@ -25,11 +80,19 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index, std::vector<ThreadListNode> thread_nodes(num_objects); // Prepare for wait. - KThread* thread = kernel_ctx.CurrentScheduler()->GetCurrentThread(); + KThread* thread = GetCurrentThreadPointer(kernel_ctx); + ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects, + thread_nodes.data(), num_objects); { // Setup the scheduling lock and sleep. - KScopedSchedulerLockAndSleep slp{kernel_ctx, thread, timeout}; + KScopedSchedulerLockAndSleep slp(kernel_ctx, thread, timeout); + + // Check if the thread should terminate. + if (thread->IsTerminationRequested()) { + slp.CancelSleep(); + return ResultTerminationRequested; + } // Check if any of the objects are already signaled. for (auto i = 0; i < num_objects; ++i) { @@ -48,12 +111,6 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index, return ResultTimedOut; } - // Check if the thread should terminate. - if (thread->IsTerminationRequested()) { - slp.CancelSleep(); - return ResultTerminationRequested; - } - // Check if waiting was canceled. 
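+        // (Wait cancellation is flagged by a prior svcCancelSynchronization
+        // that arrived while this thread was not yet waiting.)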
if (thread->IsWaitCancelled()) { slp.CancelSleep(); @@ -66,73 +123,25 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index, thread_nodes[i].thread = thread; thread_nodes[i].next = nullptr; - if (objects[i]->thread_list_tail == nullptr) { - objects[i]->thread_list_head = std::addressof(thread_nodes[i]); - } else { - objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]); - } - - objects[i]->thread_list_tail = std::addressof(thread_nodes[i]); + objects[i]->LinkNode(std::addressof(thread_nodes[i])); } - // For debugging only - thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)}); - - // Mark the thread as waiting. + // Mark the thread as cancellable. thread->SetCancellable(); - thread->SetSyncedObject(nullptr, ResultTimedOut); - thread->SetState(ThreadState::Waiting); - thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization); - } - // The lock/sleep is done, so we should be able to get our result. + // Clear the thread's synced index. + thread->SetSyncedIndex(-1); - // Thread is no longer cancellable. - thread->ClearCancellable(); - - // For debugging only - thread->SetWaitObjectsForDebugging({}); + // Wait for an object to be signaled. + thread->BeginWait(std::addressof(wait_queue)); + thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization); + } - // Cancel the timer as needed. - kernel_ctx.TimeManager().UnscheduleTimeEvent(thread); + // Set the output index. + *out_index = thread->GetSyncedIndex(); // Get the wait result. - ResultCode wait_result{ResultSuccess}; - s32 sync_index = -1; - { - KScopedSchedulerLock lock(kernel_ctx); - KSynchronizationObject* synced_obj; - wait_result = thread->GetWaitResult(std::addressof(synced_obj)); - - for (auto i = 0; i < num_objects; ++i) { - // Unlink the object from the list. - ThreadListNode* prev_ptr = - reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head)); - ThreadListNode* prev_val = nullptr; - ThreadListNode *prev, *tail_prev; - - do { - prev = prev_ptr; - prev_ptr = prev_ptr->next; - tail_prev = prev_val; - prev_val = prev_ptr; - } while (prev_ptr != std::addressof(thread_nodes[i])); - - if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) { - objects[i]->thread_list_tail = tail_prev; - } - - prev->next = thread_nodes[i].next; - - if (objects[i] == synced_obj) { - sync_index = i; - } - } - } - - // Set output. - *out_index = sync_index; - return wait_result; + return thread->GetWaitResult(); } KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_) @@ -141,7 +150,7 @@ KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_) KSynchronizationObject::~KSynchronizationObject() = default; void KSynchronizationObject::NotifyAvailable(ResultCode result) { - KScopedSchedulerLock lock(kernel); + KScopedSchedulerLock sl(kernel); // If we're not signaled, we've nothing to notify. if (!this->IsSignaled()) { @@ -150,11 +159,7 @@ void KSynchronizationObject::NotifyAvailable(ResultCode result) { // Iterate over each thread. 
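+    // Each waiter's thread queue decides how to respond; for synchronization
+    // waits, this records the signaled object's index and ends the wait.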
for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) { - KThread* thread = cur_node->thread; - if (thread->GetState() == ThreadState::Waiting) { - thread->SetSyncedObject(this, result); - thread->SetState(ThreadState::Runnable); - } + cur_node->thread->NotifyAvailable(this, result); } } diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h index 898e58e16..ec235437b 100644 --- a/src/core/hle/kernel/k_synchronization_object.h +++ b/src/core/hle/kernel/k_synchronization_object.h @@ -35,6 +35,38 @@ public: [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const; + void LinkNode(ThreadListNode* node_) { + // Link the node to the list. + if (thread_list_tail == nullptr) { + thread_list_head = node_; + } else { + thread_list_tail->next = node_; + } + + thread_list_tail = node_; + } + + void UnlinkNode(ThreadListNode* node_) { + // Unlink the node from the list. + ThreadListNode* prev_ptr = + reinterpret_cast<ThreadListNode*>(std::addressof(thread_list_head)); + ThreadListNode* prev_val = nullptr; + ThreadListNode *prev, *tail_prev; + + do { + prev = prev_ptr; + prev_ptr = prev_ptr->next; + tail_prev = prev_val; + prev_val = prev_ptr; + } while (prev_ptr != node_); + + if (thread_list_tail == node_) { + thread_list_tail = tail_prev; + } + + prev->next = node_->next; + } + protected: explicit KSynchronizationObject(KernelCore& kernel); ~KSynchronizationObject() override; diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index db65ce79a..752592e2e 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp @@ -13,6 +13,9 @@ #include "common/common_types.h" #include "common/fiber.h" #include "common/logging/log.h" +#include "common/scope_exit.h" +#include "common/settings.h" +#include "common/thread_queue_list.h" #include "core/core.h" #include "core/cpu_manager.h" #include "core/hardware_properties.h" @@ -56,6 +59,34 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context, namespace Kernel { +namespace { + +class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait { +public: + explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_) + : KThreadQueueWithoutEndWait(kernel_) {} +}; + +class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue { +public: + explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl) + : KThreadQueue(kernel_), m_wait_list(wl) {} + + void CancelWait(KThread* waiting_thread, ResultCode wait_result, + bool cancel_timer_task) override { + // Remove the thread from the wait list. + m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread)); + + // Invoke the base cancel wait handler. 
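+        // (The base handler marks the thread runnable again, clears its wait
+        // queue, and optionally unschedules its timeout task.)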
+        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
+    }
+
+private:
+    KThread::WaiterList* m_wait_list;
+};
+
+} // namespace
+
 KThread::KThread(KernelCore& kernel_)
     : KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {}
 KThread::~KThread() = default;
@@ -82,6 +113,8 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
         [[fallthrough]];
     case ThreadType::HighPriority:
         [[fallthrough]];
+    case ThreadType::Dummy:
+        [[fallthrough]];
     case ThreadType::User:
         ASSERT(((owner == nullptr) ||
                 (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
@@ -127,11 +160,8 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     priority = prio;
     base_priority = prio;
 
-    // Set sync object and waiting lock to null.
-    synced_object = nullptr;
-
-    // Initialize sleeping queue.
-    sleeping_queue = nullptr;
+    // Initialize the wait queue.
+    wait_queue = nullptr;
 
     // Set suspend flags.
     suspend_request_flags = 0;
@@ -184,7 +214,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     // Setup the stack parameters.
     StackParameters& sp = GetStackParameters();
     sp.cur_thread = this;
-    sp.disable_count = 1;
+    sp.disable_count = 0;
     SetInExceptionHandler();
 
     // Set thread ID.
@@ -211,15 +241,16 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
     // Initialize the thread.
     R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
 
-    // Initialize host context.
+    // Initialize emulation parameters.
     thread->host_context =
         std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
+    thread->is_single_core = !Settings::values.use_multi_core.GetValue();
 
     return ResultSuccess;
 }
 
 ResultCode KThread::InitializeDummyThread(KThread* thread) {
-    return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Main);
+    return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Dummy);
}
 
 ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
@@ -273,11 +304,14 @@ void KThread::Finalize() {
         auto it = waiter_list.begin();
         while (it != waiter_list.end()) {
-            // The thread shouldn't be a kernel waiter.
+            // Clear the lock owner.
             it->SetLockOwner(nullptr);
-            it->SetSyncedObject(nullptr, ResultInvalidState);
-            it->Wakeup();
+
+            // Cancel the thread's wait before erasing, since erase advances the iterator.
+            it->CancelWait(ResultInvalidState, true);
+
+            // Erase the waiter from our list.
             it = waiter_list.erase(it);
         }
     }
@@ -294,15 +328,12 @@ bool KThread::IsSignaled() const {
     return signaled;
 }
 
-void KThread::Wakeup() {
-    KScopedSchedulerLock sl{kernel};
+void KThread::OnTimer() {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
+    // If we're waiting, cancel the wait.
     if (GetState() == ThreadState::Waiting) {
-        if (sleeping_queue != nullptr) {
-            sleeping_queue->WakeupThread(this);
-        } else {
-            SetState(ThreadState::Runnable);
-        }
+        wait_queue->CancelWait(this, ResultTimedOut, false);
     }
 }
 
@@ -327,7 +358,7 @@ void KThread::StartTermination() {
 
     // Signal.
     signaled = true;
-    NotifyAvailable();
+    KSynchronizationObject::NotifyAvailable();
 
     // Clear previous thread in KScheduler.
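+    // (This keeps the per-core schedulers from retaining a dangling
+    // prev_thread pointer to a thread that is about to be destroyed.)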
KScheduler::ClearPreviousThread(kernel, this); @@ -475,30 +506,32 @@ ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_m return ResultSuccess; } -ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) { +ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) { ASSERT(parent != nullptr); ASSERT(v_affinity_mask != 0); - KScopedLightLock lk{activity_pause_lock}; + KScopedLightLock lk(activity_pause_lock); // Set the core mask. u64 p_affinity_mask = 0; { - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl(kernel); ASSERT(num_core_migration_disables >= 0); - // If the core id is no-update magic, preserve the ideal core id. - if (cpu_core_id == Svc::IdealCoreNoUpdate) { - cpu_core_id = virtual_ideal_core_id; - R_UNLESS(((1ULL << cpu_core_id) & v_affinity_mask) != 0, ResultInvalidCombination); + // If we're updating, set our ideal virtual core. + if (core_id_ != Svc::IdealCoreNoUpdate) { + virtual_ideal_core_id = core_id_; + } else { + // Preserve our ideal core id. + core_id_ = virtual_ideal_core_id; + R_UNLESS(((1ULL << core_id_) & v_affinity_mask) != 0, ResultInvalidCombination); } - // Set the virtual core/affinity mask. - virtual_ideal_core_id = cpu_core_id; + // Set our affinity mask. virtual_affinity_mask = v_affinity_mask; // Translate the virtual core to a physical core. - if (cpu_core_id >= 0) { - cpu_core_id = Core::Hardware::VirtualToPhysicalCoreMap[cpu_core_id]; + if (core_id_ >= 0) { + core_id_ = Core::Hardware::VirtualToPhysicalCoreMap[core_id_]; } // Translate the virtual affinity mask to a physical one. @@ -513,7 +546,7 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) { const KAffinityMask old_mask = physical_affinity_mask; // Set our new ideals. - physical_ideal_core_id = cpu_core_id; + physical_ideal_core_id = core_id_; physical_affinity_mask.SetAffinityMask(p_affinity_mask); if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { @@ -531,18 +564,18 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) { } } else { // Otherwise, we edit the original affinity for restoration later. - original_physical_ideal_core_id = cpu_core_id; + original_physical_ideal_core_id = core_id_; original_physical_affinity_mask.SetAffinityMask(p_affinity_mask); } } // Update the pinned waiter list. + ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, std::addressof(pinned_waiter_list)); { bool retry_update{}; - bool thread_is_pinned{}; do { // Lock the scheduler. - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl(kernel); // Don't do any further management if our termination has been requested. R_SUCCEED_IF(IsTerminationRequested()); @@ -570,12 +603,9 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) { R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested); - // Note that the thread was pinned. - thread_is_pinned = true; - // Wait until the thread isn't pinned any more. pinned_waiter_list.push_back(GetCurrentThread(kernel)); - GetCurrentThread(kernel).SetState(ThreadState::Waiting); + GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_)); } else { // If the thread isn't pinned, release the scheduler lock and retry until it's // not current. @@ -583,16 +613,6 @@ ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) { } } } while (retry_update); - - // If the thread was pinned, it no longer is, and we should remove the current thread from - // our waiter list. 
- if (thread_is_pinned) { - // Lock the scheduler. - KScopedSchedulerLock sl{kernel}; - - // Remove from the list. - pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel))); - } } return ResultSuccess; @@ -641,15 +661,9 @@ void KThread::WaitCancel() { KScopedSchedulerLock sl{kernel}; // Check if we're waiting and cancellable. - if (GetState() == ThreadState::Waiting && cancellable) { - if (sleeping_queue != nullptr) { - sleeping_queue->WakeupThread(this); - wait_cancelled = true; - } else { - SetSyncedObject(nullptr, ResultCancelled); - SetState(ThreadState::Runnable); - wait_cancelled = false; - } + if (this->GetState() == ThreadState::Waiting && cancellable) { + wait_cancelled = false; + wait_queue->CancelWait(this, ResultCancelled, true); } else { // Otherwise, note that we cancelled a wait. wait_cancelled = true; @@ -700,60 +714,59 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) { // Set the activity. { // Lock the scheduler. - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl(kernel); // Verify our state. - const auto cur_state = GetState(); + const auto cur_state = this->GetState(); R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable), ResultInvalidState); // Either pause or resume. if (activity == Svc::ThreadActivity::Paused) { // Verify that we're not suspended. - R_UNLESS(!IsSuspendRequested(SuspendType::Thread), ResultInvalidState); + R_UNLESS(!this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState); // Suspend. - RequestSuspend(SuspendType::Thread); + this->RequestSuspend(SuspendType::Thread); } else { ASSERT(activity == Svc::ThreadActivity::Runnable); // Verify that we're suspended. - R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState); + R_UNLESS(this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState); // Resume. - Resume(SuspendType::Thread); + this->Resume(SuspendType::Thread); } } // If the thread is now paused, update the pinned waiter list. if (activity == Svc::ThreadActivity::Paused) { - bool thread_is_pinned{}; - bool thread_is_current{}; + ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, + std::addressof(pinned_waiter_list)); + + bool thread_is_current; do { // Lock the scheduler. - KScopedSchedulerLock sl{kernel}; + KScopedSchedulerLock sl(kernel); // Don't do any further management if our termination has been requested. - R_SUCCEED_IF(IsTerminationRequested()); + R_SUCCEED_IF(this->IsTerminationRequested()); + + // By default, treat the thread as not current. + thread_is_current = false; // Check whether the thread is pinned. - if (GetStackParameters().is_pinned) { + if (this->GetStackParameters().is_pinned) { // Verify that the current thread isn't terminating. R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested); - // Note that the thread was pinned and not current. - thread_is_pinned = true; - thread_is_current = false; - // Wait until the thread isn't pinned any more. pinned_waiter_list.push_back(GetCurrentThread(kernel)); - GetCurrentThread(kernel).SetState(ThreadState::Waiting); + GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_)); } else { // Check if the thread is currently running. // If it is, we'll need to retry. 
-                thread_is_current = false;
-
                 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
                     if (kernel.Scheduler(i).GetCurrentThread() == this) {
                         thread_is_current = true;
@@ -762,16 +775,6 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
                 }
             }
         } while (thread_is_current);
-
-        // If the thread was pinned, it no longer is, and we should remove the current thread from
-        // our waiter list.
-        if (thread_is_pinned) {
-            // Lock the scheduler.
-            KScopedSchedulerLock sl{kernel};
-
-            // Remove from the list.
-            pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
-        }
     }
 
     return ResultSuccess;
@@ -966,6 +969,9 @@ ResultCode KThread::Run() {
         // Set our state and finish.
         SetState(ThreadState::Runnable);
+
+        DisableDispatch();
+
         return ResultSuccess;
     }
 }
@@ -996,27 +1002,61 @@ ResultCode KThread::Sleep(s64 timeout) {
     ASSERT(this == GetCurrentThreadPointer(kernel));
     ASSERT(timeout > 0);
 
+    ThreadQueueImplForKThreadSleep wait_queue_(kernel);
     {
         // Setup the scheduling lock and sleep.
-        KScopedSchedulerLockAndSleep slp{kernel, this, timeout};
+        KScopedSchedulerLockAndSleep slp(kernel, this, timeout);
 
         // Check if the thread should terminate.
-        if (IsTerminationRequested()) {
+        if (this->IsTerminationRequested()) {
             slp.CancelSleep();
             return ResultTerminationRequested;
         }
 
-        // Mark the thread as waiting.
-        SetState(ThreadState::Waiting);
+        // Wait for the sleep to end.
+        this->BeginWait(std::addressof(wait_queue_));
         SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
     }
 
-    // The lock/sleep is done.
+    return ResultSuccess;
+}
 
-    // Cancel the timer.
-    kernel.TimeManager().UnscheduleTimeEvent(this);
+void KThread::BeginWait(KThreadQueue* queue) {
+    // Set our state as waiting.
+    SetState(ThreadState::Waiting);
 
-    return ResultSuccess;
+    // Set our wait queue.
+    wait_queue = queue;
+}
+
+void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
+    // Lock the scheduler.
+    KScopedSchedulerLock sl(kernel);
+
+    // If we're waiting, notify our queue that we're available.
+    if (GetState() == ThreadState::Waiting) {
+        wait_queue->NotifyAvailable(this, signaled_object, wait_result_);
+    }
+}
+
+void KThread::EndWait(ResultCode wait_result_) {
+    // Lock the scheduler.
+    KScopedSchedulerLock sl(kernel);
+
+    // If we're waiting, end the wait.
+    if (GetState() == ThreadState::Waiting) {
+        wait_queue->EndWait(this, wait_result_);
+    }
+}
+
+void KThread::CancelWait(ResultCode wait_result_, bool cancel_timer_task) {
+    // Lock the scheduler.
+    KScopedSchedulerLock sl(kernel);
+
+    // If we're waiting, cancel the wait.
+    if (GetState() == ThreadState::Waiting) {
+        wait_queue->CancelWait(this, wait_result_, cancel_timer_task);
+    }
 }
 
 void KThread::SetState(ThreadState state) {
@@ -1050,4 +1090,26 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
     return GetCurrentThread(kernel).GetCurrentCore();
 }
 
+KScopedDisableDispatch::~KScopedDisableDispatch() {
+    // If we are shutting down the kernel, none of this is relevant anymore.
+    if (kernel.IsShuttingDown()) {
+        return;
+    }
+
+    // Skip the reschedule if single-core, as dispatch tracking is disabled here.
+ if (!Settings::values.use_multi_core.GetValue()) { + return; + } + + if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) { + auto scheduler = kernel.CurrentScheduler(); + + if (scheduler) { + scheduler->RescheduleCurrentCore(); + } + } else { + GetCurrentThread(kernel).EnableDispatch(); + } +} + } // namespace Kernel diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index c77f44ad4..c8a08bd71 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h @@ -48,6 +48,7 @@ enum class ThreadType : u32 { Kernel = 1, HighPriority = 2, User = 3, + Dummy = 100, // Special thread type for emulation purposes only }; DECLARE_ENUM_FLAG_OPERATORS(ThreadType); @@ -161,8 +162,6 @@ public: } } - void Wakeup(); - void SetBasePriority(s32 value); [[nodiscard]] ResultCode Run(); @@ -197,13 +196,19 @@ public: void Suspend(); - void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) { - synced_object = obj; + constexpr void SetSyncedIndex(s32 index) { + synced_index = index; + } + + [[nodiscard]] constexpr s32 GetSyncedIndex() const { + return synced_index; + } + + constexpr void SetWaitResult(ResultCode wait_res) { wait_result = wait_res; } - [[nodiscard]] ResultCode GetWaitResult(KSynchronizationObject** out) const { - *out = synced_object; + [[nodiscard]] constexpr ResultCode GetWaitResult() const { return wait_result; } @@ -374,6 +379,8 @@ public: [[nodiscard]] bool IsSignaled() const override; + void OnTimer(); + static void PostDestroy(uintptr_t arg); [[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread); @@ -446,20 +453,39 @@ public: return per_core_priority_queue_entry[core]; } - void SetSleepingQueue(KThreadQueue* q) { - sleeping_queue = q; + [[nodiscard]] bool IsKernelThread() const { + return GetActiveCore() == 3; + } + + [[nodiscard]] bool IsDispatchTrackingDisabled() const { + return is_single_core || IsKernelThread(); } [[nodiscard]] s32 GetDisableDispatchCount() const { + if (IsDispatchTrackingDisabled()) { + // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. + return 1; + } + return this->GetStackParameters().disable_count; } void DisableDispatch() { + if (IsDispatchTrackingDisabled()) { + // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. + return; + } + ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); this->GetStackParameters().disable_count++; } void EnableDispatch() { + if (IsDispatchTrackingDisabled()) { + // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch. 
+ return; + } + ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); this->GetStackParameters().disable_count--; } @@ -573,6 +599,15 @@ public: address_key_value = val; } + void ClearWaitQueue() { + wait_queue = nullptr; + } + + void BeginWait(KThreadQueue* queue); + void NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_); + void EndWait(ResultCode wait_result_); + void CancelWait(ResultCode wait_result_, bool cancel_timer_task); + [[nodiscard]] bool HasWaiters() const { return !waiter_list.empty(); } @@ -667,7 +702,6 @@ private: KAffinityMask physical_affinity_mask{}; u64 thread_id{}; std::atomic<s64> cpu_time{}; - KSynchronizationObject* synced_object{}; VAddr address_key{}; KProcess* parent{}; VAddr kernel_stack_top{}; @@ -677,13 +711,14 @@ private: s64 schedule_count{}; s64 last_scheduled_tick{}; std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{}; - KThreadQueue* sleeping_queue{}; + KThreadQueue* wait_queue{}; WaiterList waiter_list{}; WaiterList pinned_waiter_list{}; KThread* lock_owner{}; u32 address_key_value{}; u32 suspend_request_flags{}; u32 suspend_allowed_flags{}; + s32 synced_index{}; ResultCode wait_result{ResultSuccess}; s32 base_priority{}; s32 physical_ideal_core_id{}; @@ -708,6 +743,7 @@ private: // For emulation std::shared_ptr<Common::Fiber> host_context{}; + bool is_single_core{}; // For debugging std::vector<KSynchronizationObject*> wait_objects_for_debugging; @@ -752,4 +788,20 @@ public: } }; +class KScopedDisableDispatch { +public: + [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} { + // If we are shutting down the kernel, none of this is relevant anymore. + if (kernel.IsShuttingDown()) { + return; + } + GetCurrentThread(kernel).DisableDispatch(); + } + + ~KScopedDisableDispatch(); + +private: + KernelCore& kernel; +}; + } // namespace Kernel diff --git a/src/core/hle/kernel/k_thread_queue.cpp b/src/core/hle/kernel/k_thread_queue.cpp new file mode 100644 index 000000000..d5248b547 --- /dev/null +++ b/src/core/hle/kernel/k_thread_queue.cpp @@ -0,0 +1,49 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#include "core/hle/kernel/k_thread_queue.h" +#include "core/hle/kernel/kernel.h" +#include "core/hle/kernel/time_manager.h" + +namespace Kernel { + +void KThreadQueue::NotifyAvailable([[maybe_unused]] KThread* waiting_thread, + [[maybe_unused]] KSynchronizationObject* signaled_object, + [[maybe_unused]] ResultCode wait_result) {} + +void KThreadQueue::EndWait(KThread* waiting_thread, ResultCode wait_result) { + // Set the thread's wait result. + waiting_thread->SetWaitResult(wait_result); + + // Set the thread as runnable. + waiting_thread->SetState(ThreadState::Runnable); + + // Clear the thread's wait queue. + waiting_thread->ClearWaitQueue(); + + // Cancel the thread task. + kernel.TimeManager().UnscheduleTimeEvent(waiting_thread); +} + +void KThreadQueue::CancelWait(KThread* waiting_thread, ResultCode wait_result, + bool cancel_timer_task) { + // Set the thread's wait result. + waiting_thread->SetWaitResult(wait_result); + + // Set the thread as runnable. + waiting_thread->SetState(ThreadState::Runnable); + + // Clear the thread's wait queue. + waiting_thread->ClearWaitQueue(); + + // Cancel the thread task. 
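+    // (Callers pass cancel_timer_task = false when the cancellation originates
+    // from the timer itself, as in KThread::OnTimer.)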
+ if (cancel_timer_task) { + kernel.TimeManager().UnscheduleTimeEvent(waiting_thread); + } +} + +void KThreadQueueWithoutEndWait::EndWait([[maybe_unused]] KThread* waiting_thread, + [[maybe_unused]] ResultCode wait_result) {} + +} // namespace Kernel diff --git a/src/core/hle/kernel/k_thread_queue.h b/src/core/hle/kernel/k_thread_queue.h index 35d471dc5..ccb718e49 100644 --- a/src/core/hle/kernel/k_thread_queue.h +++ b/src/core/hle/kernel/k_thread_queue.h @@ -4,6 +4,7 @@ #pragma once +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_thread.h" namespace Kernel { @@ -11,71 +12,24 @@ namespace Kernel { class KThreadQueue { public: explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_} {} + virtual ~KThreadQueue() = default; - bool IsEmpty() const { - return wait_list.empty(); - } - - KThread::WaiterList::iterator begin() { - return wait_list.begin(); - } - KThread::WaiterList::iterator end() { - return wait_list.end(); - } - - bool SleepThread(KThread* t) { - KScopedSchedulerLock sl{kernel}; - - // If the thread needs terminating, don't enqueue it. - if (t->IsTerminationRequested()) { - return false; - } - - // Set the thread's queue and mark it as waiting. - t->SetSleepingQueue(this); - t->SetState(ThreadState::Waiting); - - // Add the thread to the queue. - wait_list.push_back(*t); - - return true; - } - - void WakeupThread(KThread* t) { - KScopedSchedulerLock sl{kernel}; - - // Remove the thread from the queue. - wait_list.erase(wait_list.iterator_to(*t)); - - // Mark the thread as no longer sleeping. - t->SetState(ThreadState::Runnable); - t->SetSleepingQueue(nullptr); - } - - KThread* WakeupFrontThread() { - KScopedSchedulerLock sl{kernel}; - - if (wait_list.empty()) { - return nullptr; - } else { - // Remove the thread from the queue. - auto it = wait_list.begin(); - KThread* thread = std::addressof(*it); - wait_list.erase(it); - - ASSERT(thread->GetState() == ThreadState::Waiting); - - // Mark the thread as no longer sleeping. 
- thread->SetState(ThreadState::Runnable); - thread->SetSleepingQueue(nullptr); - - return thread; - } - } + virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object, + ResultCode wait_result); + virtual void EndWait(KThread* waiting_thread, ResultCode wait_result); + virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result, + bool cancel_timer_task); private: KernelCore& kernel; KThread::WaiterList wait_list{}; }; +class KThreadQueueWithoutEndWait : public KThreadQueue { +public: + explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {} + + void EndWait(KThread* waiting_thread, ResultCode wait_result) override final; +}; + } // namespace Kernel diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index 45e86a677..1225e1fba 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp @@ -14,6 +14,7 @@ #include "common/assert.h" #include "common/logging/log.h" #include "common/microprofile.h" +#include "common/scope_exit.h" #include "common/thread.h" #include "common/thread_worker.h" #include "core/arm/arm_interface.h" @@ -83,12 +84,16 @@ struct KernelCore::Impl { } void InitializeCores() { - for (auto& core : cores) { - core.Initialize(current_process->Is64BitProcess()); + for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { + cores[core_id].Initialize(current_process->Is64BitProcess()); + system.Memory().SetCurrentPageTable(*current_process, core_id); } } void Shutdown() { + is_shutting_down.store(true, std::memory_order_relaxed); + SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); }); + process_list.clear(); // Close all open server ports. @@ -123,15 +128,6 @@ struct KernelCore::Impl { next_user_process_id = KProcess::ProcessIDMin; next_thread_id = 1; - for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { - if (suspend_threads[core_id]) { - suspend_threads[core_id]->Close(); - suspend_threads[core_id] = nullptr; - } - - schedulers[core_id].reset(); - } - cores.clear(); global_handle_table->Finalize(); @@ -159,6 +155,16 @@ struct KernelCore::Impl { CleanupObject(time_shared_mem); CleanupObject(system_resource_limit); + for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { + if (suspend_threads[core_id]) { + suspend_threads[core_id]->Close(); + suspend_threads[core_id] = nullptr; + } + + schedulers[core_id]->Finalize(); + schedulers[core_id].reset(); + } + // Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others next_host_thread_id = Core::Hardware::NUM_CPU_CORES; @@ -176,7 +182,10 @@ struct KernelCore::Impl { // Shutdown all processes. if (current_process) { current_process->Finalize(); - current_process->Close(); + // current_process->Close(); + // TODO: The current process should be destroyed based on accurate ref counting after + // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak. 
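The TODO above is about object lifetime: with accurate reference counting, the final Close() would destroy the process and the manual Destroy() would be unnecessary. A rough sketch of that Open()/Close() contract (illustrative only; the kernel's real KAutoObject machinery is considerably more involved):

#include <atomic>

class RefCounted {
public:
    // Objects start with one reference, owned by their creator.
    void Open() {
        count.fetch_add(1, std::memory_order_relaxed);
    }

    void Close() {
        // Whoever drops the last reference destroys the object.
        if (count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
            Destroy();
        }
    }

protected:
    virtual ~RefCounted() = default;

private:
    virtual void Destroy() {
        delete this;
    }

    std::atomic<int> count{1};
};

Under that contract, an Open() without a matching Close() keeps the count above zero forever; the manual Destroy() call here sidesteps exactly that kind of imbalance until the counting is accurate.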
+ current_process->Destroy(); current_process = nullptr; } @@ -245,13 +254,11 @@ struct KernelCore::Impl { KScopedSchedulerLock lock(kernel); global_scheduler_context->PreemptThreads(); } - const auto time_interval = std::chrono::nanoseconds{ - Core::Timing::msToCycles(std::chrono::milliseconds(10))}; + const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)}; system.CoreTiming().ScheduleEvent(time_interval, preemption_event); }); - const auto time_interval = - std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))}; + const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)}; system.CoreTiming().ScheduleEvent(time_interval, preemption_event); } @@ -267,14 +274,6 @@ struct KernelCore::Impl { void MakeCurrentProcess(KProcess* process) { current_process = process; - if (process == nullptr) { - return; - } - - const u32 core_id = GetCurrentHostThreadID(); - if (core_id < Core::Hardware::NUM_CPU_CORES) { - system.Memory().SetCurrentPageTable(*process, core_id); - } } static inline thread_local u32 host_thread_id = UINT32_MAX; @@ -344,7 +343,16 @@ struct KernelCore::Impl { is_phantom_mode_for_singlecore = value; } + bool IsShuttingDown() const { + return is_shutting_down.load(std::memory_order_relaxed); + } + KThread* GetCurrentEmuThread() { + // If we are shutting down the kernel, none of this is relevant anymore. + if (IsShuttingDown()) { + return {}; + } + const auto thread_id = GetCurrentHostThreadID(); if (thread_id >= Core::Hardware::NUM_CPU_CORES) { return GetHostDummyThread(); @@ -760,6 +768,7 @@ struct KernelCore::Impl { std::vector<std::unique_ptr<KThread>> dummy_threads; bool is_multicore{}; + std::atomic_bool is_shutting_down{}; bool is_phantom_mode_for_singlecore{}; u32 single_core_thread_id{}; @@ -845,16 +854,20 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const { return impl->cores[id]; } +size_t KernelCore::CurrentPhysicalCoreIndex() const { + const u32 core_id = impl->GetCurrentHostThreadID(); + if (core_id >= Core::Hardware::NUM_CPU_CORES) { + return Core::Hardware::NUM_CPU_CORES - 1; + } + return core_id; +} + Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() { - u32 core_id = impl->GetCurrentHostThreadID(); - ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); - return impl->cores[core_id]; + return impl->cores[CurrentPhysicalCoreIndex()]; } const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const { - u32 core_id = impl->GetCurrentHostThreadID(); - ASSERT(core_id < Core::Hardware::NUM_CPU_CORES); - return impl->cores[core_id]; + return impl->cores[CurrentPhysicalCoreIndex()]; } Kernel::KScheduler* KernelCore::CurrentScheduler() { @@ -1057,6 +1070,9 @@ void KernelCore::Suspend(bool in_suspention) { impl->suspend_threads[core_id]->SetState(state); impl->suspend_threads[core_id]->SetWaitReasonForDebugging( ThreadWaitReasonForDebugging::Suspended); + if (!should_suspend) { + impl->suspend_threads[core_id]->DisableDispatch(); + } } } } @@ -1065,19 +1081,21 @@ bool KernelCore::IsMulticore() const { return impl->is_multicore; } +bool KernelCore::IsShuttingDown() const { + return impl->IsShuttingDown(); +} + void KernelCore::ExceptionalExit() { exception_exited = true; Suspend(true); } void KernelCore::EnterSVCProfile() { - std::size_t core = impl->GetCurrentHostThreadID(); - impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC)); + impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC)); } void 
KernelCore::ExitSVCProfile() { - std::size_t core = impl->GetCurrentHostThreadID(); - MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]); + MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]); } std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h index d2ceae950..b9b423908 100644 --- a/src/core/hle/kernel/kernel.h +++ b/src/core/hle/kernel/kernel.h @@ -53,6 +53,7 @@ class KSharedMemoryInfo; class KThread; class KTransferMemory; class KWritableEvent; +class KCodeMemory; class PhysicalCore; class ServiceThread; class Synchronization; @@ -148,6 +149,9 @@ public: /// Gets an instance of the respective physical CPU core. const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; + /// Gets the current physical core index for the running host thread. + std::size_t CurrentPhysicalCoreIndex() const; + /// Gets the sole instance of the Scheduler at the current running core. Kernel::KScheduler* CurrentScheduler(); @@ -271,6 +275,8 @@ public: bool IsMulticore() const; + bool IsShuttingDown() const; + void EnterSVCProfile(); void ExitSVCProfile(); @@ -326,6 +332,8 @@ public: return slab_heap_container->transfer_memory; } else if constexpr (std::is_same_v<T, KWritableEvent>) { return slab_heap_container->writeable_event; + } else if constexpr (std::is_same_v<T, KCodeMemory>) { + return slab_heap_container->code_memory; } } @@ -377,6 +385,7 @@ private: KSlabHeap<KThread> thread; KSlabHeap<KTransferMemory> transfer_memory; KSlabHeap<KWritableEvent> writeable_event; + KSlabHeap<KCodeMemory> code_memory; }; std::unique_ptr<SlabHeapContainer> slab_heap_container; diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp index 6721b6276..03f3dec10 100644 --- a/src/core/hle/kernel/service_thread.cpp +++ b/src/core/hle/kernel/service_thread.cpp @@ -25,24 +25,27 @@ public: void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context); private: - std::vector<std::thread> threads; + std::vector<std::jthread> threads; std::queue<std::function<void()>> requests; std::mutex queue_mutex; - std::condition_variable condition; + std::condition_variable_any condition; const std::string service_name; - bool stop{}; }; ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name) : service_name{name} { - for (std::size_t i = 0; i < num_threads; ++i) - threads.emplace_back([this, &kernel] { + for (std::size_t i = 0; i < num_threads; ++i) { + threads.emplace_back([this, &kernel](std::stop_token stop_token) { Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str()); // Wait for first request before trying to acquire a render context { std::unique_lock lock{queue_mutex}; - condition.wait(lock, [this] { return stop || !requests.empty(); }); + condition.wait(lock, stop_token, [this] { return !requests.empty(); }); + } + + if (stop_token.stop_requested()) { + return; } kernel.RegisterHostThread(); @@ -52,10 +55,16 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std { std::unique_lock lock{queue_mutex}; - condition.wait(lock, [this] { return stop || !requests.empty(); }); - if (stop || requests.empty()) { + condition.wait(lock, stop_token, [this] { return !requests.empty(); }); + + if (stop_token.stop_requested()) { return; } + + if (requests.empty()) { + continue; + } + task = 
std::move(requests.front()); requests.pop(); } @@ -63,6 +72,7 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std task(); } }); + } } void ServiceThread::Impl::QueueSyncRequest(KSession& session, @@ -88,12 +98,9 @@ void ServiceThread::Impl::QueueSyncRequest(KSession& session, } ServiceThread::Impl::~Impl() { - { - std::unique_lock lock{queue_mutex}; - stop = true; - } condition.notify_all(); - for (std::thread& thread : threads) { + for (auto& thread : threads) { + thread.request_stop(); thread.join(); } } diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index f0cd8471e..bb9475c56 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp @@ -18,6 +18,7 @@ #include "core/core_timing.h" #include "core/hle/kernel/k_client_port.h" #include "core/hle/kernel/k_client_session.h" +#include "core/hle/kernel/k_code_memory.h" #include "core/hle/kernel/k_event.h" #include "core/hle/kernel/k_handle_table.h" #include "core/hle/kernel/k_memory_block.h" @@ -31,6 +32,7 @@ #include "core/hle/kernel/k_shared_memory.h" #include "core/hle/kernel/k_synchronization_object.h" #include "core/hle/kernel/k_thread.h" +#include "core/hle/kernel/k_thread_queue.h" #include "core/hle/kernel/k_transfer_memory.h" #include "core/hle/kernel/k_writable_event.h" #include "core/hle/kernel/kernel.h" @@ -39,7 +41,6 @@ #include "core/hle/kernel/svc_results.h" #include "core/hle/kernel/svc_types.h" #include "core/hle/kernel/svc_wrap.h" -#include "core/hle/lock.h" #include "core/hle/result.h" #include "core/memory.h" #include "core/reporter.h" @@ -135,7 +136,6 @@ enum class ResourceLimitValueType { /// Set the process heap to a given Size. It can both extend and shrink the heap. static ResultCode SetHeapSize(Core::System& system, VAddr* heap_addr, u64 heap_size) { - std::lock_guard lock{HLE::g_hle_lock}; LOG_TRACE(Kernel_SVC, "called, heap_size=0x{:X}", heap_size); // Size must be a multiple of 0x200000 (2MB) and be equal to or less than 8GB. @@ -166,7 +166,6 @@ static ResultCode SetHeapSize32(Core::System& system, u32* heap_addr, u32 heap_s static ResultCode SetMemoryAttribute(Core::System& system, VAddr address, u64 size, u32 mask, u32 attribute) { - std::lock_guard lock{HLE::g_hle_lock}; LOG_DEBUG(Kernel_SVC, "called, address=0x{:016X}, size=0x{:X}, mask=0x{:08X}, attribute=0x{:08X}", address, size, mask, attribute); @@ -210,7 +209,6 @@ static ResultCode SetMemoryAttribute32(Core::System& system, u32 address, u32 si /// Maps a memory range into a different range. static ResultCode MapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) { - std::lock_guard lock{HLE::g_hle_lock}; LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, src_addr, size); @@ -230,7 +228,6 @@ static ResultCode MapMemory32(Core::System& system, u32 dst_addr, u32 src_addr, /// Unmaps a region that was previously mapped with svcMapMemory static ResultCode UnmapMemory(Core::System& system, VAddr dst_addr, VAddr src_addr, u64 size) { - std::lock_guard lock{HLE::g_hle_lock}; LOG_TRACE(Kernel_SVC, "called, dst_addr=0x{:X}, src_addr=0x{:X}, size=0x{:X}", dst_addr, src_addr, size); @@ -307,26 +304,29 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle, /// Makes a blocking IPC call to an OS service. static ResultCode SendSyncRequest(Core::System& system, Handle handle) { - auto& kernel = system.Kernel(); + // Create the wait queue. + KThreadQueue wait_queue(kernel); + + // Get the client session from its handle. 
+ KScopedAutoObject session = + kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle); + R_UNLESS(session.IsNotNull(), ResultInvalidHandle); + + LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); + auto thread = kernel.CurrentScheduler()->GetCurrentThread(); { KScopedSchedulerLock lock(kernel); - thread->SetState(ThreadState::Waiting); - thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); - - { - KScopedAutoObject session = - kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle); - R_UNLESS(session.IsNotNull(), ResultInvalidHandle); - LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName()); - session->SendSyncRequest(thread, system.Memory(), system.CoreTiming()); - } + + // This is a synchronous request, so we should wait for our request to complete. + GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue)); + GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC); + session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming()); } - KSynchronizationObject* dummy{}; - return thread->GetWaitResult(std::addressof(dummy)); + return thread->GetWaitResult(); } static ResultCode SendSyncRequest32(Core::System& system, Handle handle) { @@ -637,7 +637,6 @@ static void OutputDebugString(Core::System& system, VAddr address, u64 len) { /// Gets system/memory information for the current process static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle handle, u64 info_sub_id) { - std::lock_guard lock{HLE::g_hle_lock}; LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id, info_sub_id, handle); @@ -873,7 +872,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle const u64 thread_ticks = current_thread->GetCpuTime(); out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks); - } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) { + } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) { out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks; } @@ -887,7 +886,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle return ResultInvalidHandle; } - if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id != system.CurrentCoreIndex()) { + if (info_sub_id != 0xFFFFFFFFFFFFFFFF && + info_sub_id != system.Kernel().CurrentPhysicalCoreIndex()) { LOG_ERROR(Kernel_SVC, "Core is not the current core, got {}", info_sub_id); return ResultInvalidCombination; } @@ -918,7 +918,6 @@ static ResultCode GetInfo32(Core::System& system, u32* result_low, u32* result_h /// Maps memory at a desired address static ResultCode MapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { - std::lock_guard lock{HLE::g_hle_lock}; LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size); if (!Common::Is4KBAligned(addr)) { @@ -972,7 +971,6 @@ static ResultCode MapPhysicalMemory32(Core::System& system, u32 addr, u32 size) /// Unmaps memory previously mapped via MapPhysicalMemory static ResultCode UnmapPhysicalMemory(Core::System& system, VAddr addr, u64 size) { - std::lock_guard lock{HLE::g_hle_lock}; LOG_DEBUG(Kernel_SVC, "called, addr=0x{:016X}, size=0x{:X}", addr, size); if (!Common::Is4KBAligned(addr)) { @@ -1197,6 +1195,22 @@ constexpr bool IsValidProcessMemoryPermission(Svc::MemoryPermission perm) { } } +constexpr bool 
IsValidMapCodeMemoryPermission(Svc::MemoryPermission perm) { + return perm == Svc::MemoryPermission::ReadWrite; +} + +constexpr bool IsValidMapToOwnerCodeMemoryPermission(Svc::MemoryPermission perm) { + return perm == Svc::MemoryPermission::Read || perm == Svc::MemoryPermission::ReadExecute; +} + +constexpr bool IsValidUnmapCodeMemoryPermission(Svc::MemoryPermission perm) { + return perm == Svc::MemoryPermission::None; +} + +constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(Svc::MemoryPermission perm) { + return perm == Svc::MemoryPermission::None; +} + } // Anonymous namespace static ResultCode MapSharedMemory(Core::System& system, Handle shmem_handle, VAddr address, @@ -1306,10 +1320,198 @@ static ResultCode SetProcessMemoryPermission(Core::System& system, Handle proces return page_table.SetProcessMemoryPermission(address, size, ConvertToKMemoryPermission(perm)); } +static ResultCode MapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle, + VAddr src_address, u64 size) { + LOG_TRACE(Kernel_SVC, + "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}", + dst_address, process_handle, src_address, size); + + // Validate the address/size. + R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress); + R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress); + R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize); + R_UNLESS(size > 0, ResultInvalidSize); + R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory); + R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory); + + // Get the processes. + KProcess* dst_process = system.CurrentProcess(); + KScopedAutoObject src_process = + dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle); + R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle); + + // Get the page tables. + auto& dst_pt = dst_process->PageTable(); + auto& src_pt = src_process->PageTable(); + + // Validate that the mapping is in range. + R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory); + R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode), + ResultInvalidMemoryRegion); + + // Create a new page group. + KMemoryInfo kBlockInfo = dst_pt.QueryInfo(dst_address); + KPageLinkedList pg(kBlockInfo.GetAddress(), kBlockInfo.GetNumPages()); + + // Map the group. + R_TRY(dst_pt.MapPages(dst_address, pg, KMemoryState::SharedCode, + KMemoryPermission::UserReadWrite)); + + return ResultSuccess; +} + +static ResultCode UnmapProcessMemory(Core::System& system, VAddr dst_address, Handle process_handle, + VAddr src_address, u64 size) { + LOG_TRACE(Kernel_SVC, + "called, dst_address=0x{:X}, process_handle=0x{:X}, src_address=0x{:X}, size=0x{:X}", + dst_address, process_handle, src_address, size); + + // Validate the address/size. + R_UNLESS(Common::IsAligned(dst_address, PageSize), ResultInvalidAddress); + R_UNLESS(Common::IsAligned(src_address, PageSize), ResultInvalidAddress); + R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize); + R_UNLESS(size > 0, ResultInvalidSize); + R_UNLESS((dst_address < dst_address + size), ResultInvalidCurrentMemory); + R_UNLESS((src_address < src_address + size), ResultInvalidCurrentMemory); + + // Get the processes. 
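MapProcessMemory, UnmapProcessMemory, and the code-memory SVCs above all open with the same validation prologue: page alignment of address and size, a non-zero size, and an overflow check on address + size. Condensed into a plain predicate (PageSize and R_UNLESS are kernel-side; this free-standing version just returns bool and assumes a 4 KiB page):

#include <cstdint>

constexpr bool IsValidAddressRange(std::uint64_t address, std::uint64_t size,
                                   std::uint64_t page_size = 0x1000) {
    const bool aligned = (address % page_size == 0) && (size % page_size == 0);
    const bool non_empty = size > 0;
    // "address < address + size" rejects ranges that wrap past the top of the
    // address space: unsigned arithmetic wraps, so an overflowing sum comes
    // out smaller than its base.
    const bool no_overflow = address < address + size;
    return aligned && non_empty && no_overflow;
}

static_assert(IsValidAddressRange(0x1000, 0x2000));
static_assert(!IsValidAddressRange(0x1001, 0x2000));               // misaligned address
static_assert(!IsValidAddressRange(0x1000, 0));                    // empty range
static_assert(!IsValidAddressRange(0xFFFFFFFFFFFFF000ULL, 0x2000)); // wraps around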
+ KProcess* dst_process = system.CurrentProcess(); + KScopedAutoObject src_process = + dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle); + R_UNLESS(src_process.IsNotNull(), ResultInvalidHandle); + + // Get the page tables. + auto& dst_pt = dst_process->PageTable(); + auto& src_pt = src_process->PageTable(); + + // Validate that the mapping is in range. + R_UNLESS(src_pt.Contains(src_address, size), ResultInvalidCurrentMemory); + R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState::SharedCode), + ResultInvalidMemoryRegion); + + // Unmap the memory. + R_TRY(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address)); + + return ResultSuccess; +} + +static ResultCode CreateCodeMemory(Core::System& system, Handle* out, VAddr address, size_t size) { + LOG_TRACE(Kernel_SVC, "called, handle_out=0x{:X}, address=0x{:X}, size=0x{:X}", + static_cast<void*>(out), address, size); + // Get kernel instance. + auto& kernel = system.Kernel(); + + // Validate address / size. + R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress); + R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize); + R_UNLESS(size > 0, ResultInvalidSize); + R_UNLESS((address < address + size), ResultInvalidCurrentMemory); + + // Create the code memory. + + KCodeMemory* code_mem = KCodeMemory::Create(kernel); + R_UNLESS(code_mem != nullptr, ResultOutOfResource); + + // Verify that the region is in range. + R_UNLESS(system.CurrentProcess()->PageTable().Contains(address, size), + ResultInvalidCurrentMemory); + + // Initialize the code memory. + R_TRY(code_mem->Initialize(system.DeviceMemory(), address, size)); + + // Register the code memory. + KCodeMemory::Register(kernel, code_mem); + + // Add the code memory to the handle table. + R_TRY(system.CurrentProcess()->GetHandleTable().Add(out, code_mem)); + + code_mem->Close(); + + return ResultSuccess; +} + +static ResultCode ControlCodeMemory(Core::System& system, Handle code_memory_handle, u32 operation, + VAddr address, size_t size, Svc::MemoryPermission perm) { + + LOG_TRACE(Kernel_SVC, + "called, code_memory_handle=0x{:X}, operation=0x{:X}, address=0x{:X}, size=0x{:X}, " + "permission=0x{:X}", + code_memory_handle, operation, address, size, perm); + + // Validate the address / size. + R_UNLESS(Common::IsAligned(address, PageSize), ResultInvalidAddress); + R_UNLESS(Common::IsAligned(size, PageSize), ResultInvalidSize); + R_UNLESS(size > 0, ResultInvalidSize); + R_UNLESS((address < address + size), ResultInvalidCurrentMemory); + + // Get the code memory from its handle. + KScopedAutoObject code_mem = + system.CurrentProcess()->GetHandleTable().GetObject<KCodeMemory>(code_memory_handle); + R_UNLESS(code_mem.IsNotNull(), ResultInvalidHandle); + + // NOTE: Here, Atmosphere extends the SVC to allow code memory operations on one's own process. + // This enables homebrew usage of these SVCs for JIT. + + // Perform the operation. + switch (static_cast<CodeMemoryOperation>(operation)) { + case CodeMemoryOperation::Map: { + // Check that the region is in range. + R_UNLESS( + system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut), + ResultInvalidMemoryRegion); + + // Check the memory permission. + R_UNLESS(IsValidMapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission); + + // Map the memory. + R_TRY(code_mem->Map(address, size)); + } break; + case CodeMemoryOperation::Unmap: { + // Check that the region is in range. 
+ R_UNLESS( + system.CurrentProcess()->PageTable().CanContain(address, size, KMemoryState::CodeOut), + ResultInvalidMemoryRegion); + + // Check the memory permission. + R_UNLESS(IsValidUnmapCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission); + + // Unmap the memory. + R_TRY(code_mem->Unmap(address, size)); + } break; + case CodeMemoryOperation::MapToOwner: { + // Check that the region is in range. + R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size, + KMemoryState::GeneratedCode), + ResultInvalidMemoryRegion); + + // Check the memory permission. + R_UNLESS(IsValidMapToOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission); + + // Map the memory to its owner. + R_TRY(code_mem->MapToOwner(address, size, perm)); + } break; + case CodeMemoryOperation::UnmapFromOwner: { + // Check that the region is in range. + R_UNLESS(code_mem->GetOwner()->PageTable().CanContain(address, size, + KMemoryState::GeneratedCode), + ResultInvalidMemoryRegion); + + // Check the memory permission. + R_UNLESS(IsValidUnmapFromOwnerCodeMemoryPermission(perm), ResultInvalidNewMemoryPermission); + + // Unmap the memory from its owner. + R_TRY(code_mem->UnmapFromOwner(address, size)); + } break; + default: + return ResultInvalidEnumValue; + } + + return ResultSuccess; +} + static ResultCode QueryProcessMemory(Core::System& system, VAddr memory_info_address, VAddr page_info_address, Handle process_handle, VAddr address) { - std::lock_guard lock{HLE::g_hle_lock}; LOG_TRACE(Kernel_SVC, "called process=0x{:08X} address={:X}", process_handle, address); const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable(); KScopedAutoObject process = handle_table.GetObject<KProcess>(process_handle); @@ -2600,8 +2802,8 @@ static const FunctionDef SVC_Table_64[] = { {0x48, nullptr, "MapPhysicalMemoryUnsafe"}, {0x49, nullptr, "UnmapPhysicalMemoryUnsafe"}, {0x4A, nullptr, "SetUnsafeLimit"}, - {0x4B, nullptr, "CreateCodeMemory"}, - {0x4C, nullptr, "ControlCodeMemory"}, + {0x4B, SvcWrap64<CreateCodeMemory>, "CreateCodeMemory"}, + {0x4C, SvcWrap64<ControlCodeMemory>, "ControlCodeMemory"}, {0x4D, nullptr, "SleepSystem"}, {0x4E, nullptr, "ReadWriteRegister"}, {0x4F, nullptr, "SetProcessActivity"}, @@ -2641,8 +2843,8 @@ static const FunctionDef SVC_Table_64[] = { {0x71, nullptr, "ManageNamedPort"}, {0x72, nullptr, "ConnectToPort"}, {0x73, SvcWrap64<SetProcessMemoryPermission>, "SetProcessMemoryPermission"}, - {0x74, nullptr, "MapProcessMemory"}, - {0x75, nullptr, "UnmapProcessMemory"}, + {0x74, SvcWrap64<MapProcessMemory>, "MapProcessMemory"}, + {0x75, SvcWrap64<UnmapProcessMemory>, "UnmapProcessMemory"}, {0x76, SvcWrap64<QueryProcessMemory>, "QueryProcessMemory"}, {0x77, SvcWrap64<MapProcessCodeMemory>, "MapProcessCodeMemory"}, {0x78, SvcWrap64<UnmapProcessCodeMemory>, "UnmapProcessCodeMemory"}, diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h index 6e62e656f..86255fe6d 100644 --- a/src/core/hle/kernel/svc_wrap.h +++ b/src/core/hle/kernel/svc_wrap.h @@ -73,6 +73,23 @@ void SvcWrap64(Core::System& system) { .raw); } +// Used by MapProcessMemory and UnmapProcessMemory +template <ResultCode func(Core::System&, u64, u32, u64, u64)> +void SvcWrap64(Core::System& system) { + FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)), + Param(system, 2), Param(system, 3)) + .raw); +} + +// Used by ControlCodeMemory +template <ResultCode func(Core::System&, Handle, u32, u64, u64, Svc::MemoryPermission)> +void SvcWrap64(Core::System& system) { + 
FuncReturn(system, func(system, static_cast<Handle>(Param(system, 0)), + static_cast<u32>(Param(system, 1)), Param(system, 2), Param(system, 3), + static_cast<Svc::MemoryPermission>(Param(system, 4))) + .raw); +} + template <ResultCode func(Core::System&, u32*)> void SvcWrap64(Core::System& system) { u32 param = 0; @@ -301,6 +318,16 @@ void SvcWrap64(Core::System& system) { FuncReturn(system, retval); } +// Used by CreateCodeMemory +template <ResultCode func(Core::System&, Handle*, u64, u64)> +void SvcWrap64(Core::System& system) { + u32 param_1 = 0; + const u32 retval = func(system, ¶m_1, Param(system, 1), Param(system, 2)).raw; + + system.CurrentArmInterface().SetReg(1, param_1); + FuncReturn(system, retval); +} + template <ResultCode func(Core::System&, Handle*, u64, u32, u32)> void SvcWrap64(Core::System& system) { u32 param_1 = 0; diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp index 8cd7279a3..aa985d820 100644 --- a/src/core/hle/kernel/time_manager.cpp +++ b/src/core/hle/kernel/time_manager.cpp @@ -5,6 +5,7 @@ #include "common/assert.h" #include "core/core.h" #include "core/core_timing.h" +#include "core/hle/kernel/k_scheduler.h" #include "core/hle/kernel/k_thread.h" #include "core/hle/kernel/time_manager.h" @@ -15,7 +16,10 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} { Core::Timing::CreateEvent("Kernel::TimeManagerCallback", [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { KThread* thread = reinterpret_cast<KThread*>(thread_handle); - thread->Wakeup(); + { + KScopedSchedulerLock sl(system.Kernel()); + thread->OnTimer(); + } }); } diff --git a/src/core/hle/lock.cpp b/src/core/hle/lock.cpp deleted file mode 100644 index be4bfce3b..000000000 --- a/src/core/hle/lock.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2017 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#include <core/hle/lock.h> - -namespace HLE { -std::recursive_mutex g_hle_lock; -} diff --git a/src/core/hle/lock.h b/src/core/hle/lock.h deleted file mode 100644 index 5c99fe996..000000000 --- a/src/core/hle/lock.h +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2017 Citra Emulator Project -// Licensed under GPLv2 or any later version -// Refer to the license.txt file included. - -#pragma once - -#include <mutex> - -namespace HLE { -/* - * Synchronizes access to the internal HLE kernel structures, it is acquired when a guest - * application thread performs a syscall. It should be acquired by any host threads that read or - * modify the HLE kernel state. Note: Any operation that directly or indirectly reads from or writes - * to the emulated memory is not protected by this mutex, and should be avoided in any threads other - * than the CPU thread. 
- */ -extern std::recursive_mutex g_hle_lock; -} // namespace HLE diff --git a/src/core/hle/service/bcat/backend/backend.cpp b/src/core/hle/service/bcat/backend/backend.cpp index 4c7d3bb6e..ee49edbb9 100644 --- a/src/core/hle/service/bcat/backend/backend.cpp +++ b/src/core/hle/service/bcat/backend/backend.cpp @@ -6,7 +6,6 @@ #include "common/logging/log.h" #include "core/core.h" #include "core/hle/kernel/k_event.h" -#include "core/hle/lock.h" #include "core/hle/service/bcat/backend/backend.h" namespace Service::BCAT { @@ -29,10 +28,6 @@ DeliveryCacheProgressImpl& ProgressServiceBackend::GetImpl() { return impl; } -void ProgressServiceBackend::SetNeedHLELock(bool need) { - need_hle_lock = need; -} - void ProgressServiceBackend::SetTotalSize(u64 size) { impl.total_bytes = size; SignalUpdate(); @@ -88,12 +83,7 @@ void ProgressServiceBackend::FinishDownload(ResultCode result) { } void ProgressServiceBackend::SignalUpdate() { - if (need_hle_lock) { - std::lock_guard lock(HLE::g_hle_lock); - update_event->GetWritableEvent().Signal(); - } else { - update_event->GetWritableEvent().Signal(); - } + update_event->GetWritableEvent().Signal(); } Backend::Backend(DirectoryGetter getter) : dir_getter(std::move(getter)) {} diff --git a/src/core/hle/service/bcat/backend/backend.h b/src/core/hle/service/bcat/backend/backend.h index 59c6d4740..63833c927 100644 --- a/src/core/hle/service/bcat/backend/backend.h +++ b/src/core/hle/service/bcat/backend/backend.h @@ -71,10 +71,6 @@ class ProgressServiceBackend { public: ~ProgressServiceBackend(); - // Clients should call this with true if any of the functions are going to be called from a - // non-HLE thread and this class need to lock the hle mutex. (default is false) - void SetNeedHLELock(bool need); - // Sets the number of bytes total in the entire download. void SetTotalSize(u64 size); @@ -109,7 +105,6 @@ private: DeliveryCacheProgressImpl impl{}; Kernel::KEvent* update_event; - bool need_hle_lock = false; }; // A class representing an abstract backend for BCAT functionality. diff --git a/src/core/hle/service/glue/glue.cpp b/src/core/hle/service/glue/glue.cpp index a08dc9758..b24d469cf 100644 --- a/src/core/hle/service/glue/glue.cpp +++ b/src/core/hle/service/glue/glue.cpp @@ -8,6 +8,7 @@ #include "core/hle/service/glue/bgtc.h" #include "core/hle/service/glue/ectx.h" #include "core/hle/service/glue/glue.h" +#include "core/hle/service/glue/notif.h" namespace Service::Glue { @@ -24,6 +25,9 @@ void InstallInterfaces(Core::System& system) { // Error Context std::make_shared<ECTX_AW>(system)->InstallAsService(system.ServiceManager()); + + // Notification Services for application + std::make_shared<NOTIF_A>(system)->InstallAsService(system.ServiceManager()); } } // namespace Service::Glue diff --git a/src/core/hle/service/glue/notif.cpp b/src/core/hle/service/glue/notif.cpp new file mode 100644 index 000000000..c559ec9df --- /dev/null +++ b/src/core/hle/service/glue/notif.cpp @@ -0,0 +1,44 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. 
+ +#include "core/hle/ipc_helpers.h" +#include "core/hle/service/glue/notif.h" + +namespace Service::Glue { + +NOTIF_A::NOTIF_A(Core::System& system_) : ServiceFramework{system_, "notif:a"} { + // clang-format off + static const FunctionInfo functions[] = { + {500, nullptr, "RegisterAlarmSetting"}, + {510, nullptr, "UpdateAlarmSetting"}, + {520, &NOTIF_A::ListAlarmSettings, "ListAlarmSettings"}, + {530, nullptr, "LoadApplicationParameter"}, + {540, nullptr, "DeleteAlarmSetting"}, + {1000, &NOTIF_A::Initialize, "Initialize"}, + }; + // clang-format on + + RegisterHandlers(functions); +} + +NOTIF_A::~NOTIF_A() = default; + +void NOTIF_A::ListAlarmSettings(Kernel::HLERequestContext& ctx) { + // Returns an array of AlarmSetting + constexpr s32 alarm_count = 0; + + LOG_WARNING(Service_NOTIF, "(STUBBED) called"); + + IPC::ResponseBuilder rb{ctx, 3}; + rb.Push(ResultSuccess); + rb.Push(alarm_count); +} + +void NOTIF_A::Initialize(Kernel::HLERequestContext& ctx) { + LOG_WARNING(Service_NOTIF, "(STUBBED) called"); + IPC::ResponseBuilder rb{ctx, 2}; + rb.Push(ResultSuccess); +} + +} // namespace Service::Glue diff --git a/src/core/hle/service/glue/notif.h b/src/core/hle/service/glue/notif.h new file mode 100644 index 000000000..6ecf2015c --- /dev/null +++ b/src/core/hle/service/glue/notif.h @@ -0,0 +1,25 @@ +// Copyright 2021 yuzu Emulator Project +// Licensed under GPLv2 or any later version +// Refer to the license.txt file included. + +#pragma once + +#include "core/hle/service/service.h" + +namespace Core { +class System; +} + +namespace Service::Glue { + +class NOTIF_A final : public ServiceFramework<NOTIF_A> { +public: + explicit NOTIF_A(Core::System& system_); + ~NOTIF_A() override; + +private: + void ListAlarmSettings(Kernel::HLERequestContext& ctx); + void Initialize(Kernel::HLERequestContext& ctx); +}; + +} // namespace Service::Glue diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp index ae56f10cf..e5c951e06 100644 --- a/src/core/hle/service/hid/controllers/npad.cpp +++ b/src/core/hle/service/hid/controllers/npad.cpp @@ -66,9 +66,9 @@ Controller_NPad::Controller_NPad(Core::HID::HIDCore& hid_core_, auto& controller = controller_data[i]; controller.device = hid_core.GetEmulatedControllerByIndex(i); controller.vibration[Core::HID::EmulatedDeviceIndex::LeftIndex].latest_vibration_value = - DEFAULT_VIBRATION_VALUE; + Core::HID::DEFAULT_VIBRATION_VALUE; controller.vibration[Core::HID::EmulatedDeviceIndex::RightIndex].latest_vibration_value = - DEFAULT_VIBRATION_VALUE; + Core::HID::DEFAULT_VIBRATION_VALUE; Core::HID::ControllerUpdateCallback engine_callback{ .on_change = [this, i](Core::HID::ControllerTriggerType type) { ControllerUpdate(type, i); }, @@ -110,7 +110,7 @@ void Controller_NPad::ControllerUpdate(Core::HID::ControllerTriggerType type, UpdateControllerAt(npad_type, npad_id, is_connected); break; case Core::HID::ControllerTriggerType::Battery: { - if (!controller.is_connected) { + if (!controller.device->IsConnected()) { return; } auto& shared_memory = controller.shared_memory_entry; @@ -150,7 +150,6 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) { shared_memory.system_properties.is_vertical.Assign(1); shared_memory.system_properties.use_plus.Assign(1); shared_memory.system_properties.use_minus.Assign(1); - shared_memory.assignment_mode = NpadJoyAssignmentMode::Single; shared_memory.applet_footer.type = AppletFooterUiType::SwitchProController; break; case Core::HID::NpadStyleIndex::Handheld: @@ 
-166,21 +165,30 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) { break; case Core::HID::NpadStyleIndex::JoyconDual: shared_memory.style_tag.joycon_dual.Assign(1); - shared_memory.device_type.joycon_left.Assign(1); - shared_memory.device_type.joycon_right.Assign(1); - shared_memory.system_properties.is_vertical.Assign(1); - shared_memory.system_properties.use_plus.Assign(1); - shared_memory.system_properties.use_minus.Assign(1); + if (controller.is_dual_left_connected) { + shared_memory.device_type.joycon_left.Assign(1); + shared_memory.system_properties.use_minus.Assign(1); + } + if (controller.is_dual_right_connected) { + shared_memory.device_type.joycon_right.Assign(1); + shared_memory.system_properties.use_plus.Assign(1); + } shared_memory.system_properties.use_directional_buttons.Assign(1); + shared_memory.system_properties.is_vertical.Assign(1); shared_memory.assignment_mode = NpadJoyAssignmentMode::Dual; - shared_memory.applet_footer.type = AppletFooterUiType::JoyDual; + if (controller.is_dual_left_connected && controller.is_dual_right_connected) { + shared_memory.applet_footer.type = AppletFooterUiType::JoyDual; + } else if (controller.is_dual_left_connected) { + shared_memory.applet_footer.type = AppletFooterUiType::JoyDualLeftOnly; + } else { + shared_memory.applet_footer.type = AppletFooterUiType::JoyDualRightOnly; + } break; case Core::HID::NpadStyleIndex::JoyconLeft: shared_memory.style_tag.joycon_left.Assign(1); shared_memory.device_type.joycon_left.Assign(1); shared_memory.system_properties.is_horizontal.Assign(1); shared_memory.system_properties.use_minus.Assign(1); - shared_memory.assignment_mode = NpadJoyAssignmentMode::Single; shared_memory.applet_footer.type = AppletFooterUiType::JoyLeftHorizontal; break; case Core::HID::NpadStyleIndex::JoyconRight: @@ -188,7 +196,6 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) { shared_memory.device_type.joycon_right.Assign(1); shared_memory.system_properties.is_horizontal.Assign(1); shared_memory.system_properties.use_plus.Assign(1); - shared_memory.assignment_mode = NpadJoyAssignmentMode::Single; shared_memory.applet_footer.type = AppletFooterUiType::JoyRightHorizontal; break; case Core::HID::NpadStyleIndex::GameCube: @@ -200,7 +207,6 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) { case Core::HID::NpadStyleIndex::Pokeball: shared_memory.style_tag.palma.Assign(1); shared_memory.device_type.palma.Assign(1); - shared_memory.assignment_mode = NpadJoyAssignmentMode::Single; break; case Core::HID::NpadStyleIndex::NES: shared_memory.style_tag.lark.Assign(1); @@ -443,11 +449,15 @@ void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing, u8* case Core::HID::NpadStyleIndex::JoyconDual: pad_state.connection_status.raw = 0; pad_state.connection_status.is_connected.Assign(1); - pad_state.connection_status.is_left_connected.Assign(1); - pad_state.connection_status.is_right_connected.Assign(1); + if (controller.is_dual_left_connected) { + pad_state.connection_status.is_left_connected.Assign(1); + libnx_state.connection_status.is_left_connected.Assign(1); + } + if (controller.is_dual_right_connected) { + pad_state.connection_status.is_right_connected.Assign(1); + libnx_state.connection_status.is_right_connected.Assign(1); + } - libnx_state.connection_status.is_left_connected.Assign(1); - libnx_state.connection_status.is_right_connected.Assign(1); pad_state.sampling_number = 
npad.joy_dual_lifo.ReadCurrentEntry().state.sampling_number + 1; npad.joy_dual_lifo.WriteNextEntry(pad_state); @@ -687,7 +697,7 @@ Controller_NPad::NpadCommunicationMode Controller_NPad::GetNpadCommunicationMode return communication_mode; } -void Controller_NPad::SetNpadMode(Core::HID::NpadIdType npad_id, +void Controller_NPad::SetNpadMode(Core::HID::NpadIdType npad_id, NpadJoyDeviceType npad_device_type, NpadJoyAssignmentMode assignment_mode) { if (!IsNpadIdValid(npad_id)) { LOG_ERROR(Service_HID, "Invalid NpadIdType npad_id:{}", npad_id); @@ -698,6 +708,62 @@ void Controller_NPad::SetNpadMode(Core::HID::NpadIdType npad_id, if (controller.shared_memory_entry.assignment_mode != assignment_mode) { controller.shared_memory_entry.assignment_mode = assignment_mode; } + + if (!controller.device->IsConnected()) { + return; + } + + if (assignment_mode == NpadJoyAssignmentMode::Dual) { + if (controller.device->GetNpadStyleIndex() == Core::HID::NpadStyleIndex::JoyconLeft) { + DisconnectNpad(npad_id); + controller.is_dual_left_connected = true; + controller.is_dual_right_connected = false; + UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconDual, npad_id, true); + return; + } + if (controller.device->GetNpadStyleIndex() == Core::HID::NpadStyleIndex::JoyconRight) { + DisconnectNpad(npad_id); + controller.is_dual_left_connected = false; + controller.is_dual_right_connected = true; + UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconDual, npad_id, true); + return; + } + return; + } + + // This is for NpadJoyAssignmentMode::Single + + // Only JoyconDual get affected by this function + if (controller.device->GetNpadStyleIndex() != Core::HID::NpadStyleIndex::JoyconDual) { + return; + } + + if (controller.is_dual_left_connected && !controller.is_dual_right_connected) { + DisconnectNpad(npad_id); + UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconLeft, npad_id, true); + return; + } + if (!controller.is_dual_left_connected && controller.is_dual_right_connected) { + DisconnectNpad(npad_id); + UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconRight, npad_id, true); + return; + } + + // We have two controllers connected to the same npad_id we need to split them + const auto npad_id_2 = hid_core.GetFirstDisconnectedNpadId(); + auto& controller_2 = GetControllerFromNpadIdType(npad_id_2); + DisconnectNpad(npad_id); + if (npad_device_type == NpadJoyDeviceType::Left) { + UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconLeft, npad_id, true); + controller_2.is_dual_left_connected = false; + controller_2.is_dual_right_connected = true; + UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconDual, npad_id_2, true); + } else { + UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconRight, npad_id, true); + controller_2.is_dual_left_connected = true; + controller_2.is_dual_right_connected = false; + UpdateControllerAt(Core::HID::NpadStyleIndex::JoyconDual, npad_id_2, true); + } } bool Controller_NPad::VibrateControllerAtIndex(Core::HID::NpadIdType npad_id, @@ -715,7 +781,8 @@ bool Controller_NPad::VibrateControllerAtIndex(Core::HID::NpadIdType npad_id, Core::HID::VibrationValue vibration{0.0f, 160.0f, 0.0f, 320.0f}; controller.device->SetVibration(device_index, vibration); // Then reset the vibration value to its default value. 
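The MergeSingleJoyAsDualJoy rework below enumerates eight style combinations; they appear to reduce to one rule: between the two npads there must be exactly one left half and one right half (a single joycon or the surviving side of a dual both count as one half). A condensed sketch of that predicate over simplified inputs, not the controller code's real types:

// What does each npad currently contribute?
struct JoyconHalves {
    bool left;
    bool right;
};

// Eligible when the two npads together hold exactly one left and one right half.
constexpr bool CanMergeAsDualJoy(JoyconHalves a, JoyconHalves b) {
    const int lefts = static_cast<int>(a.left) + static_cast<int>(b.left);
    const int rights = static_cast<int>(a.right) + static_cast<int>(b.right);
    return lefts == 1 && rights == 1;
}

static_assert(CanMergeAsDualJoy({true, false}, {false, true}));  // left + right
static_assert(CanMergeAsDualJoy({false, true}, {true, false}));  // right + left
static_assert(!CanMergeAsDualJoy({true, false}, {true, false})); // two lefts
static_assert(!CanMergeAsDualJoy({true, true}, {false, true}));  // full dual + right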
- controller.vibration[device_index].latest_vibration_value = DEFAULT_VIBRATION_VALUE; + controller.vibration[device_index].latest_vibration_value = + Core::HID::DEFAULT_VIBRATION_VALUE; } return false; @@ -907,6 +974,7 @@ void Controller_NPad::DisconnectNpad(Core::HID::NpadIdType npad_id) { } auto& shared_memory_entry = controller.shared_memory_entry; + // Don't reset shared_memory_entry.assignment_mode this value is persistent shared_memory_entry.style_tag.raw = Core::HID::NpadStyleSet::None; // Zero out shared_memory_entry.device_type.raw = 0; shared_memory_entry.system_properties.raw = 0; @@ -923,9 +991,10 @@ void Controller_NPad::DisconnectNpad(Core::HID::NpadIdType npad_id) { .left = {}, .right = {}, }; - shared_memory_entry.assignment_mode = NpadJoyAssignmentMode::Dual; shared_memory_entry.applet_footer.type = AppletFooterUiType::None; + controller.is_dual_left_connected = true; + controller.is_dual_right_connected = true; controller.is_connected = false; controller.device->Disconnect(); SignalStyleSetChangedEvent(npad_id); @@ -1022,19 +1091,70 @@ void Controller_NPad::MergeSingleJoyAsDualJoy(Core::HID::NpadIdType npad_id_1, npad_id_2); return; } - auto& controller_1 = GetControllerFromNpadIdType(npad_id_1).device; - auto& controller_2 = GetControllerFromNpadIdType(npad_id_2).device; + auto& controller_1 = GetControllerFromNpadIdType(npad_id_1); + auto& controller_2 = GetControllerFromNpadIdType(npad_id_2); + const auto controller_style_1 = controller_1.device->GetNpadStyleIndex(); + const auto controller_style_2 = controller_2.device->GetNpadStyleIndex(); + bool merge_controllers = false; // If the controllers at both npad indices form a pair of left and right joycons, merge them. // Otherwise, do nothing. - if ((controller_1->GetNpadStyleIndex() == Core::HID::NpadStyleIndex::JoyconLeft && - controller_2->GetNpadStyleIndex() == Core::HID::NpadStyleIndex::JoyconRight) || - (controller_2->GetNpadStyleIndex() == Core::HID::NpadStyleIndex::JoyconLeft && - controller_1->GetNpadStyleIndex() == Core::HID::NpadStyleIndex::JoyconRight)) { + if (controller_style_1 == Core::HID::NpadStyleIndex::JoyconLeft && + controller_style_2 == Core::HID::NpadStyleIndex::JoyconRight) { + merge_controllers = true; + } + if (controller_style_2 == Core::HID::NpadStyleIndex::JoyconLeft && + controller_style_1 == Core::HID::NpadStyleIndex::JoyconRight) { + merge_controllers = true; + } + if (controller_style_1 == Core::HID::NpadStyleIndex::JoyconDual && + controller_style_2 == Core::HID::NpadStyleIndex::JoyconRight && + controller_1.is_dual_left_connected && !controller_1.is_dual_right_connected) { + merge_controllers = true; + } + if (controller_style_1 == Core::HID::NpadStyleIndex::JoyconDual && + controller_style_2 == Core::HID::NpadStyleIndex::JoyconLeft && + !controller_1.is_dual_left_connected && controller_1.is_dual_right_connected) { + merge_controllers = true; + } + if (controller_style_2 == Core::HID::NpadStyleIndex::JoyconDual && + controller_style_1 == Core::HID::NpadStyleIndex::JoyconRight && + controller_2.is_dual_left_connected && !controller_2.is_dual_right_connected) { + merge_controllers = true; + } + if (controller_style_2 == Core::HID::NpadStyleIndex::JoyconDual && + controller_style_1 == Core::HID::NpadStyleIndex::JoyconLeft && + !controller_2.is_dual_left_connected && controller_2.is_dual_right_connected) { + merge_controllers = true; + } + if (controller_style_1 == Core::HID::NpadStyleIndex::JoyconDual && + controller_style_2 == Core::HID::NpadStyleIndex::JoyconDual && + 
controller_1.is_dual_left_connected && !controller_1.is_dual_right_connected && + !controller_2.is_dual_left_connected && controller_2.is_dual_right_connected) { + merge_controllers = true; + } + if (controller_style_1 == Core::HID::NpadStyleIndex::JoyconDual && + controller_style_2 == Core::HID::NpadStyleIndex::JoyconDual && + !controller_1.is_dual_left_connected && controller_1.is_dual_right_connected && + controller_2.is_dual_left_connected && !controller_2.is_dual_right_connected) { + merge_controllers = true; + } + + if (merge_controllers) { // Disconnect the joycon at the second id and connect the dual joycon at the first index. DisconnectNpad(npad_id_2); + controller_1.is_dual_left_connected = true; + controller_1.is_dual_right_connected = true; AddNewControllerAt(Core::HID::NpadStyleIndex::JoyconDual, npad_id_1); + return; } + LOG_WARNING(Service_HID, + "Controllers can't be merged npad_id_1:{}, npad_id_2:{}, type_1:{}, type_2:{}, " + "dual_1(left/right):{}/{}, dual_2(left/right):{}/{}", + npad_id_1, npad_id_2, controller_1.device->GetNpadStyleIndex(), + controller_2.device->GetNpadStyleIndex(), controller_1.is_dual_left_connected, + controller_1.is_dual_right_connected, controller_2.is_dual_left_connected, + controller_2.is_dual_right_connected); } void Controller_NPad::StartLRAssignmentMode() { diff --git a/src/core/hle/service/hid/controllers/npad.h b/src/core/hle/service/hid/controllers/npad.h index de5fa5a64..6b2872bad 100644 --- a/src/core/hle/service/hid/controllers/npad.h +++ b/src/core/hle/service/hid/controllers/npad.h @@ -90,13 +90,6 @@ public: Default = 3, }; - static constexpr Core::HID::VibrationValue DEFAULT_VIBRATION_VALUE{ - .low_amplitude = 0.0f, - .low_frequency = 160.0f, - .high_amplitude = 0.0f, - .high_frequency = 320.0f, - }; - void SetSupportedStyleSet(Core::HID::NpadStyleTag style_set); Core::HID::NpadStyleTag GetSupportedStyleSet() const; @@ -113,7 +106,8 @@ public: void SetNpadCommunicationMode(NpadCommunicationMode communication_mode_); NpadCommunicationMode GetNpadCommunicationMode() const; - void SetNpadMode(Core::HID::NpadIdType npad_id, NpadJoyAssignmentMode assignment_mode); + void SetNpadMode(Core::HID::NpadIdType npad_id, NpadJoyDeviceType npad_device_type, + NpadJoyAssignmentMode assignment_mode); bool VibrateControllerAtIndex(Core::HID::NpadIdType npad_id, std::size_t device_index, const Core::HID::VibrationValue& vibration_value); @@ -464,7 +458,10 @@ private: std::array<VibrationData, 2> vibration{}; bool unintended_home_button_input_protection{}; bool is_connected{}; - Core::HID::NpadStyleIndex npad_type{Core::HID::NpadStyleIndex::None}; + + // Dual joycons can have only one side connected + bool is_dual_left_connected{true}; + bool is_dual_right_connected{true}; // Motion parameters bool sixaxis_at_rest{true}; diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp index 14f737d39..6e12381fb 100644 --- a/src/core/hle/service/hid/hid.cpp +++ b/src/core/hle/service/hid/hid.cpp @@ -975,35 +975,35 @@ void Hid::SetNpadJoyAssignmentModeSingleByDefault(Kernel::HLERequestContext& ctx const auto parameters{rp.PopRaw<Parameters>()}; applet_resource->GetController<Controller_NPad>(HidController::NPad) - .SetNpadMode(parameters.npad_id, Controller_NPad::NpadJoyAssignmentMode::Single); + .SetNpadMode(parameters.npad_id, Controller_NPad::NpadJoyDeviceType::Left, + Controller_NPad::NpadJoyAssignmentMode::Single); - LOG_WARNING(Service_HID, "(STUBBED) called, npad_id={}, applet_resource_user_id={}", - parameters.npad_id, 
parameters.applet_resource_user_id); + LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}", parameters.npad_id, + parameters.applet_resource_user_id); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(ResultSuccess); } void Hid::SetNpadJoyAssignmentModeSingle(Kernel::HLERequestContext& ctx) { - // TODO: Check the differences between this and SetNpadJoyAssignmentModeSingleByDefault IPC::RequestParser rp{ctx}; struct Parameters { Core::HID::NpadIdType npad_id; INSERT_PADDING_WORDS_NOINIT(1); u64 applet_resource_user_id; - u64 npad_joy_device_type; + Controller_NPad::NpadJoyDeviceType npad_joy_device_type; }; static_assert(sizeof(Parameters) == 0x18, "Parameters has incorrect size."); const auto parameters{rp.PopRaw<Parameters>()}; applet_resource->GetController<Controller_NPad>(HidController::NPad) - .SetNpadMode(parameters.npad_id, Controller_NPad::NpadJoyAssignmentMode::Single); + .SetNpadMode(parameters.npad_id, parameters.npad_joy_device_type, + Controller_NPad::NpadJoyAssignmentMode::Single); - LOG_WARNING(Service_HID, - "(STUBBED) called, npad_id={}, applet_resource_user_id={}, npad_joy_device_type={}", - parameters.npad_id, parameters.applet_resource_user_id, - parameters.npad_joy_device_type); + LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}, npad_joy_device_type={}", + parameters.npad_id, parameters.applet_resource_user_id, + parameters.npad_joy_device_type); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(ResultSuccess); @@ -1021,10 +1021,10 @@ void Hid::SetNpadJoyAssignmentModeDual(Kernel::HLERequestContext& ctx) { const auto parameters{rp.PopRaw<Parameters>()}; applet_resource->GetController<Controller_NPad>(HidController::NPad) - .SetNpadMode(parameters.npad_id, Controller_NPad::NpadJoyAssignmentMode::Dual); + .SetNpadMode(parameters.npad_id, {}, Controller_NPad::NpadJoyAssignmentMode::Dual); - LOG_WARNING(Service_HID, "(STUBBED) called, npad_id={}, applet_resource_user_id={}", - parameters.npad_id, parameters.applet_resource_user_id); + LOG_INFO(Service_HID, "called, npad_id={}, applet_resource_user_id={}", parameters.npad_id, + parameters.applet_resource_user_id); IPC::ResponseBuilder rb{ctx, 2}; rb.Push(ResultSuccess); @@ -1404,7 +1404,7 @@ void Hid::SendVibrationGcErmCommand(Kernel::HLERequestContext& ctx) { .high_frequency = 0.0f, }; default: - return Controller_NPad::DEFAULT_VIBRATION_VALUE; + return Core::HID::DEFAULT_VIBRATION_VALUE; } }(); diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp index 693ffc71a..761d0d3c6 100644 --- a/src/core/hle/service/nfp/nfp.cpp +++ b/src/core/hle/service/nfp/nfp.cpp @@ -9,7 +9,6 @@ #include "core/core.h" #include "core/hle/ipc_helpers.h" #include "core/hle/kernel/k_event.h" -#include "core/hle/lock.h" #include "core/hle/service/nfp/nfp.h" #include "core/hle/service/nfp/nfp_user.h" @@ -337,7 +336,6 @@ void Module::Interface::CreateUserInterface(Kernel::HLERequestContext& ctx) { } bool Module::Interface::LoadAmiibo(const std::vector<u8>& buffer) { - std::lock_guard lock{HLE::g_hle_lock}; if (buffer.size() < sizeof(AmiiboFile)) { return false; } diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp index 3ae9e6e0e..99ed34b00 100644 --- a/src/core/loader/kip.cpp +++ b/src/core/loader/kip.cpp @@ -71,7 +71,6 @@ AppLoader::LoadResult AppLoader_KIP::Load(Kernel::KProcess& process, kip->GetTitleID(), 0xFFFFFFFFFFFFFFFF, 0x1FE00000, kip->GetKernelCapabilities()); - const VAddr base_address = process.PageTable().GetCodeRegionStart(); Kernel::CodeSet codeset; Kernel::PhysicalMemory 
program_image; @@ -91,7 +90,14 @@ AppLoader::LoadResult AppLoader_KIP::Load(Kernel::KProcess& process, program_image.resize(PageAlignSize(kip->GetBSSOffset()) + kip->GetBSSSize()); codeset.DataSegment().size += kip->GetBSSSize(); + // Set up the process code layout + if (process.LoadFromMetadata(FileSys::ProgramMetadata::GetDefault(), program_image.size()) .IsError()) { + return {ResultStatus::ErrorNotInitialized, {}}; + } + codeset.memory = std::move(program_image); + const VAddr base_address = process.PageTable().GetCodeRegionStart(); process.LoadModule(std::move(codeset), base_address); LOG_DEBUG(Loader, "loaded module {} @ 0x{:X}", kip->GetName(), base_address);
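The loader rounds every segment, including BSS, up to page granularity via PageAlignSize before sizing the program image. A sketch of that kind of helper, assuming the 4 KiB page size used throughout (the real helper lives elsewhere in the loader sources):

#include <cstddef>

constexpr std::size_t kPageSize = 0x1000; // 4 KiB

constexpr std::size_t PageAlignSize(std::size_t size) {
    // Round up to the next multiple of the page size.
    return (size + kPageSize - 1) & ~(kPageSize - 1);
}

static_assert(PageAlignSize(0x0000) == 0x0000);
static_assert(PageAlignSize(0x0001) == 0x1000);
static_assert(PageAlignSize(0x1000) == 0x1000);
static_assert(PageAlignSize(0x1234) == 0x2000);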