Diffstat (limited to 'src/core')
19 files changed, 550 insertions, 40 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 8e3fd4505..3ef19f9c2 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -550,6 +550,12 @@ add_library(core STATIC
     hle/service/ns/ns.h
     hle/service/ns/pdm_qry.cpp
     hle/service/ns/pdm_qry.h
+    hle/service/nvdrv/core/container.cpp
+    hle/service/nvdrv/core/container.h
+    hle/service/nvdrv/core/nvmap.cpp
+    hle/service/nvdrv/core/nvmap.h
+    hle/service/nvdrv/core/syncpoint_manager.cpp
+    hle/service/nvdrv/core/syncpoint_manager.h
     hle/service/nvdrv/devices/nvdevice.h
     hle/service/nvdrv/devices/nvdisp_disp0.cpp
     hle/service/nvdrv/devices/nvdisp_disp0.h
@@ -578,8 +584,6 @@ add_library(core STATIC
     hle/service/nvdrv/nvdrv_interface.h
     hle/service/nvdrv/nvmemp.cpp
     hle/service/nvdrv/nvmemp.h
-    hle/service/nvdrv/syncpoint_manager.cpp
-    hle/service/nvdrv/syncpoint_manager.h
     hle/service/nvflinger/binder.h
     hle/service/nvflinger/buffer_item.h
     hle/service/nvflinger/buffer_item_consumer.cpp
diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp
new file mode 100644
index 000000000..97b5b2c86
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/container.cpp
@@ -0,0 +1,41 @@
+// Copyright 2021 yuzu emulator team
+// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
+#include "video_core/gpu.h"
+
+namespace Service::Nvidia::NvCore {
+
+struct ContainerImpl {
+    ContainerImpl(Tegra::GPU& gpu_) : file{}, manager{gpu_} {}
+    NvMap file;
+    SyncpointManager manager;
+};
+
+Container::Container(Tegra::GPU& gpu_) {
+    impl = std::make_unique<ContainerImpl>(gpu_);
+}
+
+Container::~Container() = default;
+
+NvMap& Container::GetNvMapFile() {
+    return impl->file;
+}
+
+const NvMap& Container::GetNvMapFile() const {
+    return impl->file;
+}
+
+SyncpointManager& Container::GetSyncpointManager() {
+    return impl->manager;
+}
+
+const SyncpointManager& Container::GetSyncpointManager() const {
+    return impl->manager;
+}
+
+} // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h
new file mode 100644
index 000000000..91ac2305a
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/container.h
@@ -0,0 +1,38 @@
+// Copyright 2021 yuzu emulator team
+// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+
+namespace Tegra {
+class GPU;
+}
+
+namespace Service::Nvidia::NvCore {
+
+class NvMap;
+class SyncpointManager;
+
+struct ContainerImpl;
+
+class Container {
+public:
+    Container(Tegra::GPU& gpu_);
+    ~Container();
+
+    NvMap& GetNvMapFile();
+
+    const NvMap& GetNvMapFile() const;
+
+    SyncpointManager& GetSyncpointManager();
+
+    const SyncpointManager& GetSyncpointManager() const;
+
+private:
+    std::unique_ptr<ContainerImpl> impl;
+};
+
+} // namespace Service::Nvidia::NvCore
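The new Container is a small pimpl wrapper: ContainerImpl owns the shared NvMap instance and the SyncpointManager, and consumers only ever see the accessors declared above. A minimal usage sketch under that assumption (the DeviceLike class is illustrative, not part of this change; it mirrors how the nvhost_* devices further down cache the references):

    #include "core/hle/service/nvdrv/core/container.h"
    #include "core/hle/service/nvdrv/core/nvmap.h"
    #include "core/hle/service/nvdrv/core/syncpoint_manager.h"

    namespace Service::Nvidia {

    class DeviceLike {
    public:
        explicit DeviceLike(NvCore::Container& core_)
            // Cache the references once; the container outlives every device.
            : core{core_}, file{core_.GetNvMapFile()},
              syncpoint_manager{core_.GetSyncpointManager()} {}

    private:
        NvCore::Container& core;
        NvCore::NvMap& file;
        NvCore::SyncpointManager& syncpoint_manager;
    };

    } // namespace Service::Nvidia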
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp
new file mode 100644
index 000000000..d3f227f52
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/nvmap.cpp
@@ -0,0 +1,245 @@
+// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/alignment.h"
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/memory.h"
+
+using Core::Memory::YUZU_PAGESIZE;
+
+namespace Service::Nvidia::NvCore {
+NvMap::Handle::Handle(u64 size, Id id) : size(size), aligned_size(size), orig_size(size), id(id) {}
+
+NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) {
+    std::scoped_lock lock(mutex);
+
+    // Handles cannot be allocated twice
+    if (allocated) [[unlikely]]
+        return NvResult::AccessDenied;
+
+    flags = pFlags;
+    kind = pKind;
+    align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;
+
+    // This flag is only applicable for handles with an address passed
+    if (pAddress)
+        flags.keep_uncached_after_free = 0;
+    else
+        LOG_CRITICAL(Service_NVDRV,
+                     "Mapping nvmap handles without a CPU side address is unimplemented!");
+
+    size = Common::AlignUp(size, YUZU_PAGESIZE);
+    aligned_size = Common::AlignUp(size, align);
+    address = pAddress;
+
+    // TODO: pin init
+
+    allocated = true;
+
+    return NvResult::Success;
+}
+
+NvResult NvMap::Handle::Duplicate(bool internal_session) {
+    // Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS)
+    if (!allocated) [[unlikely]]
+        return NvResult::BadValue;
+
+    std::scoped_lock lock(mutex);
+
+    // If we internally use FromId the duplication tracking of handles won't work accurately due to
+    // us not implementing per-process handle refs.
+    if (internal_session)
+        internal_dupes++;
+    else
+        dupes++;
+
+    return NvResult::Success;
+}
+
+NvMap::NvMap() = default;
+
+void NvMap::AddHandle(std::shared_ptr<Handle> handleDesc) {
+    std::scoped_lock lock(handles_lock);
+
+    handles.emplace(handleDesc->id, std::move(handleDesc));
+}
+
+void NvMap::UnmapHandle(Handle& handleDesc) {
+    // Remove pending unmap queue entry if needed
+    if (handleDesc.unmap_queue_entry) {
+        unmap_queue.erase(*handleDesc.unmap_queue_entry);
+        handleDesc.unmap_queue_entry.reset();
+    }
+
+    // Free and unmap the handle from the SMMU
+    /*
+    state.soc->smmu.Unmap(handleDesc.pin_virt_address, static_cast<u32>(handleDesc.aligned_size));
+    smmuAllocator.Free(handleDesc.pin_virt_address, static_cast<u32>(handleDesc.aligned_size));
+    handleDesc.pin_virt_address = 0;
+    */
+}
+
+bool NvMap::TryRemoveHandle(const Handle& handleDesc) {
+    // No dupes left, we can remove from handle map
+    if (handleDesc.dupes == 0 && handleDesc.internal_dupes == 0) {
+        std::scoped_lock lock(handles_lock);
+
+        auto it{handles.find(handleDesc.id)};
+        if (it != handles.end())
+            handles.erase(it);
+
+        return true;
+    } else {
+        return false;
+    }
+}
+
+NvResult NvMap::CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out) {
+    if (!size) [[unlikely]]
+        return NvResult::BadValue;
+
+    u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)};
+    auto handleDesc{std::make_shared<Handle>(size, id)};
+    AddHandle(handleDesc);
+
+    result_out = handleDesc;
+    return NvResult::Success;
+}
+
+std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
+    std::scoped_lock lock(handles_lock);
+    try {
+        return handles.at(handle);
+    } catch ([[maybe_unused]] std::out_of_range& e) {
+        return nullptr;
+    }
+}
+
+u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
+    UNIMPLEMENTED_MSG("pinning");
+    return 0;
+    /*
+    auto handleDesc{GetHandle(handle)};
+    if (!handleDesc)
+        [[unlikely]] return 0;
+
+    std::scoped_lock lock(handleDesc->mutex);
+    if (!handleDesc->pins) {
+        // If we're in the unmap queue we can just remove ourselves and return since we're already
+        // mapped
+        {
+            // Lock now to prevent our queue entry from being removed for allocation in-between the
+            // following check and erase
+            std::scoped_lock queueLock(unmap_queue_lock);
+            if (handleDesc->unmap_queue_entry) {
+                unmap_queue.erase(*handleDesc->unmap_queue_entry);
+                handleDesc->unmap_queue_entry.reset();
+
+                handleDesc->pins++;
+                return handleDesc->pin_virt_address;
+            }
+        }
+
+        // If not then allocate some space and map it
+        u32 address{};
+        while (!(address = smmuAllocator.Allocate(static_cast<u32>(handleDesc->aligned_size)))) {
+            // Free handles until the allocation succeeds
+            std::scoped_lock queueLock(unmap_queue_lock);
+            if (auto freeHandleDesc{unmap_queue.front()}) {
+                // Handles in the unmap queue are guaranteed not to be pinned so don't bother
+                // checking if they are before unmapping
+                std::scoped_lock freeLock(freeHandleDesc->mutex);
+                if (handleDesc->pin_virt_address)
+                    UnmapHandle(*freeHandleDesc);
+            } else {
+                LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
+            }
+        }
+
+        state.soc->smmu.Map(address, handleDesc->GetPointer(),
+                            static_cast<u32>(handleDesc->aligned_size));
+        handleDesc->pin_virt_address = address;
+    }
+
+    handleDesc->pins++;
+    return handleDesc->pin_virt_address;
+    */
+}
+
+void NvMap::UnpinHandle(Handle::Id handle) {
+    UNIMPLEMENTED_MSG("Unpinning");
+    /*
+    auto handleDesc{GetHandle(handle)};
+    if (!handleDesc)
+        return;
+
+    std::scoped_lock lock(handleDesc->mutex);
+    if (--handleDesc->pins < 0) {
+        LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!");
+    } else if (!handleDesc->pins) {
+        std::scoped_lock queueLock(unmap_queue_lock);
+
+        // Add to the unmap queue allowing this handle's memory to be freed if needed
+        unmap_queue.push_back(handleDesc);
+        handleDesc->unmap_queue_entry = std::prev(unmap_queue.end());
+    }
+    */
+}
+
+std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool internal_session) {
+    std::weak_ptr<Handle> hWeak{GetHandle(handle)};
+    FreeInfo freeInfo;
+
+    // We use a weak ptr here so we can tell when the handle has been freed and report that back to
+    // guest
+    if (auto handleDesc = hWeak.lock()) {
+        std::scoped_lock lock(handleDesc->mutex);
+
+        if (internal_session) {
+            if (--handleDesc->internal_dupes < 0)
+                LOG_WARNING(Service_NVDRV, "Internal duplicate count imbalance detected!");
+        } else {
+            if (--handleDesc->dupes < 0) {
+                LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!");
+            } else if (handleDesc->dupes == 0) {
+                // Force unmap the handle
+                if (handleDesc->pin_virt_address) {
+                    std::scoped_lock queueLock(unmap_queue_lock);
+                    UnmapHandle(*handleDesc);
+                }
+
+                handleDesc->pins = 0;
+            }
+        }
+
+        // Try to remove the shared ptr to the handle from the map, if nothing else is using the
+        // handle then it will now be freed when `handleDesc` goes out of scope
+        if (TryRemoveHandle(*handleDesc))
+            LOG_ERROR(Service_NVDRV, "Removed nvmap handle: {}", handle);
+        else
+            LOG_ERROR(Service_NVDRV,
+                      "Tried to free nvmap handle: {} but didn't as it still has duplicates",
+                      handle);
+
+        freeInfo = {
+            .address = handleDesc->address,
+            .size = handleDesc->size,
+            .was_uncached = handleDesc->flags.map_uncached.Value() != 0,
+        };
+    } else {
+        return std::nullopt;
+    }
+
+    // Handle hasn't been freed from memory, set address to 0 to mark that the handle wasn't freed
+    if (!hWeak.expired()) {
+        LOG_ERROR(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle);
+        freeInfo.address = 0;
+    }
+
+    return freeInfo;
+}
+
+} // namespace Service::Nvidia::NvCore
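FreeHandle above reports its outcome through the FreeInfo optional: address and size describe the handle before deletion, and address is reset to 0 when other duplicates keep the handle alive. A hedged sketch of how a caller could interpret that result (the names nvmap_file and id are assumed for illustration; they are not introduced by this change):

    // internal_session = false models a guest-initiated free.
    if (const auto free_info = nvmap_file.FreeHandle(id, false)) {
        if (free_info->address == 0) {
            // Another duplicate still references the handle; nothing was released.
        } else if (free_info->was_uncached) {
            // The range [address, address + size) was mapped uncached and may
            // need cache maintenance on the caller's side.
        }
    } else {
        // `id` did not name a live handle.
    }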
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h
new file mode 100644
index 000000000..e47aa755d
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/nvmap.h
@@ -0,0 +1,155 @@
+// Copyright 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <list>
+#include <memory>
+#include <mutex>
+#include <optional>
+#include <unordered_map>
+#include <assert.h>
+
+#include "common/bit_field.h"
+#include "common/common_types.h"
+#include "core/hle/service/nvdrv/nvdata.h"
+
+namespace Service::Nvidia::NvCore {
+/**
+ * @brief The nvmap core class holds the global state for nvmap and provides methods to manage
+ * handles
+ */
+class NvMap {
+public:
+    /**
+     * @brief A handle to a contiguous block of memory in an application's address space
+     */
+    struct Handle {
+        std::mutex mutex;
+
+        u64 align{};      //!< The alignment to use when pinning the handle onto the SMMU
+        u64 size;         //!< Page-aligned size of the memory the handle refers to
+        u64 aligned_size; //!< `align`-aligned size of the memory the handle refers to
+        u64 orig_size;    //!< Original unaligned size of the memory this handle refers to
+
+        s32 dupes{1};          //!< How many guest references there are to this handle
+        s32 internal_dupes{0}; //!< How many emulator-internal references there are to this handle
+
+        using Id = u32;
+        Id id; //!< A globally unique identifier for this handle
+
+        s32 pins{};
+        u32 pin_virt_address{};
+        std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{};
+
+        union Flags {
+            BitField<0, 1, u32> map_uncached; //!< If the handle should be mapped as uncached
+            BitField<2, 1, u32> keep_uncached_after_free; //!< Only applicable when the handle was
+                                                          //!< allocated with a fixed address
+            BitField<4, 1, u32> _unk0_;                   //!< Passed to IOVMM for pins
+        } flags{};
+        static_assert(sizeof(Flags) == sizeof(u32));
+
+        u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to,
+                       //!< this can also be in the nvdrv tmem
+        bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
+                                     //!< call
+
+        u8 kind{};        //!< Used for memory compression
+        bool allocated{}; //!< If the handle has been allocated with `Alloc`
+
+        Handle(u64 size, Id id);
+
+        /**
+         * @brief Sets up the handle with the given memory config, can allocate memory from the tmem
+         * if a 0 address is passed
+         */
+        [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress);
+
+        /**
+         * @brief Increases the dupe counter of the handle for the given session
+         */
+        [[nodiscard]] NvResult Duplicate(bool internal_session);
+
+        /**
+         * @brief Obtains a pointer to the handle's memory and marks the handle as having been
+         * mapped
+         */
+        u8* GetPointer() {
+            if (!address) {
+                return nullptr;
+            }
+
+            is_shared_mem_mapped = true;
+            return reinterpret_cast<u8*>(address);
+        }
+    };
+
+private:
+    std::list<std::shared_ptr<Handle>> unmap_queue;
+    std::mutex unmap_queue_lock; //!< Protects access to `unmap_queue`
+
+    std::unordered_map<Handle::Id, std::shared_ptr<Handle>> handles; //!< Main owning map of handles
+    std::mutex handles_lock; //!< Protects access to `handles`
+
+    static constexpr u32 HandleIdIncrement{
+        4}; //!< Each new handle ID is an increment of 4 from the previous
+    std::atomic<u32> next_handle_id{HandleIdIncrement};
+
+    void AddHandle(std::shared_ptr<Handle> handle);
+
+    /**
+     * @brief Unmaps and frees the SMMU memory region a handle is mapped to
+     * @note Both `unmap_queue_lock` and `handleDesc.mutex` MUST be locked when calling this
+     */
+    void UnmapHandle(Handle& handleDesc);
+
+    /**
+     * @brief Removes a handle from the map taking its dupes into account
+     * @note handleDesc.mutex MUST be locked when calling this
+     * @return If the handle was removed from the map
+     */
+    bool TryRemoveHandle(const Handle& handleDesc);
+
+public:
+    /**
+     * @brief Encapsulates the result of a FreeHandle operation
+     */
+    struct FreeInfo {
+        u64 address;       //!< Address the handle referred to before deletion
+        u64 size;          //!< Page-aligned handle size
+        bool was_uncached; //!< If the handle was allocated as uncached
+    };
+
+    NvMap();
+
+    /**
+     * @brief Creates an unallocated handle of the given size
+     */
+    [[nodiscard]] NvResult CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out);
+
+    std::shared_ptr<Handle> GetHandle(Handle::Id handle);
+
+    /**
+     * @brief Maps a handle into the SMMU address space
+     * @note This operation is refcounted, the number of calls to this must eventually match the
+     * number of calls to `UnpinHandle`
+     * @return The SMMU virtual address that the handle has been mapped to
+     */
+    u32 PinHandle(Handle::Id handle);
+
+    /**
+     * @brief When this has been called an equal number of times to `PinHandle` for the supplied
+     * handle it will be added to a list of handles to be freed when necessary
+     */
+    void UnpinHandle(Handle::Id handle);
+
+    /**
+     * @brief Tries to free a handle and remove a single dupe
+     * @note If a handle has no dupes left and has no other users a FreeInfo struct will be returned
+     * describing the prior state of the handle
+     */
+    std::optional<FreeInfo> FreeHandle(Handle::Id handle, bool internal_session);
+};
+} // namespace Service::Nvidia::NvCore
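The header above fixes the handle lifecycle: CreateHandle hands out an unallocated handle, Alloc attaches the memory configuration, and Duplicate/FreeHandle move the dupe counts up and down. A minimal sketch of that flow, assuming an NvMap instance named nvmap_file and a page-aligned guest address guest_addr (both illustrative, not names from this change):

    NvCore::NvMap::Handle::Flags flags{};
    flags.map_uncached.Assign(1); // request an uncached mapping

    std::shared_ptr<NvCore::NvMap::Handle> handle;
    if (nvmap_file.CreateHandle(0x10000, handle) == NvResult::Success &&
        handle->Alloc(flags, 0x1000, 0, guest_addr) == NvResult::Success) {
        (void)handle->Duplicate(false);                 // one extra guest reference
        (void)nvmap_file.FreeHandle(handle->id, false); // drops a dupe, handle survives
        (void)nvmap_file.FreeHandle(handle->id, false); // last dupe, handle is removed
    }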
diff --git a/src/core/hle/service/nvdrv/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
index a6fa943e8..ff6cbb37e 100644
--- a/src/core/hle/service/nvdrv/syncpoint_manager.cpp
+++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
@@ -2,10 +2,10 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include "common/assert.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
 #include "video_core/gpu.h"
 
-namespace Service::Nvidia {
+namespace Service::Nvidia::NvCore {
 
 SyncpointManager::SyncpointManager(Tegra::GPU& gpu_) : gpu{gpu_} {}
 
@@ -35,4 +35,4 @@ u32 SyncpointManager::IncreaseSyncpoint(u32 syncpoint_id, u32 value) {
     return GetSyncpointMax(syncpoint_id);
 }
 
-} // namespace Service::Nvidia
+} // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/syncpoint_manager.h b/src/core/hle/service/nvdrv/core/syncpoint_manager.h
index 7f080f76e..cf7f0b4be 100644
--- a/src/core/hle/service/nvdrv/syncpoint_manager.h
+++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.h
@@ -13,7 +13,7 @@ namespace Tegra {
 class GPU;
 }
 
-namespace Service::Nvidia {
+namespace Service::Nvidia::NvCore {
 
 class SyncpointManager final {
 public:
@@ -81,4 +81,4 @@ private:
     Tegra::GPU& gpu;
 };
 
-} // namespace Service::Nvidia
+} // namespace Service::Nvidia::NvCore
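The syncpoint manager itself only moves into the NvCore namespace; its interface is unchanged. A hedged sketch of the channel-fence setup it is used for, mirroring the nvhost_gpu constructor further down (syncpoint_manager and gpu are assumed to be in scope here):

    NvFence channel_fence{};
    channel_fence.id = syncpoint_manager.AllocateSyncpoint();
    channel_fence.value = gpu.GetSyncpointValue(channel_fence.id);

    // When work is queued on the channel, the expected maximum can be bumped:
    const u32 new_max = syncpoint_manager.IncreaseSyncpoint(channel_fence.id, 1);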
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index 55acd4f78..5e2155e6c 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -12,15 +12,17 @@
 #include "core/core.h"
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_writable_event.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
 #include "core/hle/service/nvdrv/devices/nvhost_ctrl.h"
 #include "video_core/gpu.h"
 
 namespace Service::Nvidia::Devices {
 
 nvhost_ctrl::nvhost_ctrl(Core::System& system_, EventInterface& events_interface_,
-                         SyncpointManager& syncpoint_manager_)
-    : nvdevice{system_}, events_interface{events_interface_}, syncpoint_manager{
-                                                                  syncpoint_manager_} {}
+                         NvCore::Container& core_)
+    : nvdevice{system_}, events_interface{events_interface_}, core{core_},
+      syncpoint_manager{core_.GetSyncpointManager()} {}
 
 nvhost_ctrl::~nvhost_ctrl() = default;
 
 NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
index d548a7827..9fd46ea5f 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
@@ -10,12 +10,17 @@
 #include "core/hle/service/nvdrv/devices/nvdevice.h"
 #include "core/hle/service/nvdrv/nvdrv.h"
 
+namespace Service::Nvidia::NvCore {
+class Container;
+class SyncpointManager;
+} // namespace Service::Nvidia::NvCore
+
 namespace Service::Nvidia::Devices {
 
 class nvhost_ctrl final : public nvdevice {
 public:
     explicit nvhost_ctrl(Core::System& system_, EventInterface& events_interface_,
-                         SyncpointManager& syncpoint_manager_);
+                         NvCore::Container& core);
     ~nvhost_ctrl() override;
 
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -145,7 +150,8 @@ private:
     NvResult FreeEvent(u32 slot);
 
     EventInterface& events_interface;
-    SyncpointManager& syncpoint_manager;
+    NvCore::Container& core;
+    NvCore::SyncpointManager& syncpoint_manager;
 };
 
 } // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index e87fa5992..a480bfc47 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -5,9 +5,10 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
 #include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
 #include "core/hle/service/nvdrv/nvdrv.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
 #include "core/memory.h"
 #include "video_core/gpu.h"
 
@@ -22,10 +23,10 @@ Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoi
 } // namespace
 
 nvhost_gpu::nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                       EventInterface& events_interface_, SyncpointManager& syncpoint_manager_)
+                       EventInterface& events_interface_, NvCore::Container& core_)
     : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, events_interface{events_interface_},
-      syncpoint_manager{syncpoint_manager_} {
-    channel_fence.id = syncpoint_manager_.AllocateSyncpoint();
+      core{core_}, syncpoint_manager{core_.GetSyncpointManager()} {
+    channel_fence.id = syncpoint_manager.AllocateSyncpoint();
     channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id);
     sm_exception_breakpoint_int_report_event =
         events_interface.CreateNonCtrlEvent("GpuChannelSMExceptionBreakpointInt");
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
index eb4936df0..4f73a7bae 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
@@ -14,7 +14,12 @@
 #include "video_core/dma_pusher.h"
 
 namespace Service::Nvidia {
+
+namespace NvCore {
+class Container;
 class SyncpointManager;
+} // namespace NvCore
+
 class EventInterface;
 } // namespace Service::Nvidia
 
@@ -24,7 +29,7 @@ class nvmap;
 
 class nvhost_gpu final : public nvdevice {
 public:
     explicit nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                        EventInterface& events_interface_, SyncpointManager& syncpoint_manager_);
+                        EventInterface& events_interface_, NvCore::Container& core);
     ~nvhost_gpu() override;
 
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -196,7 +201,8 @@ private:
     std::shared_ptr<nvmap> nvmap_dev;
     EventInterface& events_interface;
-    SyncpointManager& syncpoint_manager;
+    NvCore::Container& core;
+    NvCore::SyncpointManager& syncpoint_manager;
 
     NvFence channel_fence;
 
     // Events
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
index a7385fce8..2c9158c7c 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
@@ -11,8 +11,8 @@ namespace Service::Nvidia::Devices {
 
 nvhost_nvdec::nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                           SyncpointManager& syncpoint_manager_)
-    : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {}
+                           NvCore::Container& core)
+    : nvhost_nvdec_common{system_, std::move(nvmap_dev_), core} {}
 
 nvhost_nvdec::~nvhost_nvdec() = default;
 
 NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
index 29b3e6a36..04da4a913 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
@@ -11,7 +11,7 @@ namespace Service::Nvidia::Devices {
 class nvhost_nvdec final : public nvhost_nvdec_common {
 public:
     explicit nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                          SyncpointManager& syncpoint_manager_);
+                          NvCore::Container& core);
     ~nvhost_nvdec() override;
 
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 8b2cd9bf1..5a9c59f37 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -8,9 +8,10 @@
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
 #include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
 #include "core/hle/service/nvdrv/devices/nvmap.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
 #include "core/memory.h"
 #include "video_core/memory_manager.h"
 #include "video_core/renderer_base.h"
@@ -45,8 +46,9 @@ std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::s
 } // Anonymous namespace
 
 nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                                         SyncpointManager& syncpoint_manager_)
-    : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} {}
+                                         NvCore::Container& core_)
+    : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, core{core_},
+      syncpoint_manager{core.GetSyncpointManager()} {}
 
 nvhost_nvdec_common::~nvhost_nvdec_common() = default;
 
 NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
index 12d39946d..cccc94a58 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
@@ -9,7 +9,11 @@
 #include "core/hle/service/nvdrv/devices/nvdevice.h"
 
 namespace Service::Nvidia {
+
+namespace NvCore {
 class SyncpointManager;
+class Container;
+} // namespace NvCore
 
 namespace Devices {
 class nvmap;
@@ -17,7 +21,7 @@ class nvmap;
 class nvhost_nvdec_common : public nvdevice {
 public:
     explicit nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                                 SyncpointManager& syncpoint_manager_);
+                                 NvCore::Container& core);
     ~nvhost_nvdec_common() override;
 
 protected:
@@ -114,7 +118,8 @@ protected:
     s32_le nvmap_fd{};
     u32_le submit_timeout{};
     std::shared_ptr<nvmap> nvmap_dev;
-    SyncpointManager& syncpoint_manager;
+    NvCore::Container& core;
+    NvCore::SyncpointManager& syncpoint_manager;
     std::array<u32, MaxSyncPoints> device_syncpoints{};
 };
 }; // namespace Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
index f58e8bada..66558c331 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
@@ -9,8 +9,8 @@ namespace Service::Nvidia::Devices {
 
 nvhost_vic::nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                       SyncpointManager& syncpoint_manager_)
-    : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {}
+                       NvCore::Container& core)
+    : nvhost_nvdec_common{system_, std::move(nvmap_dev_), core} {}
 
 nvhost_vic::~nvhost_vic() = default;
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
index b41b195ae..6f9838b2d 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
@@ -10,7 +10,7 @@ namespace Service::Nvidia::Devices {
 class nvhost_vic final : public nvhost_nvdec_common {
 public:
     explicit nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                        SyncpointManager& syncpoint_manager_);
+                        NvCore::Container& core);
     ~nvhost_vic();
 
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index df4656240..824c0e290 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -11,6 +11,7 @@
 #include "core/hle/ipc_helpers.h"
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_writable_event.h"
+#include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/devices/nvdevice.h"
 #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
 #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
@@ -24,8 +25,8 @@
 #include "core/hle/service/nvdrv/nvdrv.h"
 #include "core/hle/service/nvdrv/nvdrv_interface.h"
 #include "core/hle/service/nvdrv/nvmemp.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
 #include "core/hle/service/nvflinger/nvflinger.h"
+#include "video_core/gpu.h"
 
 namespace Service::Nvidia {
 
@@ -75,6 +76,7 @@ void EventInterface::Create(u32 event_id) {
     const u64 mask = 1ULL << event_id;
     fails[event_id] = 0;
     events_mask |= mask;
+    assigned_syncpt[event_id] = 0;
 }
 
 void EventInterface::Free(u32 event_id) {
@@ -135,22 +137,22 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger
 }
 
 Module::Module(Core::System& system)
-    : syncpoint_manager{system.GPU()}, service_context{system, "nvdrv"}, events_interface{*this} {
+    : service_context{system, "nvdrv"}, events_interface{*this}, container{system.GPU()} {
     auto nvmap_dev = std::make_shared<Devices::nvmap>(system);
     devices["/dev/nvhost-as-gpu"] = std::make_shared<Devices::nvhost_as_gpu>(system, nvmap_dev);
-    devices["/dev/nvhost-gpu"] = std::make_shared<Devices::nvhost_gpu>(
-        system, nvmap_dev, events_interface, syncpoint_manager);
+    devices["/dev/nvhost-gpu"] =
+        std::make_shared<Devices::nvhost_gpu>(system, nvmap_dev, events_interface, container);
     devices["/dev/nvhost-ctrl-gpu"] =
         std::make_shared<Devices::nvhost_ctrl_gpu>(system, events_interface);
     devices["/dev/nvmap"] = nvmap_dev;
     devices["/dev/nvdisp_disp0"] = std::make_shared<Devices::nvdisp_disp0>(system, nvmap_dev);
     devices["/dev/nvhost-ctrl"] =
-        std::make_shared<Devices::nvhost_ctrl>(system, events_interface, syncpoint_manager);
+        std::make_shared<Devices::nvhost_ctrl>(system, events_interface, container);
     devices["/dev/nvhost-nvdec"] =
-        std::make_shared<Devices::nvhost_nvdec>(system, nvmap_dev, syncpoint_manager);
+        std::make_shared<Devices::nvhost_nvdec>(system, nvmap_dev, container);
     devices["/dev/nvhost-nvjpg"] = std::make_shared<Devices::nvhost_nvjpg>(system);
     devices["/dev/nvhost-vic"] =
-        std::make_shared<Devices::nvhost_vic>(system, nvmap_dev, syncpoint_manager);
+        std::make_shared<Devices::nvhost_vic>(system, nvmap_dev, container);
 }
 
 Module::~Module() = default;
diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h
index d24b57539..96adf2ffb 100644
--- a/src/core/hle/service/nvdrv/nvdrv.h
+++ b/src/core/hle/service/nvdrv/nvdrv.h
@@ -12,8 +12,8 @@
 #include "common/common_types.h"
 
 #include "core/hle/service/kernel_helpers.h"
+#include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/nvdata.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
 #include "core/hle/service/nvflinger/ui/fence.h"
 #include "core/hle/service/service.h"
 
@@ -31,7 +31,10 @@ class NVFlinger;
 
 namespace Service::Nvidia {
 
+namespace NvCore {
+class Container;
 class SyncpointManager;
+} // namespace NvCore
 
 namespace Devices {
 class nvdevice;
@@ -126,9 +129,6 @@ public:
 private:
     friend class EventInterface;
 
-    /// Manages syncpoints on the host
-    SyncpointManager syncpoint_manager;
-
     /// Id to use for the next open file descriptor.
     DeviceFD next_fd = 1;
 
@@ -142,6 +142,9 @@ private:
     EventInterface events_interface;
 
+    /// The shared nvdrv core state (nvmap handles and the host syncpoint manager)
+    NvCore::Container container;
+
     void CreateEvent(u32 event_id);
     void FreeEvent(u32 event_id);
 };
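With this change, Module owns a single NvCore::Container and hands it to every nvhost device in place of the old bare SyncpointManager reference. A device added later would follow the same pattern; a hedged sketch (nvhost_new is a made-up name used only for illustration):

    // Hypothetical constructor following the wiring shown above: take the
    // container by reference and cache only what the device needs.
    nvhost_new::nvhost_new(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
                           EventInterface& events_interface_, NvCore::Container& core_)
        : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)},
          events_interface{events_interface_}, core{core_},
          syncpoint_manager{core_.GetSyncpointManager()} {}

    // Registration inside Module::Module(Core::System& system):
    devices["/dev/nvhost-new"] =
        std::make_shared<Devices::nvhost_new>(system, nvmap_dev, events_interface, container);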