Diffstat (limited to 'src/core/hle/service')
-rw-r--r-- | src/core/hle/service/hle_ipc.cpp                            |  2
-rw-r--r-- | src/core/hle/service/nvdrv/core/container.cpp               |  8
-rw-r--r-- | src/core/hle/service/nvdrv/core/container.h                 |  8
-rw-r--r-- | src/core/hle/service/nvdrv/core/heap_mapper.cpp             | 25
-rw-r--r-- | src/core/hle/service/nvdrv/core/nvmap.cpp                   |  8
-rw-r--r-- | src/core/hle/service/nvdrv/core/nvmap.h                     | 11
-rw-r--r-- | src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h    |  2
-rw-r--r-- | src/core/hle/service/nvdrv/devices/nvhost_vic.cpp           |  2
-rw-r--r-- | src/core/hle/service/nvdrv/devices/nvmap.cpp                |  4
-rw-r--r-- | src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp | 10
-rw-r--r-- | src/core/hle/service/nvnflinger/fb_share_buffer_manager.h   |  3
11 files changed, 52 insertions, 31 deletions
diff --git a/src/core/hle/service/hle_ipc.cpp b/src/core/hle/service/hle_ipc.cpp
index 9f6274c7d..e491dd260 100644
--- a/src/core/hle/service/hle_ipc.cpp
+++ b/src/core/hle/service/hle_ipc.cpp
@@ -12,6 +12,7 @@
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "common/scratch_buffer.h"
+#include "core/guest_memory.h"
 #include "core/hle/kernel/k_auto_object.h"
 #include "core/hle/kernel/k_handle_table.h"
 #include "core/hle/kernel/k_process.h"
@@ -22,7 +23,6 @@
 #include "core/hle/service/hle_ipc.h"
 #include "core/hle/service/ipc_helpers.h"
 #include "core/memory.h"
-#include "core/guest_memory.h"
 
 namespace Service {
 
diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp
index 4d3a9d696..d04b7f5ff 100644
--- a/src/core/hle/service/nvdrv/core/container.cpp
+++ b/src/core/hle/service/nvdrv/core/container.cpp
@@ -16,6 +16,12 @@
 
 namespace Service::Nvidia::NvCore {
 
+Session::Session(size_t id_, Kernel::KProcess* process_, size_t smmu_id_)
+    : id{id_}, process{process_}, smmu_id{smmu_id_},
+      has_preallocated_area{}, mapper{}, is_active{} {}
+
+Session::~Session() = default;
+
 struct ContainerImpl {
     explicit ContainerImpl(Container& core, Tegra::Host1x::Host1x& host1x_)
         : host1x{host1x_}, file{core, host1x_}, manager{host1x_}, device_file_data{} {}
@@ -54,8 +60,8 @@ size_t Container::OpenSession(Kernel::KProcess* process) {
         impl->id_pool.pop_front();
         impl->sessions[new_id] = Session{new_id, process, smmu_id};
     } else {
-        impl->sessions.emplace_back(new_id, process, smmu_id);
         new_id = impl->new_ids++;
+        impl->sessions.emplace_back(new_id, process, smmu_id);
     }
     auto& session = impl->sessions[new_id];
     session.is_active = true;
diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h
index 86705cbc8..4b8452844 100644
--- a/src/core/hle/service/nvdrv/core/container.h
+++ b/src/core/hle/service/nvdrv/core/container.h
@@ -27,6 +27,14 @@ class SyncpointManager;
 struct ContainerImpl;
 
 struct Session {
+    Session(size_t id_, Kernel::KProcess* process_, size_t smmu_id_);
+    ~Session();
+
+    Session(const Session&) = delete;
+    Session& operator=(const Session&) = delete;
+    Session(Session&&) = default;
+    Session& operator=(Session&&) = default;
+
     size_t id;
     Kernel::KProcess* process;
     size_t smmu_id;
diff --git a/src/core/hle/service/nvdrv/core/heap_mapper.cpp b/src/core/hle/service/nvdrv/core/heap_mapper.cpp
index 59d993bc6..c29191b92 100644
--- a/src/core/hle/service/nvdrv/core/heap_mapper.cpp
+++ b/src/core/hle/service/nvdrv/core/heap_mapper.cpp
@@ -124,10 +124,11 @@ DAddr HeapMapper::Map(VAddr start, size_t size) {
     m_internal->base_set.clear();
     const IntervalType interval{start, start + size};
     m_internal->base_set.insert(interval);
-    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size, [this](VAddr start_addr, VAddr end_addr, int){
-        const IntervalType other{start_addr, end_addr};
-        m_internal->base_set.subtract(other);
-    });
+    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
+                                        [this](VAddr start_addr, VAddr end_addr, int) {
+                                            const IntervalType other{start_addr, end_addr};
+                                            m_internal->base_set.subtract(other);
+                                        });
     if (!m_internal->base_set.empty()) {
         auto it = m_internal->base_set.begin();
         auto end_it = m_internal->base_set.end();
@@ -136,7 +137,8 @@ DAddr HeapMapper::Map(VAddr start, size_t size) {
             const VAddr inter_addr = it->lower();
            const size_t offset = inter_addr - m_vaddress;
            const size_t sub_size = inter_addr_end - inter_addr;
-            m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size, m_smmu_id);
+            m_internal->device_memory.Map(m_daddress + offset, m_vaddress + offset, sub_size,
+                                          m_smmu_id);
         }
     }
     m_internal->mapping_overlaps += std::make_pair(interval, 1);
@@ -147,12 +149,13 @@ DAddr HeapMapper::Map(VAddr start, size_t size) {
 void HeapMapper::Unmap(VAddr start, size_t size) {
     std::scoped_lock lk(m_internal->guard);
     m_internal->base_set.clear();
-    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size, [this](VAddr start_addr, VAddr end_addr, int value) {
-        if (value <= 1) {
-            const IntervalType other{start_addr, end_addr};
-            m_internal->base_set.insert(other);
-        }
-    });
+    m_internal->ForEachInOverlapCounter(m_internal->mapping_overlaps, start, size,
+                                        [this](VAddr start_addr, VAddr end_addr, int value) {
+                                            if (value <= 1) {
+                                                const IntervalType other{start_addr, end_addr};
+                                                m_internal->base_set.insert(other);
+                                            }
+                                        });
     if (!m_internal->base_set.empty()) {
         auto it = m_internal->base_set.begin();
         auto end_it = m_internal->base_set.end();
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp
index 296b4d8d2..6e59d4fe1 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/core/nvmap.cpp
@@ -13,8 +13,8 @@
 #include "core/memory.h"
 #include "video_core/host1x/host1x.h"
 
-
 using Core::Memory::YUZU_PAGESIZE;
+constexpr size_t BIG_PAGE_SIZE = YUZU_PAGESIZE * 16;
 
 namespace Service::Nvidia::NvCore {
 NvMap::Handle::Handle(u64 size_, Id id_)
@@ -96,8 +96,9 @@ void NvMap::UnmapHandle(Handle& handle_description) {
     const size_t map_size = handle_description.aligned_size;
     if (!handle_description.in_heap) {
         auto& smmu = host1x.MemoryManager();
+        size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);
         smmu.Unmap(handle_description.d_address, map_size);
-        smmu.Free(handle_description.d_address, static_cast<size_t>(map_size));
+        smmu.Free(handle_description.d_address, static_cast<size_t>(aligned_up));
         handle_description.d_address = 0;
         return;
     }
@@ -206,7 +207,8 @@ DAddr NvMap::PinHandle(NvMap::Handle::Id handle, bool low_area_pin) {
            handle_description->d_address = session->mapper->Map(vaddress, map_size);
            handle_description->in_heap = true;
         } else {
-            while ((address = smmu.Allocate(map_size)) == 0) {
+            size_t aligned_up = Common::AlignUp(map_size, BIG_PAGE_SIZE);
+            while ((address = smmu.Allocate(aligned_up)) == 0) {
                 // Free handles until the allocation succeeds
                 std::scoped_lock queueLock(unmap_queue_lock);
                 if (auto freeHandleDesc{unmap_queue.front()}) {
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h
index 119efc38d..aa5cd21ec 100644
--- a/src/core/hle/service/nvdrv/core/nvmap.h
+++ b/src/core/hle/service/nvdrv/core/nvmap.h
@@ -63,8 +63,8 @@ public:
         } flags{};
         static_assert(sizeof(Flags) == sizeof(u32));
 
-        VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to,
-                           //!< this can also be in the nvdrv tmem
+        VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to,
+                         //!< this can also be in the nvdrv tmem
 
         bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
                                      //!< call
@@ -73,8 +73,8 @@ public:
         bool in_heap{};
         size_t session_id{};
 
-        DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds to,
-                           //!< this can also be in the nvdrv tmem
+        DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds
+                           //!< to, this can also be in the nvdrv tmem
 
         Handle(u64 size, Id id);
 
@@ -82,7 +82,8 @@ public:
          * @brief Sets up the handle with the given memory config, can allocate memory from the tmem
          * if a 0 address is passed
          */
-        [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t pSessionId);
+        [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress,
+                                     size_t pSessionId);
 
         /**
         * @brief Increases the dupe counter of the handle for the given session
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
index b44b17a82..718e0fecd 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
@@ -4,8 +4,8 @@
 #pragma once
 
 #include <deque>
-#include <vector>
 #include <unordered_map>
+#include <vector>
 
 #include "common/common_types.h"
 #include "common/swap.h"
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
index d4c93ea5d..a27bed29b 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
@@ -69,7 +69,7 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
 }
 
 void nvhost_vic::OnOpen(size_t session_id, DeviceFD fd) {
-  sessions[fd] = session_id;
+    sessions[fd] = session_id;
 }
 
 void nvhost_vic::OnClose(DeviceFD fd) {
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index 24f49ddcd..08ee8ec24 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -123,8 +123,8 @@ NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) {
         return NvResult::InsufficientMemory;
     }
 
-    const auto result =
-        handle_description->Alloc(params.flags, params.align, params.kind, params.address, sessions[fd]);
+    const auto result = handle_description->Alloc(params.flags, params.align, params.kind,
+                                                   params.address, sessions[fd]);
     if (result != NvResult::Success) {
         LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
         return result;
diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
index d36eff4ec..86e272b41 100644
--- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
+++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.cpp
@@ -92,7 +92,8 @@ Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Nvidia::Device
     Nvidia::Devices::nvmap::IocFreeParams free_params{
         .handle = handle,
     };
-    R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
+    R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success,
+             VI::ResultOperationFailed);
 
     // We succeeded.
     R_SUCCEED();
@@ -109,7 +110,8 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce
         .kind = 0,
         .address = GetInteger(buffer),
     };
-    R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
+    R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success,
+             VI::ResultOperationFailed);
 
     // We succeeded.
     R_SUCCEED();
@@ -201,8 +203,8 @@ Result FbShareBufferManager::Initialize(u64* out_buffer_id, u64* out_layer_id, u
     m_nvmap_fd = m_nvdrv->Open("/dev/nvmap", m_session_id);
 
     // Create an nvmap handle for the buffer and assign the memory to it.
-    R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd, map_address,
-                                  SharedBufferSize));
+    R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd,
+                                  map_address, SharedBufferSize));
 
     // Record the display id.
     m_display_id = display_id;
diff --git a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h
index 4b1a3d430..d2ec7a9b9 100644
--- a/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h
+++ b/src/core/hle/service/nvnflinger/fb_share_buffer_manager.h
@@ -4,9 +4,9 @@
 #pragma once
 
 #include "common/math_util.h"
+#include "core/hle/service/nvdrv/nvdata.h"
 #include "core/hle/service/nvnflinger/nvnflinger.h"
 #include "core/hle/service/nvnflinger/ui/fence.h"
-#include "core/hle/service/nvdrv/nvdata.h"
 
 namespace Kernel {
 class KPageGroup;
@@ -62,7 +62,6 @@ private:
     Core::System& m_system;
     Nvnflinger& m_flinger;
    std::shared_ptr<Nvidia::Module> m_nvdrv;
-
 };
 
 } // namespace Service::Nvnflinger
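The most substantive change above is the big-page rounding added to NvMap::PinHandle and NvMap::UnmapHandle: the SMMU reservation is aligned up to BIG_PAGE_SIZE before Allocate, and the matching Free now passes that same rounded size instead of the raw map_size (Unmap still uses the exact mapped size). The standalone C++ sketch below only illustrates that pairing; it is not yuzu code — the Smmu type and its Allocate/Free bookkeeping are hypothetical stand-ins for the host1x memory manager, AlignUp is a local re-implementation of Common::AlignUp for power-of-two alignments, and the 4 KiB YUZU_PAGESIZE value is an assumption.

// Minimal sketch of the allocation-size rounding seen in PinHandle/UnmapHandle.
// The toy Smmu below is NOT yuzu's device memory manager; it only models the
// invariant that Free() must be given the same size that Allocate() reserved.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>

constexpr std::size_t YUZU_PAGESIZE = 0x1000;             // assumed 4 KiB guest page
constexpr std::size_t BIG_PAGE_SIZE = YUZU_PAGESIZE * 16; // 64 KiB big page, as in the diff

// Same result as Common::AlignUp for power-of-two alignments.
constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
    return (value + align - 1) & ~(align - 1);
}

// Toy allocator: remembers the size of every reservation so a mismatched Free is visible.
struct Smmu {
    std::map<std::uint64_t, std::size_t> reservations;
    std::uint64_t next = BIG_PAGE_SIZE;

    std::uint64_t Allocate(std::size_t size) {
        const std::uint64_t addr = next;
        next += size;
        reservations.emplace(addr, size);
        return addr;
    }
    void Free(std::uint64_t addr, std::size_t size) {
        auto it = reservations.find(addr);
        if (it == reservations.end() || it->second != size) {
            std::cerr << "size mismatch on Free\n"; // bookkeeping would be corrupted
            return;
        }
        reservations.erase(it);
    }
};

int main() {
    Smmu smmu;
    const std::size_t map_size = 0x5000; // 20 KiB handle, not big-page aligned

    // Pin: reserve device address space rounded up to a big page.
    const std::size_t aligned_up = AlignUp(map_size, BIG_PAGE_SIZE);
    const std::uint64_t d_address = smmu.Allocate(aligned_up);

    // Unpin: release with the same rounded size, mirroring UnmapHandle's Free call.
    smmu.Free(d_address, AlignUp(map_size, BIG_PAGE_SIZE));
    std::cout << "outstanding reservations: " << smmu.reservations.size() << '\n'; // 0
}

Built with a C++17 compiler this prints zero outstanding reservations; dropping the AlignUp from either the Allocate or the Free call reproduces the size mismatch that rounding both sides avoids.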