Diffstat (limited to 'src/common')
-rw-r--r--  src/common/CMakeLists.txt | 50
-rw-r--r--  src/common/address_space.cpp | 10
-rw-r--r--  src/common/address_space.h | 150
-rw-r--r--  src/common/address_space.inc | 366
-rw-r--r--  src/common/algorithm.h | 13
-rw-r--r--  src/common/alignment.h | 3
-rw-r--r--  src/common/announce_multiplayer_room.h | 140
-rw-r--r--  src/common/assert.cpp | 12
-rw-r--r--  src/common/assert.h | 61
-rw-r--r--  src/common/atomic_helpers.h | 775
-rw-r--r--  src/common/atomic_ops.h | 100
-rw-r--r--  src/common/bit_cast.h | 5
-rw-r--r--  src/common/bit_field.h | 42
-rw-r--r--  src/common/bit_set.h | 17
-rw-r--r--  src/common/bit_util.h | 12
-rw-r--r--  src/common/bounded_threadsafe_queue.h | 167
-rw-r--r--  src/common/cityhash.cpp | 25
-rw-r--r--  src/common/cityhash.h | 25
-rw-r--r--  src/common/common_funcs.h | 15
-rw-r--r--  src/common/common_types.h | 4
-rw-r--r--  src/common/concepts.h | 5
-rw-r--r--  src/common/detached_tasks.cpp | 9
-rw-r--r--  src/common/detached_tasks.h | 5
-rw-r--r--  src/common/div_ceil.h | 5
-rw-r--r--  src/common/dynamic_library.cpp | 6
-rw-r--r--  src/common/dynamic_library.h | 5
-rw-r--r--  src/common/elf.h | 333
-rw-r--r--  src/common/error.cpp | 6
-rw-r--r--  src/common/error.h | 6
-rw-r--r--  src/common/expected.h | 5
-rw-r--r--  src/common/fiber.cpp | 31
-rw-r--r--  src/common/fiber.h | 12
-rw-r--r--  src/common/fixed_point.h | 706
-rw-r--r--  src/common/fs/file.cpp | 6
-rw-r--r--  src/common/fs/file.h | 7
-rw-r--r--  src/common/fs/fs.cpp | 5
-rw-r--r--  src/common/fs/fs.h | 5
-rw-r--r--  src/common/fs/fs_paths.h | 5
-rw-r--r--  src/common/fs/fs_types.h | 6
-rw-r--r--  src/common/fs/fs_util.cpp | 13
-rw-r--r--  src/common/fs/fs_util.h | 24
-rw-r--r--  src/common/fs/path_util.cpp | 9
-rw-r--r--  src/common/fs/path_util.h | 5
-rw-r--r--  src/common/hash.h | 12
-rw-r--r--  src/common/hex_util.cpp | 6
-rw-r--r--  src/common/hex_util.h | 7
-rw-r--r--  src/common/host_memory.cpp | 15
-rw-r--r--  src/common/host_memory.h | 5
-rw-r--r--  src/common/input.h | 67
-rw-r--r--  src/common/intrusive_red_black_tree.h | 395
-rw-r--r--  src/common/literals.h | 5
-rw-r--r--  src/common/logging/backend.cpp | 27
-rw-r--r--  src/common/logging/backend.h | 6
-rw-r--r--  src/common/logging/filter.cpp | 10
-rw-r--r--  src/common/logging/filter.h | 6
-rw-r--r--  src/common/logging/formatter.h | 5
-rw-r--r--  src/common/logging/log.h | 5
-rw-r--r--  src/common/logging/log_entry.h | 5
-rw-r--r--  src/common/logging/text_formatter.cpp | 7
-rw-r--r--  src/common/logging/text_formatter.h | 6
-rw-r--r--  src/common/logging/types.h | 10
-rw-r--r--  src/common/lru_cache.h | 5
-rw-r--r--  src/common/lz4_compression.cpp | 5
-rw-r--r--  src/common/lz4_compression.h | 5
-rw-r--r--  src/common/math_util.h | 56
-rw-r--r--  src/common/memory_detect.cpp | 7
-rw-r--r--  src/common/memory_detect.h | 5
-rw-r--r--  src/common/microprofile.cpp | 5
-rw-r--r--  src/common/microprofile.h | 14
-rw-r--r--  src/common/microprofileui.h | 5
-rw-r--r--  src/common/multi_level_page_table.cpp | 9
-rw-r--r--  src/common/multi_level_page_table.h | 78
-rw-r--r--  src/common/multi_level_page_table.inc | 84
-rw-r--r--  src/common/nvidia_flags.cpp | 6
-rw-r--r--  src/common/nvidia_flags.h | 5
-rw-r--r--  src/common/page_table.cpp | 63
-rw-r--r--  src/common/page_table.h | 33
-rw-r--r--  src/common/param_package.cpp | 11
-rw-r--r--  src/common/param_package.h | 5
-rw-r--r--  src/common/parent_of_member.h | 8
-rw-r--r--  src/common/point.h | 5
-rw-r--r--  src/common/quaternion.h | 5
-rw-r--r--  src/common/reader_writer_queue.h | 940
-rw-r--r--  src/common/ring_buffer.h | 6
-rw-r--r--  src/common/scm_rev.cpp.in | 5
-rw-r--r--  src/common/scm_rev.h | 5
-rw-r--r--  src/common/scope_exit.h | 5
-rw-r--r--  src/common/settings.cpp | 19
-rw-r--r--  src/common/settings.h | 465
-rw-r--r--  src/common/settings_input.cpp | 5
-rw-r--r--  src/common/settings_input.h | 6
-rw-r--r--  src/common/socket_types.h | 51
-rw-r--r--  src/common/spin_lock.cpp | 5
-rw-r--r--  src/common/spin_lock.h | 5
-rw-r--r--  src/common/stream.cpp | 5
-rw-r--r--  src/common/stream.h | 5
-rw-r--r--  src/common/string_util.cpp | 12
-rw-r--r--  src/common/string_util.h | 8
-rw-r--r--  src/common/swap.h | 16
-rw-r--r--  src/common/telemetry.cpp | 66
-rw-r--r--  src/common/telemetry.h | 14
-rw-r--r--  src/common/thread.cpp | 18
-rw-r--r--  src/common/thread.h | 13
-rw-r--r--  src/common/thread_queue_list.h | 6
-rw-r--r--  src/common/thread_worker.h | 5
-rw-r--r--  src/common/threadsafe_queue.h | 11
-rw-r--r--  src/common/time_zone.cpp | 5
-rw-r--r--  src/common/time_zone.h | 5
-rw-r--r--  src/common/tiny_mt.h | 5
-rw-r--r--  src/common/tree.h | 653
-rw-r--r--  src/common/uint128.h | 8
-rw-r--r--  src/common/unique_function.h | 5
-rw-r--r--  src/common/uuid.cpp | 5
-rw-r--r--  src/common/uuid.h | 6
-rw-r--r--  src/common/vector_math.h | 32
-rw-r--r--  src/common/virtual_buffer.cpp | 5
-rw-r--r--  src/common/virtual_buffer.h | 6
-rw-r--r--  src/common/wall_clock.cpp | 9
-rw-r--r--  src/common/wall_clock.h | 5
-rw-r--r--  src/common/x64/cpu_detect.cpp | 138
-rw-r--r--  src/common/x64/cpu_detect.h | 85
-rw-r--r--  src/common/x64/native_clock.cpp | 69
-rw-r--r--  src/common/x64/native_clock.h | 13
-rw-r--r--  src/common/x64/xbyak_abi.h | 5
-rw-r--r--  src/common/x64/xbyak_util.h | 5
-rw-r--r--  src/common/zstd_compression.cpp | 5
-rw-r--r--  src/common/zstd_compression.h | 5
127 files changed, 5496 insertions, 1503 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index adf70eb8b..a02696873 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -1,3 +1,6 @@
+# SPDX-FileCopyrightText: 2018 yuzu Emulator Project
+# SPDX-License-Identifier: GPL-2.0-or-later
+
if (DEFINED ENV{AZURECIREPO})
set(BUILD_REPOSITORY $ENV{AZURECIREPO})
endif()
@@ -11,38 +14,17 @@ if (DEFINED ENV{DISPLAYVERSION})
set(DISPLAY_VERSION $ENV{DISPLAYVERSION})
endif ()
-# Pass the path to git to the GenerateSCMRev.cmake as well
-find_package(Git QUIET)
-
-add_custom_command(OUTPUT scm_rev.cpp
- COMMAND ${CMAKE_COMMAND}
- -DSRC_DIR=${CMAKE_SOURCE_DIR}
- -DBUILD_REPOSITORY=${BUILD_REPOSITORY}
- -DTITLE_BAR_FORMAT_IDLE=${TITLE_BAR_FORMAT_IDLE}
- -DTITLE_BAR_FORMAT_RUNNING=${TITLE_BAR_FORMAT_RUNNING}
- -DBUILD_TAG=${BUILD_TAG}
- -DBUILD_ID=${DISPLAY_VERSION}
- -DGIT_REF_SPEC=${GIT_REF_SPEC}
- -DGIT_REV=${GIT_REV}
- -DGIT_DESC=${GIT_DESC}
- -DGIT_BRANCH=${GIT_BRANCH}
- -DBUILD_FULLNAME=${BUILD_FULLNAME}
- -DGIT_EXECUTABLE=${GIT_EXECUTABLE}
- -P ${CMAKE_SOURCE_DIR}/CMakeModules/GenerateSCMRev.cmake
- DEPENDS
- # Check that the scm_rev files haven't changed
- "${CMAKE_CURRENT_SOURCE_DIR}/scm_rev.cpp.in"
- "${CMAKE_CURRENT_SOURCE_DIR}/scm_rev.h"
- # technically we should regenerate if the git version changed, but its not worth the effort imo
- "${CMAKE_SOURCE_DIR}/CMakeModules/GenerateSCMRev.cmake"
- VERBATIM
-)
+include(GenerateSCMRev)
add_library(common STATIC
+ address_space.cpp
+ address_space.h
algorithm.h
alignment.h
+ announce_multiplayer_room.h
assert.cpp
assert.h
+ atomic_helpers.h
atomic_ops.h
detached_tasks.cpp
detached_tasks.h
@@ -58,11 +40,13 @@ add_library(common STATIC
div_ceil.h
dynamic_library.cpp
dynamic_library.h
+ elf.h
error.cpp
error.h
expected.h
fiber.cpp
fiber.h
+ fixed_point.h
fs/file.cpp
fs/file.h
fs/fs.cpp
@@ -99,6 +83,8 @@ add_library(common STATIC
microprofile.cpp
microprofile.h
microprofileui.h
+ multi_level_page_table.cpp
+ multi_level_page_table.h
nvidia_flags.cpp
nvidia_flags.h
page_table.cpp
@@ -108,14 +94,16 @@ add_library(common STATIC
parent_of_member.h
point.h
quaternion.h
+ reader_writer_queue.h
ring_buffer.h
- scm_rev.cpp
+ ${CMAKE_CURRENT_BINARY_DIR}/scm_rev.cpp
scm_rev.h
scope_exit.h
settings.cpp
settings.h
settings_input.cpp
settings_input.h
+ socket_types.h
spin_lock.cpp
spin_lock.h
stream.cpp
@@ -157,6 +145,7 @@ if(ARCHITECTURE_x86_64)
x64/xbyak_abi.h
x64/xbyak_util.h
)
+ target_link_libraries(common PRIVATE xbyak)
endif()
if (MSVC)
@@ -180,9 +169,10 @@ endif()
create_target_directory_groups(common)
target_link_libraries(common PUBLIC ${Boost_LIBRARIES} fmt::fmt microprofile Threads::Threads)
-target_link_libraries(common PRIVATE lz4::lz4 xbyak)
-if (MSVC)
+target_link_libraries(common PRIVATE lz4::lz4)
+if (TARGET zstd::zstd)
target_link_libraries(common PRIVATE zstd::zstd)
else()
- target_link_libraries(common PRIVATE zstd)
+ target_link_libraries(common PRIVATE
+ $<IF:$<TARGET_EXISTS:zstd::libzstd_shared>,zstd::libzstd_shared,zstd::libzstd_static>)
endif()
diff --git a/src/common/address_space.cpp b/src/common/address_space.cpp
new file mode 100644
index 000000000..866e78dbe
--- /dev/null
+++ b/src/common/address_space.cpp
@@ -0,0 +1,10 @@
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/address_space.inc"
+
+namespace Common {
+
+template class Common::FlatAllocator<u32, 0, 32>;
+
+}
diff --git a/src/common/address_space.h b/src/common/address_space.h
new file mode 100644
index 000000000..9222b2fdc
--- /dev/null
+++ b/src/common/address_space.h
@@ -0,0 +1,150 @@
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <concepts>
+#include <functional>
+#include <mutex>
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Common {
+template <typename VaType, size_t AddressSpaceBits>
+concept AddressSpaceValid = std::is_unsigned_v<VaType> && sizeof(VaType) * 8 >= AddressSpaceBits;
+
+struct EmptyStruct {};
+
+/**
+ * @brief FlatAddressSpaceMap provides a generic VA->PA mapping implementation using a sorted vector
+ */
+template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,
+ bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo = EmptyStruct>
+requires AddressSpaceValid<VaType, AddressSpaceBits>
+class FlatAddressSpaceMap {
+public:
+ /// The maximum VA that this AS can technically reach
+ static constexpr VaType VaMaximum{(1ULL << (AddressSpaceBits - 1)) +
+ ((1ULL << (AddressSpaceBits - 1)) - 1)};
+
+ explicit FlatAddressSpaceMap(VaType va_limit,
+ std::function<void(VaType, VaType)> unmap_callback = {});
+
+ FlatAddressSpaceMap() = default;
+
+ void Map(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info = {}) {
+ std::scoped_lock lock(block_mutex);
+ MapLocked(virt, phys, size, extra_info);
+ }
+
+ void Unmap(VaType virt, VaType size) {
+ std::scoped_lock lock(block_mutex);
+ UnmapLocked(virt, size);
+ }
+
+ VaType GetVALimit() const {
+ return va_limit;
+ }
+
+protected:
+ /**
+ * @brief Represents a block of memory in the AS, the physical mapping is contiguous until
+ * another block with a different phys address is hit
+ */
+ struct Block {
+ /// VA of the block
+ VaType virt{UnmappedVa};
+ /// PA of the block, will increase 1-1 with VA until a new block is encountered
+ PaType phys{UnmappedPa};
+ [[no_unique_address]] ExtraBlockInfo extra_info;
+
+ Block() = default;
+
+ Block(VaType virt_, PaType phys_, ExtraBlockInfo extra_info_)
+ : virt(virt_), phys(phys_), extra_info(extra_info_) {}
+
+ bool Valid() const {
+ return virt != UnmappedVa;
+ }
+
+ bool Mapped() const {
+ return phys != UnmappedPa;
+ }
+
+ bool Unmapped() const {
+ return phys == UnmappedPa;
+ }
+
+ bool operator<(const VaType& p_virt) const {
+ return virt < p_virt;
+ }
+ };
+
+ /**
+ * @brief Maps a PA range into the given AS region
+ * @note block_mutex MUST be locked when calling this
+ */
+ void MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info);
+
+ /**
+ * @brief Unmaps the given range and merges it with other unmapped regions
+ * @note block_mutex MUST be locked when calling this
+ */
+ void UnmapLocked(VaType virt, VaType size);
+
+ std::mutex block_mutex;
+ std::vector<Block> blocks{Block{}};
+
+ /// a soft limit on the maximum VA of the AS
+ VaType va_limit{VaMaximum};
+
+private:
+    /// Callback called when the mappings in a region have changed
+ std::function<void(VaType, VaType)> unmap_callback{};
+};
+
+/**
+ * @brief FlatMemoryManager specialises FlatAddressSpaceMap to work as an allocator, with an
+ * initial, fast linear pass and a subsequent slower pass that iterates until it finds a free block
+ */
+template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>
+requires AddressSpaceValid<VaType, AddressSpaceBits>
+class FlatAllocator
+ : public FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits> {
+private:
+ using Base = FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits>;
+
+public:
+ explicit FlatAllocator(VaType virt_start, VaType va_limit = Base::VaMaximum);
+
+ /**
+ * @brief Allocates a region in the AS of the given size and returns its address
+ */
+ VaType Allocate(VaType size);
+
+ /**
+ * @brief Marks the given region in the AS as allocated
+ */
+ void AllocateFixed(VaType virt, VaType size);
+
+ /**
+ * @brief Frees an AS region so it can be used again
+ */
+ void Free(VaType virt, VaType size);
+
+ VaType GetVAStart() const {
+ return virt_start;
+ }
+
+private:
+ /// The base VA of the allocator, no allocations will be below this
+ VaType virt_start;
+
+ /**
+ * The end address for the initial linear allocation pass
+ * Once this reaches the AS limit the slower allocation path will be used
+ */
+ VaType current_linear_alloc_end;
+};
+} // namespace Common
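For orientation, here is a minimal usage sketch of the new allocator API (not part of the change itself; the <u32, 0, 32> instantiation matches the explicit instantiation in address_space.cpp, everything else below is hypothetical):

    #include "common/address_space.h"

    // Hypothetical sketch: carve allocations out of a 32-bit address space that
    // starts at 0x1000. Allocate() returns the start of a free region (or 0, the
    // UnmappedVa value, if the space is exhausted); AllocateFixed() reserves a
    // caller-chosen range; Free() makes a range reusable.
    void ExampleAddressSpaceUsage() {
        Common::FlatAllocator<u32, 0, 32> allocator{0x1000};

        const u32 va = allocator.Allocate(0x2000);
        allocator.AllocateFixed(0x10000, 0x1000);
        allocator.Free(va, 0x2000);
    }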
diff --git a/src/common/address_space.inc b/src/common/address_space.inc
new file mode 100644
index 000000000..2195dabd5
--- /dev/null
+++ b/src/common/address_space.inc
@@ -0,0 +1,366 @@
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/address_space.h"
+#include "common/assert.h"
+
+#define MAP_MEMBER(returnType) \
+ template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \
+ bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \
+ requires AddressSpaceValid<VaType, AddressSpaceBits> returnType FlatAddressSpaceMap< \
+ VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>
+#define MAP_MEMBER_CONST() \
+ template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \
+ bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \
+ requires AddressSpaceValid<VaType, AddressSpaceBits> FlatAddressSpaceMap< \
+ VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>
+
+#define MM_MEMBER(returnType) \
+ template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
+ requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \
+ FlatMemoryManager<VaType, UnmappedVa, AddressSpaceBits>
+
+#define ALLOC_MEMBER(returnType) \
+ template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
+ requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \
+ FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>
+#define ALLOC_MEMBER_CONST() \
+ template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
+ requires AddressSpaceValid<VaType, AddressSpaceBits> \
+ FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>
+
+namespace Common {
+MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType va_limit_,
+ std::function<void(VaType, VaType)> unmap_callback_)
+ : va_limit{va_limit_}, unmap_callback{std::move(unmap_callback_)} {
+ if (va_limit > VaMaximum) {
+ ASSERT_MSG(false, "Invalid VA limit!");
+ }
+}
+
+MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info) {
+ VaType virt_end{virt + size};
+
+ if (virt_end > va_limit) {
+ ASSERT_MSG(false,
+ "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}",
+ virt_end, va_limit);
+ }
+
+ auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)};
+ if (block_end_successor == blocks.begin()) {
+ ASSERT_MSG(false, "Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end);
+ }
+
+ auto block_end_predecessor{std::prev(block_end_successor)};
+
+ if (block_end_successor != blocks.end()) {
+        // We have blocks in front of us; if one is directly in front then we don't have to add
+        // a tail
+ if (block_end_successor->virt != virt_end) {
+ PaType tailPhys{[&]() -> PaType {
+ if constexpr (!PaContigSplit) {
+ // Always propagate unmapped regions rather than calculating offset
+ return block_end_predecessor->phys;
+ } else {
+ if (block_end_predecessor->Unmapped()) {
+ // Always propagate unmapped regions rather than calculating offset
+ return block_end_predecessor->phys;
+ } else {
+ return block_end_predecessor->phys + virt_end - block_end_predecessor->virt;
+ }
+ }
+ }()};
+
+ if (block_end_predecessor->virt >= virt) {
+ // If this block's start would be overlapped by the map then reuse it as a tail
+ // block
+ block_end_predecessor->virt = virt_end;
+ block_end_predecessor->phys = tailPhys;
+ block_end_predecessor->extra_info = block_end_predecessor->extra_info;
+
+ // No longer predecessor anymore
+ block_end_successor = block_end_predecessor--;
+ } else {
+ // Else insert a new one and we're done
+ blocks.insert(block_end_successor,
+ {Block(virt, phys, extra_info),
+ Block(virt_end, tailPhys, block_end_predecessor->extra_info)});
+ if (unmap_callback) {
+ unmap_callback(virt, size);
+ }
+
+ return;
+ }
+ }
+ } else {
+        // block_end_predecessor will always be unmapped as the block list has to be terminated
+        // by an unmapped chunk
+ if (block_end_predecessor != blocks.begin() && block_end_predecessor->virt >= virt) {
+ // Move the unmapped block start backwards
+ block_end_predecessor->virt = virt_end;
+
+ // No longer predecessor anymore
+ block_end_successor = block_end_predecessor--;
+ } else {
+ // Else insert a new one and we're done
+ blocks.insert(block_end_successor,
+ {Block(virt, phys, extra_info), Block(virt_end, UnmappedPa, {})});
+ if (unmap_callback) {
+ unmap_callback(virt, size);
+ }
+
+ return;
+ }
+ }
+
+ auto block_start_successor{block_end_successor};
+
+ // Walk the block vector to find the start successor as this is more efficient than another
+ // binary search in most scenarios
+ while (std::prev(block_start_successor)->virt >= virt) {
+ block_start_successor--;
+ }
+
+ // Check that the start successor is either the end block or something in between
+ if (block_start_successor->virt > virt_end) {
+ ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
+ } else if (block_start_successor->virt == virt_end) {
+ // We need to create a new block as there are none spare that we would overwrite
+ blocks.insert(block_start_successor, Block(virt, phys, extra_info));
+ } else {
+ // Erase overwritten blocks
+ if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) {
+ blocks.erase(eraseStart, block_end_successor);
+ }
+
+ // Reuse a block that would otherwise be overwritten as a start block
+ block_start_successor->virt = virt;
+ block_start_successor->phys = phys;
+ block_start_successor->extra_info = extra_info;
+ }
+
+ if (unmap_callback) {
+ unmap_callback(virt, size);
+ }
+}
+
+MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
+ VaType virt_end{virt + size};
+
+ if (virt_end > va_limit) {
+        ASSERT_MSG(false,
+                   "Trying to unmap a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}",
+                   virt_end, va_limit);
+ }
+
+ auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)};
+ if (block_end_successor == blocks.begin()) {
+ ASSERT_MSG(false, "Trying to unmap a block before the VA start: virt_end: 0x{:X}",
+ virt_end);
+ }
+
+ auto block_end_predecessor{std::prev(block_end_successor)};
+
+ auto walk_back_to_predecessor{[&](auto iter) {
+ while (iter->virt >= virt) {
+ iter--;
+ }
+
+ return iter;
+ }};
+
+ auto erase_blocks_with_end_unmapped{[&](auto unmappedEnd) {
+ auto block_start_predecessor{walk_back_to_predecessor(unmappedEnd)};
+ auto block_start_successor{std::next(block_start_predecessor)};
+
+ auto eraseEnd{[&]() {
+ if (block_start_predecessor->Unmapped()) {
+ // If the start predecessor is unmapped then we can erase everything in our region
+ // and be done
+ return std::next(unmappedEnd);
+ } else {
+ // Else reuse the end predecessor as the start of our unmapped region then erase all
+ // up to it
+ unmappedEnd->virt = virt;
+ return unmappedEnd;
+ }
+ }()};
+
+ // We can't have two unmapped regions after each other
+ if (eraseEnd != blocks.end() &&
+ (eraseEnd == block_start_successor ||
+ (block_start_predecessor->Unmapped() && eraseEnd->Unmapped()))) {
+ ASSERT_MSG(false, "Multiple contiguous unmapped regions are unsupported!");
+ }
+
+ blocks.erase(block_start_successor, eraseEnd);
+ }};
+
+ // We can avoid any splitting logic if these are the case
+ if (block_end_predecessor->Unmapped()) {
+ if (block_end_predecessor->virt > virt) {
+ erase_blocks_with_end_unmapped(block_end_predecessor);
+ }
+
+ if (unmap_callback) {
+ unmap_callback(virt, size);
+ }
+
+ return; // The region is unmapped, bail out early
+ } else if (block_end_successor->virt == virt_end && block_end_successor->Unmapped()) {
+ erase_blocks_with_end_unmapped(block_end_successor);
+
+ if (unmap_callback) {
+ unmap_callback(virt, size);
+ }
+
+ return; // The region is unmapped here and doesn't need splitting, bail out early
+ } else if (block_end_successor == blocks.end()) {
+ // This should never happen as the end should always follow an unmapped block
+ ASSERT_MSG(false, "Unexpected Memory Manager state!");
+ } else if (block_end_successor->virt != virt_end) {
+ // If one block is directly in front then we don't have to add a tail
+
+ // The previous block is mapped so we will need to add a tail with an offset
+ PaType tailPhys{[&]() {
+ if constexpr (PaContigSplit) {
+ return block_end_predecessor->phys + virt_end - block_end_predecessor->virt;
+ } else {
+ return block_end_predecessor->phys;
+ }
+ }()};
+
+ if (block_end_predecessor->virt >= virt) {
+ // If this block's start would be overlapped by the unmap then reuse it as a tail block
+ block_end_predecessor->virt = virt_end;
+ block_end_predecessor->phys = tailPhys;
+
+ // No longer predecessor anymore
+ block_end_successor = block_end_predecessor--;
+ } else {
+ blocks.insert(block_end_successor,
+ {Block(virt, UnmappedPa, {}),
+ Block(virt_end, tailPhys, block_end_predecessor->extra_info)});
+ if (unmap_callback) {
+ unmap_callback(virt, size);
+ }
+
+ // The previous block is mapped and ends before
+ return;
+ }
+ }
+
+ // Walk the block vector to find the start predecessor as this is more efficient than another
+ // binary search in most scenarios
+ auto block_start_predecessor{walk_back_to_predecessor(block_end_successor)};
+ auto block_start_successor{std::next(block_start_predecessor)};
+
+ if (block_start_successor->virt > virt_end) {
+ ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
+ } else if (block_start_successor->virt == virt_end) {
+ // There are no blocks between the start and the end that would let us skip inserting a new
+ // one for head
+
+        // The previous block may be unmapped; if so we don't need to insert any unmaps after it
+ if (block_start_predecessor->Mapped()) {
+ blocks.insert(block_start_successor, Block(virt, UnmappedPa, {}));
+ }
+ } else if (block_start_predecessor->Unmapped()) {
+ // If the previous block is unmapped
+ blocks.erase(block_start_successor, block_end_predecessor);
+ } else {
+ // Erase overwritten blocks, skipping the first one as we have written the unmapped start
+ // block there
+ if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) {
+ blocks.erase(eraseStart, block_end_successor);
+ }
+
+ // Add in the unmapped block header
+ block_start_successor->virt = virt;
+ block_start_successor->phys = UnmappedPa;
+ }
+
+ if (unmap_callback)
+ unmap_callback(virt, size);
+}
+
+ALLOC_MEMBER_CONST()::FlatAllocator(VaType virt_start_, VaType va_limit_)
+ : Base{va_limit_}, virt_start{virt_start_}, current_linear_alloc_end{virt_start_} {}
+
+ALLOC_MEMBER(VaType)::Allocate(VaType size) {
+ std::scoped_lock lock(this->block_mutex);
+
+ VaType alloc_start{UnmappedVa};
+ VaType alloc_end{current_linear_alloc_end + size};
+
+ // Avoid searching backwards in the address space if possible
+ if (alloc_end >= current_linear_alloc_end && alloc_end <= this->va_limit) {
+ auto alloc_end_successor{
+ std::lower_bound(this->blocks.begin(), this->blocks.end(), alloc_end)};
+ if (alloc_end_successor == this->blocks.begin()) {
+ ASSERT_MSG(false, "First block in AS map is invalid!");
+ }
+
+ auto alloc_end_predecessor{std::prev(alloc_end_successor)};
+ if (alloc_end_predecessor->virt <= current_linear_alloc_end) {
+ alloc_start = current_linear_alloc_end;
+ } else {
+            // Skip over any fixed mappings in front of us
+ while (alloc_end_successor != this->blocks.end()) {
+ if (alloc_end_successor->virt - alloc_end_predecessor->virt < size ||
+ alloc_end_predecessor->Mapped()) {
+ alloc_start = alloc_end_predecessor->virt;
+ break;
+ }
+
+ alloc_end_predecessor = alloc_end_successor++;
+
+ // Use the VA limit to calculate if we can fit in the final block since it has no
+ // successor
+ if (alloc_end_successor == this->blocks.end()) {
+ alloc_end = alloc_end_predecessor->virt + size;
+
+ if (alloc_end >= alloc_end_predecessor->virt && alloc_end <= this->va_limit) {
+ alloc_start = alloc_end_predecessor->virt;
+ }
+ }
+ }
+ }
+ }
+
+ if (alloc_start != UnmappedVa) {
+ current_linear_alloc_end = alloc_start + size;
+ } else { // If linear allocation overflows the AS then find a gap
+ if (this->blocks.size() <= 2) {
+ ASSERT_MSG(false, "Unexpected allocator state!");
+ }
+
+ auto search_predecessor{this->blocks.begin()};
+ auto search_successor{std::next(search_predecessor)};
+
+ while (search_successor != this->blocks.end() &&
+ (search_successor->virt - search_predecessor->virt < size ||
+ search_predecessor->Mapped())) {
+ search_predecessor = search_successor++;
+ }
+
+ if (search_successor != this->blocks.end()) {
+ alloc_start = search_predecessor->virt;
+ } else {
+ return {}; // AS is full
+ }
+ }
+
+ this->MapLocked(alloc_start, true, size, {});
+ return alloc_start;
+}
+
+ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) {
+ this->Map(virt, true, size);
+}
+
+ALLOC_MEMBER(void)::Free(VaType virt, VaType size) {
+ this->Unmap(virt, size);
+}
+} // namespace Common
diff --git a/src/common/algorithm.h b/src/common/algorithm.h
index 4804a3421..c27c9241d 100644
--- a/src/common/algorithm.h
+++ b/src/common/algorithm.h
@@ -1,6 +1,5 @@
-// Copyright 2019 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -25,4 +24,12 @@ template <class ForwardIt, class T, class Compare = std::less<>>
return first != last && !comp(value, *first) ? first : last;
}
+template <typename T, typename Func, typename... Args>
+T FoldRight(T initial_value, Func&& func, Args&&... args) {
+ T value{initial_value};
+ const auto high_func = [&value, &func]<typename U>(U x) { value = func(value, x); };
+ (std::invoke(high_func, std::forward<Args>(args)), ...);
+ return value;
+}
+
} // namespace Common
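The new FoldRight helper applies the callable to an accumulator and each trailing argument in order. A small illustrative example (hypothetical, not from the change):

    #include "common/algorithm.h"

    // Computes ((0 + 1) + 2) + 3 == 6 by folding the lambda over the argument pack.
    int ExampleFoldRight() {
        return Common::FoldRight(0, [](int acc, int x) { return acc + x; }, 1, 2, 3);
    }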
diff --git a/src/common/alignment.h b/src/common/alignment.h
index 8570c7d3c..7e897334b 100644
--- a/src/common/alignment.h
+++ b/src/common/alignment.h
@@ -1,4 +1,5 @@
-// This file is under the public domain.
+// SPDX-FileCopyrightText: 2014 Jannik Vogel <email@jannikvogel.de>
+// SPDX-License-Identifier: CC0-1.0
#pragma once
diff --git a/src/common/announce_multiplayer_room.h b/src/common/announce_multiplayer_room.h
new file mode 100644
index 000000000..4a3100fa4
--- /dev/null
+++ b/src/common/announce_multiplayer_room.h
@@ -0,0 +1,140 @@
+// SPDX-FileCopyrightText: Copyright 2017 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <array>
+#include <functional>
+#include <string>
+#include <vector>
+#include "common/common_types.h"
+#include "common/socket_types.h"
+#include "web_service/web_result.h"
+
+namespace AnnounceMultiplayerRoom {
+
+struct GameInfo {
+ std::string name{""};
+ u64 id{0};
+ std::string version{""};
+};
+
+struct Member {
+ std::string username;
+ std::string nickname;
+ std::string display_name;
+ std::string avatar_url;
+ Network::IPv4Address fake_ip;
+ GameInfo game;
+};
+
+struct RoomInformation {
+ std::string name; ///< Name of the server
+ std::string description; ///< Server description
+ u32 member_slots; ///< Maximum number of members in this room
+ u16 port; ///< The port of this room
+ GameInfo preferred_game; ///< Game to advertise that you want to play
+ std::string host_username; ///< Forum username of the host
+ bool enable_yuzu_mods; ///< Allow yuzu Moderators to moderate on this room
+};
+
+struct Room {
+ RoomInformation information;
+
+ std::string id;
+ std::string verify_uid; ///< UID used for verification
+ std::string ip;
+ u32 net_version;
+ bool has_password;
+
+ std::vector<Member> members;
+};
+using RoomList = std::vector<Room>;
+
+/**
+ * An AnnounceMultiplayerRoom interface class. A backend that submits data to or fetches data
+ * from a web service should implement this interface.
+ */
+class Backend {
+public:
+ virtual ~Backend() = default;
+
+ /**
+     * Sets the information that gets used for the announce
+     * @param name The name of the room
+     * @param description The room description
+     * @param port The port of the room
+     * @param max_player The maximum number of players of the room
+     * @param net_version The version of the libNetwork that gets used
+     * @param has_password True if the room is password protected
+     * @param preferred_game The preferred game of the room, including its title id and
+     *        version (see GameInfo)
+ */
+ virtual void SetRoomInformation(const std::string& name, const std::string& description,
+ const u16 port, const u32 max_player, const u32 net_version,
+ const bool has_password, const GameInfo& preferred_game) = 0;
+ /**
+ * Adds a player information to the data that gets announced
+ * @param member The player to add
+ */
+ virtual void AddPlayer(const Member& member) = 0;
+
+ /**
+ * Updates the data in the announce service. Re-register the room when required.
+ * @result The result of the update attempt
+ */
+ virtual WebService::WebResult Update() = 0;
+
+ /**
+ * Registers the data in the announce service
+     * @result The result of the register attempt. When the result code is Success, a global GUID
+     * of the room which may be used for verification will be in the result's returned_data.
+ */
+ virtual WebService::WebResult Register() = 0;
+
+ /**
+ * Empties the stored players
+ */
+ virtual void ClearPlayers() = 0;
+
+ /**
+ * Get the room information from the announce service
+ * @result A list of all rooms the announce service has
+ */
+ virtual RoomList GetRoomList() = 0;
+
+ /**
+ * Sends a delete message to the announce service
+ */
+ virtual void Delete() = 0;
+};
+
+/**
+ * Empty implementation of AnnounceMultiplayerRoom interface that drops all data. Used when a
+ * functional backend implementation is not available.
+ */
+class NullBackend : public Backend {
+public:
+ ~NullBackend() = default;
+ void SetRoomInformation(const std::string& /*name*/, const std::string& /*description*/,
+ const u16 /*port*/, const u32 /*max_player*/, const u32 /*net_version*/,
+ const bool /*has_password*/,
+ const GameInfo& /*preferred_game*/) override {}
+ void AddPlayer(const Member& /*member*/) override {}
+ WebService::WebResult Update() override {
+ return WebService::WebResult{WebService::WebResult::Code::NoWebservice,
+ "WebService is missing", ""};
+ }
+ WebService::WebResult Register() override {
+ return WebService::WebResult{WebService::WebResult::Code::NoWebservice,
+ "WebService is missing", ""};
+ }
+ void ClearPlayers() override {}
+ RoomList GetRoomList() override {
+ return RoomList{};
+ }
+
+ void Delete() override {}
+};
+
+} // namespace AnnounceMultiplayerRoom
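A caller-side sketch of how the Backend interface and its NullBackend fallback might be used (hypothetical; the function, port, and game values below are illustrative only):

    #include <memory>

    #include "common/announce_multiplayer_room.h"

    // Hypothetical sketch: fall back to NullBackend when no web service backend
    // is available, then announce a room through the common interface.
    void ExampleAnnounce(std::unique_ptr<AnnounceMultiplayerRoom::Backend> backend) {
        if (!backend) {
            backend = std::make_unique<AnnounceMultiplayerRoom::NullBackend>();
        }

        const AnnounceMultiplayerRoom::GameInfo game{"Example Game", 0x0100000000001000ULL, "1.0.0"};
        backend->SetRoomInformation("Example Room", "An example room", 24872, 8, 1, false, game);

        // With NullBackend this returns a WebResult whose code is NoWebservice.
        const WebService::WebResult result = backend->Register();
        (void)result;
    }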
diff --git a/src/common/assert.cpp b/src/common/assert.cpp
index 72f1121aa..6026b7dc2 100644
--- a/src/common/assert.cpp
+++ b/src/common/assert.cpp
@@ -1,14 +1,18 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/settings.h"
-void assert_handle_failure() {
+void assert_fail_impl() {
if (Settings::values.use_debug_asserts) {
Crash();
}
}
+
+[[noreturn]] void unreachable_impl() {
+ Crash();
+ throw std::runtime_error("Unreachable code");
+}
diff --git a/src/common/assert.h b/src/common/assert.h
index 33060d865..8c927fcc0 100644
--- a/src/common/assert.h
+++ b/src/common/assert.h
@@ -1,6 +1,6 @@
-// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2013 Dolphin Emulator Project
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -9,44 +9,43 @@
// Sometimes we want to try to continue even after hitting an assert.
// However touching this file yields a global recompilation as this header is included almost
// everywhere. So let's just move the handling of the failed assert to a single cpp file.
-void assert_handle_failure();
-// For asserts we'd like to keep all the junk executed when an assert happens away from the
-// important code in the function. One way of doing this is to put all the relevant code inside a
-// lambda and force the compiler to not inline it. Unfortunately, MSVC seems to have no syntax to
-// specify __declspec on lambda functions, so what we do instead is define a noinline wrapper
-// template that calls the lambda. This seems to generate an extra instruction at the call-site
-// compared to the ideal implementation (which wouldn't support ASSERT_MSG parameters), but is good
-// enough for our purposes.
-template <typename Fn>
-#if defined(_MSC_VER)
-[[msvc::noinline]]
-#elif defined(__GNUC__)
-[[gnu::cold, gnu::noinline]]
+void assert_fail_impl();
+[[noreturn]] void unreachable_impl();
+
+#ifdef _MSC_VER
+#define YUZU_NO_INLINE __declspec(noinline)
+#else
+#define YUZU_NO_INLINE __attribute__((noinline))
#endif
-static void
-assert_noinline_call(const Fn& fn) {
- fn();
- assert_handle_failure();
-}
#define ASSERT(_a_) \
- do \
- if (!(_a_)) { \
- assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
+ ([&]() YUZU_NO_INLINE { \
+ if (!(_a_)) [[unlikely]] { \
+ LOG_CRITICAL(Debug, "Assertion Failed!"); \
+ assert_fail_impl(); \
} \
- while (0)
+ }())
#define ASSERT_MSG(_a_, ...) \
- do \
- if (!(_a_)) { \
- assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
+ ([&]() YUZU_NO_INLINE { \
+ if (!(_a_)) [[unlikely]] { \
+ LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); \
+ assert_fail_impl(); \
} \
- while (0)
+ }())
+
+#define UNREACHABLE() \
+ do { \
+ LOG_CRITICAL(Debug, "Unreachable code!"); \
+ unreachable_impl(); \
+ } while (0)
-#define UNREACHABLE() assert_noinline_call([] { LOG_CRITICAL(Debug, "Unreachable code!"); })
#define UNREACHABLE_MSG(...) \
- assert_noinline_call([&] { LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); })
+ do { \
+ LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); \
+ unreachable_impl(); \
+ } while (0)
#ifdef _DEBUG
#define DEBUG_ASSERT(_a_) ASSERT(_a_)
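Call sites keep the same shape as before; only the macro expansion changed. A brief hypothetical example of the ASSERT_MSG/UNREACHABLE_MSG pattern (format arguments go through the logging macros, fmt-style):

    #include "common/assert.h"

    // Hypothetical call-site sketch for the reworked macros.
    void ExampleAssertUsage(int value) {
        ASSERT_MSG(value >= 0, "value must be non-negative, got {}", value);

        switch (value & 1) {
        case 0:
        case 1:
            break;
        default:
            UNREACHABLE_MSG("value & 1 produced {}", value & 1);
        }
    }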
diff --git a/src/common/atomic_helpers.h b/src/common/atomic_helpers.h
new file mode 100644
index 000000000..bef5015c1
--- /dev/null
+++ b/src/common/atomic_helpers.h
@@ -0,0 +1,775 @@
+// SPDX-FileCopyrightText: 2013-2016 Cameron Desrochers
+// SPDX-FileCopyrightText: 2015 Jeff Preshing
+// SPDX-License-Identifier: BSD-2-Clause AND Zlib
+
+// Distributed under the simplified BSD license (see the license file that
+// should have come with this header).
+// Uses Jeff Preshing's semaphore implementation (under the terms of its
+// separate zlib license, embedded below).
+
+#pragma once
+
+// Provides portable (VC++2010+, Intel ICC 13, GCC 4.7+, and anything C++11 compliant)
+// implementation of low-level memory barriers, plus a few semi-portable utility macros (for
+// inlining and alignment). Also has a basic atomic type (limited to hardware-supported atomics with
+// no memory ordering guarantees). Uses the AE_* prefix for macros (historical reasons), and the
+// "moodycamel" namespace for symbols.
+
+#include <cassert>
+#include <cerrno>
+#include <cstdint>
+#include <ctime>
+#include <type_traits>
+
+// Platform detection
+#if defined(__INTEL_COMPILER)
+#define AE_ICC
+#elif defined(_MSC_VER)
+#define AE_VCPP
+#elif defined(__GNUC__)
+#define AE_GCC
+#endif
+
+#if defined(_M_IA64) || defined(__ia64__)
+#define AE_ARCH_IA64
+#elif defined(_WIN64) || defined(__amd64__) || defined(_M_X64) || defined(__x86_64__)
+#define AE_ARCH_X64
+#elif defined(_M_IX86) || defined(__i386__)
+#define AE_ARCH_X86
+#elif defined(_M_PPC) || defined(__powerpc__)
+#define AE_ARCH_PPC
+#else
+#define AE_ARCH_UNKNOWN
+#endif
+
+// AE_UNUSED
+#define AE_UNUSED(x) ((void)x)
+
+// AE_NO_TSAN/AE_TSAN_ANNOTATE_*
+#if defined(__has_feature)
+#if __has_feature(thread_sanitizer)
+#if __cplusplus >= 201703L // inline variables require C++17
+namespace Common {
+inline int ae_tsan_global;
+}
+#define AE_TSAN_ANNOTATE_RELEASE() \
+    AnnotateHappensBefore(__FILE__, __LINE__, (void*)(&::Common::ae_tsan_global))
+#define AE_TSAN_ANNOTATE_ACQUIRE() \
+    AnnotateHappensAfter(__FILE__, __LINE__, (void*)(&::Common::ae_tsan_global))
+extern "C" void AnnotateHappensBefore(const char*, int, void*);
+extern "C" void AnnotateHappensAfter(const char*, int, void*);
+#else // when we can't work with tsan, attempt to disable its warnings
+#define AE_NO_TSAN __attribute__((no_sanitize("thread")))
+#endif
+#endif
+#endif
+#ifndef AE_NO_TSAN
+#define AE_NO_TSAN
+#endif
+#ifndef AE_TSAN_ANNOTATE_RELEASE
+#define AE_TSAN_ANNOTATE_RELEASE()
+#define AE_TSAN_ANNOTATE_ACQUIRE()
+#endif
+
+// AE_FORCEINLINE
+#if defined(AE_VCPP) || defined(AE_ICC)
+#define AE_FORCEINLINE __forceinline
+#elif defined(AE_GCC)
+//#define AE_FORCEINLINE __attribute__((always_inline))
+#define AE_FORCEINLINE inline
+#else
+#define AE_FORCEINLINE inline
+#endif
+
+// AE_ALIGN
+#if defined(AE_VCPP) || defined(AE_ICC)
+#define AE_ALIGN(x) __declspec(align(x))
+#elif defined(AE_GCC)
+#define AE_ALIGN(x) __attribute__((aligned(x)))
+#else
+// Assume GCC compliant syntax...
+#define AE_ALIGN(x) __attribute__((aligned(x)))
+#endif
+
+// Portable atomic fences implemented below:
+
+namespace Common {
+
+enum memory_order {
+ memory_order_relaxed,
+ memory_order_acquire,
+ memory_order_release,
+ memory_order_acq_rel,
+ memory_order_seq_cst,
+
+ // memory_order_sync: Forces a full sync:
+ // #LoadLoad, #LoadStore, #StoreStore, and most significantly, #StoreLoad
+ memory_order_sync = memory_order_seq_cst
+};
+
+} // namespace Common
+
+#if (defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))) || \
+ (defined(AE_ICC) && __INTEL_COMPILER < 1600)
+// VS2010 and ICC13 don't support std::atomic_*_fence, implement our own fences
+
+#include <intrin.h>
+
+#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
+#define AeFullSync _mm_mfence
+#define AeLiteSync _mm_mfence
+#elif defined(AE_ARCH_IA64)
+#define AeFullSync __mf
+#define AeLiteSync __mf
+#elif defined(AE_ARCH_PPC)
+#include <ppcintrinsics.h>
+#define AeFullSync __sync
+#define AeLiteSync __lwsync
+#endif
+
+#ifdef AE_VCPP
+#pragma warning(push)
+#pragma warning(disable : 4365) // Disable erroneous 'conversion from long to unsigned int,
+ // signed/unsigned mismatch' error when using `assert`
+#ifdef __cplusplus_cli
+#pragma managed(push, off)
+#endif
+#endif
+
+namespace Common {
+
+AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN {
+ switch (order) {
+ case memory_order_relaxed:
+ break;
+ case memory_order_acquire:
+ _ReadBarrier();
+ break;
+ case memory_order_release:
+ _WriteBarrier();
+ break;
+ case memory_order_acq_rel:
+ _ReadWriteBarrier();
+ break;
+ case memory_order_seq_cst:
+ _ReadWriteBarrier();
+ break;
+ default:
+ assert(false);
+ }
+}
+
+// x86/x64 have a strong memory model -- all loads and stores have
+// acquire and release semantics automatically (so only need compiler
+// barriers for those).
+#if defined(AE_ARCH_X86) || defined(AE_ARCH_X64)
+AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN {
+ switch (order) {
+ case memory_order_relaxed:
+ break;
+ case memory_order_acquire:
+ _ReadBarrier();
+ break;
+ case memory_order_release:
+ _WriteBarrier();
+ break;
+ case memory_order_acq_rel:
+ _ReadWriteBarrier();
+ break;
+ case memory_order_seq_cst:
+ _ReadWriteBarrier();
+ AeFullSync();
+ _ReadWriteBarrier();
+ break;
+ default:
+ assert(false);
+ }
+}
+#else
+AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN {
+ // Non-specialized arch, use heavier memory barriers everywhere just in case :-(
+ switch (order) {
+ case memory_order_relaxed:
+ break;
+ case memory_order_acquire:
+ _ReadBarrier();
+ AeLiteSync();
+ _ReadBarrier();
+ break;
+ case memory_order_release:
+ _WriteBarrier();
+ AeLiteSync();
+ _WriteBarrier();
+ break;
+ case memory_order_acq_rel:
+ _ReadWriteBarrier();
+ AeLiteSync();
+ _ReadWriteBarrier();
+ break;
+ case memory_order_seq_cst:
+ _ReadWriteBarrier();
+ AeFullSync();
+ _ReadWriteBarrier();
+ break;
+ default:
+ assert(false);
+ }
+}
+#endif
+} // namespace Common
+#else
+// Use standard library of atomics
+#include <atomic>
+
+namespace Common {
+
+AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN {
+ switch (order) {
+ case memory_order_relaxed:
+ break;
+ case memory_order_acquire:
+ std::atomic_signal_fence(std::memory_order_acquire);
+ break;
+ case memory_order_release:
+ std::atomic_signal_fence(std::memory_order_release);
+ break;
+ case memory_order_acq_rel:
+ std::atomic_signal_fence(std::memory_order_acq_rel);
+ break;
+ case memory_order_seq_cst:
+ std::atomic_signal_fence(std::memory_order_seq_cst);
+ break;
+ default:
+ assert(false);
+ }
+}
+
+AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN {
+ switch (order) {
+ case memory_order_relaxed:
+ break;
+ case memory_order_acquire:
+ AE_TSAN_ANNOTATE_ACQUIRE();
+ std::atomic_thread_fence(std::memory_order_acquire);
+ break;
+ case memory_order_release:
+ AE_TSAN_ANNOTATE_RELEASE();
+ std::atomic_thread_fence(std::memory_order_release);
+ break;
+ case memory_order_acq_rel:
+ AE_TSAN_ANNOTATE_ACQUIRE();
+ AE_TSAN_ANNOTATE_RELEASE();
+ std::atomic_thread_fence(std::memory_order_acq_rel);
+ break;
+ case memory_order_seq_cst:
+ AE_TSAN_ANNOTATE_ACQUIRE();
+ AE_TSAN_ANNOTATE_RELEASE();
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ break;
+ default:
+ assert(false);
+ }
+}
+
+} // namespace Common
+
+#endif
+
+#if !defined(AE_VCPP) || (_MSC_VER >= 1700 && !defined(__cplusplus_cli))
+#define AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
+#endif
+
+#ifdef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
+#include <atomic>
+#endif
+#include <utility>
+
+// WARNING: *NOT* A REPLACEMENT FOR std::atomic. READ CAREFULLY:
+// Provides basic support for atomic variables -- no memory ordering guarantees are provided.
+// The guarantee of atomicity is only made for types that already have atomic load and store
+// guarantees at the hardware level -- on most platforms this generally means aligned pointers and
+// integers (only).
+namespace Common {
+template <typename T>
+class weak_atomic {
+public:
+ AE_NO_TSAN weak_atomic() : value() {}
+#ifdef AE_VCPP
+#pragma warning(push)
+#pragma warning(disable : 4100) // Get rid of (erroneous) 'unreferenced formal parameter' warning
+#endif
+ template <typename U>
+ AE_NO_TSAN weak_atomic(U&& x) : value(std::forward<U>(x)) {}
+#ifdef __cplusplus_cli
+ // Work around bug with universal reference/nullptr combination that only appears when /clr is
+ // on
+ AE_NO_TSAN weak_atomic(nullptr_t) : value(nullptr) {}
+#endif
+ AE_NO_TSAN weak_atomic(weak_atomic const& other) : value(other.load()) {}
+ AE_NO_TSAN weak_atomic(weak_atomic&& other) : value(std::move(other.load())) {}
+#ifdef AE_VCPP
+#pragma warning(pop)
+#endif
+
+ AE_FORCEINLINE operator T() const AE_NO_TSAN {
+ return load();
+ }
+
+#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
+ template <typename U>
+ AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN {
+ value = std::forward<U>(x);
+ return *this;
+ }
+ AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN {
+ value = other.value;
+ return *this;
+ }
+
+ AE_FORCEINLINE T load() const AE_NO_TSAN {
+ return value;
+ }
+
+ AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN {
+#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
+ if (sizeof(T) == 4)
+ return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
+#if defined(_M_AMD64)
+ else if (sizeof(T) == 8)
+ return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
+#endif
+#else
+#error Unsupported platform
+#endif
+ assert(false && "T must be either a 32 or 64 bit type");
+ return value;
+ }
+
+ AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN {
+#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
+ if (sizeof(T) == 4)
+ return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
+#if defined(_M_AMD64)
+ else if (sizeof(T) == 8)
+ return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
+#endif
+#else
+#error Unsupported platform
+#endif
+ assert(false && "T must be either a 32 or 64 bit type");
+ return value;
+ }
+#else
+ template <typename U>
+ AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN {
+ value.store(std::forward<U>(x), std::memory_order_relaxed);
+ return *this;
+ }
+
+ AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN {
+ value.store(other.value.load(std::memory_order_relaxed), std::memory_order_relaxed);
+ return *this;
+ }
+
+ AE_FORCEINLINE T load() const AE_NO_TSAN {
+ return value.load(std::memory_order_relaxed);
+ }
+
+ AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN {
+ return value.fetch_add(increment, std::memory_order_acquire);
+ }
+
+ AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN {
+ return value.fetch_add(increment, std::memory_order_release);
+ }
+#endif
+
+private:
+#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
+ // No std::atomic support, but still need to circumvent compiler optimizations.
+ // `volatile` will make memory access slow, but is guaranteed to be reliable.
+ volatile T value;
+#else
+ std::atomic<T> value;
+#endif
+};
+
+} // namespace Common
+
+// Portable single-producer, single-consumer semaphore below:
+
+#if defined(_WIN32)
+// Avoid including windows.h in a header; we only need a handful of
+// items, so we'll redeclare them here (this is relatively safe since
+// the API generally has to remain stable between Windows versions).
+// I know this is an ugly hack but it still beats polluting the global
+// namespace with thousands of generic names or adding a .cpp for nothing.
+extern "C" {
+struct _SECURITY_ATTRIBUTES;
+__declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes,
+ long lInitialCount, long lMaximumCount,
+ const wchar_t* lpName);
+__declspec(dllimport) int __stdcall CloseHandle(void* hObject);
+__declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle,
+ unsigned long dwMilliseconds);
+__declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount,
+ long* lpPreviousCount);
+}
+#elif defined(__MACH__)
+#include <mach/mach.h>
+#elif defined(__unix__)
+#include <semaphore.h>
+#elif defined(FREERTOS)
+#include <FreeRTOS.h>
+#include <semphr.h>
+#include <task.h>
+#endif
+
+namespace Common {
+// Code in the spsc_sema namespace below is an adaptation of Jeff Preshing's
+// portable + lightweight semaphore implementations, originally from
+// https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h
+// LICENSE:
+// Copyright (c) 2015 Jeff Preshing
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgement in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+namespace spsc_sema {
+#if defined(_WIN32)
+class Semaphore {
+private:
+ void* m_hSema;
+
+ Semaphore(const Semaphore& other);
+ Semaphore& operator=(const Semaphore& other);
+
+public:
+ AE_NO_TSAN Semaphore(int initialCount = 0) : m_hSema() {
+ assert(initialCount >= 0);
+ const long maxLong = 0x7fffffff;
+ m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr);
+ assert(m_hSema);
+ }
+
+ AE_NO_TSAN ~Semaphore() {
+ CloseHandle(m_hSema);
+ }
+
+ bool wait() AE_NO_TSAN {
+ const unsigned long infinite = 0xffffffff;
+ return WaitForSingleObject(m_hSema, infinite) == 0;
+ }
+
+ bool try_wait() AE_NO_TSAN {
+ return WaitForSingleObject(m_hSema, 0) == 0;
+ }
+
+ bool timed_wait(std::uint64_t usecs) AE_NO_TSAN {
+ return WaitForSingleObject(m_hSema, (unsigned long)(usecs / 1000)) == 0;
+ }
+
+ void signal(int count = 1) AE_NO_TSAN {
+ while (!ReleaseSemaphore(m_hSema, count, nullptr))
+ ;
+ }
+};
+#elif defined(__MACH__)
+//---------------------------------------------------------
+// Semaphore (Apple iOS and OSX)
+// Can't use POSIX semaphores due to
+// http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html
+//---------------------------------------------------------
+class Semaphore {
+private:
+ semaphore_t m_sema;
+
+ Semaphore(const Semaphore& other);
+ Semaphore& operator=(const Semaphore& other);
+
+public:
+ AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema() {
+ assert(initialCount >= 0);
+ kern_return_t rc =
+ semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount);
+ assert(rc == KERN_SUCCESS);
+ AE_UNUSED(rc);
+ }
+
+ AE_NO_TSAN ~Semaphore() {
+ semaphore_destroy(mach_task_self(), m_sema);
+ }
+
+ bool wait() AE_NO_TSAN {
+ return semaphore_wait(m_sema) == KERN_SUCCESS;
+ }
+
+ bool try_wait() AE_NO_TSAN {
+ return timed_wait(0);
+ }
+
+ bool timed_wait(std::uint64_t timeout_usecs) AE_NO_TSAN {
+ mach_timespec_t ts;
+ ts.tv_sec = static_cast<unsigned int>(timeout_usecs / 1000000);
+ ts.tv_nsec = static_cast<int>((timeout_usecs % 1000000) * 1000);
+
+ // added in OSX 10.10:
+ // https://developer.apple.com/library/prerelease/mac/documentation/General/Reference/APIDiffsMacOSX10_10SeedDiff/modules/Darwin.html
+ kern_return_t rc = semaphore_timedwait(m_sema, ts);
+ return rc == KERN_SUCCESS;
+ }
+
+ void signal() AE_NO_TSAN {
+ while (semaphore_signal(m_sema) != KERN_SUCCESS)
+ ;
+ }
+
+ void signal(int count) AE_NO_TSAN {
+ while (count-- > 0) {
+ while (semaphore_signal(m_sema) != KERN_SUCCESS)
+ ;
+ }
+ }
+};
+#elif defined(__unix__)
+//---------------------------------------------------------
+// Semaphore (POSIX, Linux)
+//---------------------------------------------------------
+class Semaphore {
+private:
+ sem_t m_sema;
+
+ Semaphore(const Semaphore& other);
+ Semaphore& operator=(const Semaphore& other);
+
+public:
+ AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema() {
+ assert(initialCount >= 0);
+ int rc = sem_init(&m_sema, 0, static_cast<unsigned int>(initialCount));
+ assert(rc == 0);
+ AE_UNUSED(rc);
+ }
+
+ AE_NO_TSAN ~Semaphore() {
+ sem_destroy(&m_sema);
+ }
+
+ bool wait() AE_NO_TSAN {
+ // http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error
+ int rc;
+ do {
+ rc = sem_wait(&m_sema);
+ } while (rc == -1 && errno == EINTR);
+ return rc == 0;
+ }
+
+ bool try_wait() AE_NO_TSAN {
+ int rc;
+ do {
+ rc = sem_trywait(&m_sema);
+ } while (rc == -1 && errno == EINTR);
+ return rc == 0;
+ }
+
+ bool timed_wait(std::uint64_t usecs) AE_NO_TSAN {
+ struct timespec ts;
+ const int usecs_in_1_sec = 1000000;
+ const int nsecs_in_1_sec = 1000000000;
+ clock_gettime(CLOCK_REALTIME, &ts);
+ ts.tv_sec += static_cast<time_t>(usecs / usecs_in_1_sec);
+ ts.tv_nsec += static_cast<long>(usecs % usecs_in_1_sec) * 1000;
+ // sem_timedwait bombs if you have more than 1e9 in tv_nsec
+ // so we have to clean things up before passing it in
+ if (ts.tv_nsec >= nsecs_in_1_sec) {
+ ts.tv_nsec -= nsecs_in_1_sec;
+ ++ts.tv_sec;
+ }
+
+ int rc;
+ do {
+ rc = sem_timedwait(&m_sema, &ts);
+ } while (rc == -1 && errno == EINTR);
+ return rc == 0;
+ }
+
+ void signal() AE_NO_TSAN {
+ while (sem_post(&m_sema) == -1)
+ ;
+ }
+
+ void signal(int count) AE_NO_TSAN {
+ while (count-- > 0) {
+ while (sem_post(&m_sema) == -1)
+ ;
+ }
+ }
+};
+#elif defined(FREERTOS)
+//---------------------------------------------------------
+// Semaphore (FreeRTOS)
+//---------------------------------------------------------
+class Semaphore {
+private:
+ SemaphoreHandle_t m_sema;
+
+ Semaphore(const Semaphore& other);
+ Semaphore& operator=(const Semaphore& other);
+
+public:
+ AE_NO_TSAN Semaphore(int initialCount = 0) : m_sema() {
+ assert(initialCount >= 0);
+ m_sema = xSemaphoreCreateCounting(static_cast<UBaseType_t>(~0ull),
+ static_cast<UBaseType_t>(initialCount));
+ assert(m_sema);
+ }
+
+ AE_NO_TSAN ~Semaphore() {
+ vSemaphoreDelete(m_sema);
+ }
+
+ bool wait() AE_NO_TSAN {
+ return xSemaphoreTake(m_sema, portMAX_DELAY) == pdTRUE;
+ }
+
+ bool try_wait() AE_NO_TSAN {
+ // Note: In an ISR context, if this causes a task to unblock,
+ // the caller won't know about it
+ if (xPortIsInsideInterrupt())
+ return xSemaphoreTakeFromISR(m_sema, NULL) == pdTRUE;
+ return xSemaphoreTake(m_sema, 0) == pdTRUE;
+ }
+
+ bool timed_wait(std::uint64_t usecs) AE_NO_TSAN {
+ std::uint64_t msecs = usecs / 1000;
+ TickType_t ticks = static_cast<TickType_t>(msecs / portTICK_PERIOD_MS);
+ if (ticks == 0)
+ return try_wait();
+ return xSemaphoreTake(m_sema, ticks) == pdTRUE;
+ }
+
+ void signal() AE_NO_TSAN {
+ // Note: In an ISR context, if this causes a task to unblock,
+ // the caller won't know about it
+ BaseType_t rc;
+ if (xPortIsInsideInterrupt())
+ rc = xSemaphoreGiveFromISR(m_sema, NULL);
+ else
+ rc = xSemaphoreGive(m_sema);
+ assert(rc == pdTRUE);
+ AE_UNUSED(rc);
+ }
+
+ void signal(int count) AE_NO_TSAN {
+ while (count-- > 0)
+ signal();
+ }
+};
+#else
+#error Unsupported platform! (No semaphore wrapper available)
+#endif
+
+//---------------------------------------------------------
+// LightweightSemaphore
+//---------------------------------------------------------
+class LightweightSemaphore {
+public:
+ typedef std::make_signed<std::size_t>::type ssize_t;
+
+private:
+ weak_atomic<ssize_t> m_count;
+ Semaphore m_sema;
+
+ bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1) AE_NO_TSAN {
+ ssize_t oldCount;
+ // Is there a better way to set the initial spin count?
+ // If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC,
+ // as threads start hitting the kernel semaphore.
+ int spin = 1024;
+ while (--spin >= 0) {
+ if (m_count.load() > 0) {
+ m_count.fetch_add_acquire(-1);
+ return true;
+ }
+ compiler_fence(memory_order_acquire); // Prevent the compiler from collapsing the loop.
+ }
+ oldCount = m_count.fetch_add_acquire(-1);
+ if (oldCount > 0)
+ return true;
+ if (timeout_usecs < 0) {
+ if (m_sema.wait())
+ return true;
+ }
+ if (timeout_usecs > 0 && m_sema.timed_wait(static_cast<uint64_t>(timeout_usecs)))
+ return true;
+ // At this point, we've timed out waiting for the semaphore, but the
+ // count is still decremented indicating we may still be waiting on
+ // it. So we have to re-adjust the count, but only if the semaphore
+ // wasn't signaled enough times for us too since then. If it was, we
+ // need to release the semaphore too.
+ while (true) {
+ oldCount = m_count.fetch_add_release(1);
+ if (oldCount < 0)
+ return false; // successfully restored things to the way they were
+ // Oh, the producer thread just signaled the semaphore after all. Try again:
+ oldCount = m_count.fetch_add_acquire(-1);
+ if (oldCount > 0 && m_sema.try_wait())
+ return true;
+ }
+ }
+
+public:
+ AE_NO_TSAN LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount), m_sema() {
+ assert(initialCount >= 0);
+ }
+
+ bool tryWait() AE_NO_TSAN {
+ if (m_count.load() > 0) {
+ m_count.fetch_add_acquire(-1);
+ return true;
+ }
+ return false;
+ }
+
+ bool wait() AE_NO_TSAN {
+ return tryWait() || waitWithPartialSpinning();
+ }
+
+ bool wait(std::int64_t timeout_usecs) AE_NO_TSAN {
+ return tryWait() || waitWithPartialSpinning(timeout_usecs);
+ }
+
+ void signal(ssize_t count = 1) AE_NO_TSAN {
+ assert(count >= 0);
+ ssize_t oldCount = m_count.fetch_add_release(count);
+ assert(oldCount >= -1);
+ if (oldCount < 0) {
+ m_sema.signal(1);
+ }
+ }
+
+ std::size_t availableApprox() const AE_NO_TSAN {
+ ssize_t count = m_count.load();
+ return count > 0 ? static_cast<std::size_t>(count) : 0;
+ }
+};
+} // namespace spsc_sema
+} // namespace Common
+
+#if defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))
+#pragma warning(pop)
+#ifdef __cplusplus_cli
+#pragma managed(pop)
+#endif
+#endif
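
A minimal usage sketch (not part of the patch) of the spin-then-block behavior above, assuming the class lives in src/common/reader_writer_queue.h; the consumer spins on the atomic count roughly 1024 times before falling back to the kernel-level Semaphore.

#include <thread>
#include "common/reader_writer_queue.h" // assumed location of spsc_sema

void Example() {
    Common::spsc_sema::LightweightSemaphore items;
    std::thread producer([&] {
        items.signal(); // fast path: only bumps m_count unless a waiter is already blocked
    });
    // Spins briefly on m_count, then waits on the kernel semaphore for up to 5 ms.
    if (items.wait(5000)) {
        // ... consume one item ...
    }
    producer.join();
}
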
diff --git a/src/common/atomic_ops.h b/src/common/atomic_ops.h
index 2b1f515e8..c18bb33c4 100644
--- a/src/common/atomic_ops.h
+++ b/src/common/atomic_ops.h
@@ -1,16 +1,14 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
-#include <cstring>
-#include <memory>
-
#include "common/common_types.h"
#if _MSC_VER
#include <intrin.h>
+#else
+#include <cstring>
#endif
namespace Common {
@@ -47,6 +45,50 @@ namespace Common {
reinterpret_cast<__int64*>(expected.data())) != 0;
}
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected,
+ u8& actual) {
+ actual =
+ _InterlockedCompareExchange8(reinterpret_cast<volatile char*>(pointer), value, expected);
+ return actual == expected;
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected,
+ u16& actual) {
+ actual =
+ _InterlockedCompareExchange16(reinterpret_cast<volatile short*>(pointer), value, expected);
+ return actual == expected;
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected,
+ u32& actual) {
+ actual =
+ _InterlockedCompareExchange(reinterpret_cast<volatile long*>(pointer), value, expected);
+ return actual == expected;
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected,
+ u64& actual) {
+ actual = _InterlockedCompareExchange64(reinterpret_cast<volatile __int64*>(pointer), value,
+ expected);
+ return actual == expected;
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected,
+ u128& actual) {
+ const bool result =
+ _InterlockedCompareExchange128(reinterpret_cast<volatile __int64*>(pointer), value[1],
+ value[0], reinterpret_cast<__int64*>(expected.data())) != 0;
+ actual = expected;
+ return result;
+}
+
+[[nodiscard]] inline u128 AtomicLoad128(volatile u64* pointer) {
+ u128 result{};
+ _InterlockedCompareExchange128(reinterpret_cast<volatile __int64*>(pointer), result[1],
+ result[0], reinterpret_cast<__int64*>(result.data()));
+ return result;
+}
+
#else
[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
@@ -73,6 +115,52 @@ namespace Common {
return __sync_bool_compare_and_swap((unsigned __int128*)pointer, expected_a, value_a);
}
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected,
+ u8& actual) {
+ actual = __sync_val_compare_and_swap(pointer, expected, value);
+ return actual == expected;
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected,
+ u16& actual) {
+ actual = __sync_val_compare_and_swap(pointer, expected, value);
+ return actual == expected;
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected,
+ u32& actual) {
+ actual = __sync_val_compare_and_swap(pointer, expected, value);
+ return actual == expected;
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected,
+ u64& actual) {
+ actual = __sync_val_compare_and_swap(pointer, expected, value);
+ return actual == expected;
+}
+
+[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected,
+ u128& actual) {
+ unsigned __int128 value_a;
+ unsigned __int128 expected_a;
+ unsigned __int128 actual_a;
+ std::memcpy(&value_a, value.data(), sizeof(u128));
+ std::memcpy(&expected_a, expected.data(), sizeof(u128));
+ actual_a = __sync_val_compare_and_swap((unsigned __int128*)pointer, expected_a, value_a);
+ std::memcpy(actual.data(), &actual_a, sizeof(u128));
+ return actual_a == expected_a;
+}
+
+[[nodiscard]] inline u128 AtomicLoad128(volatile u64* pointer) {
+ unsigned __int128 zeros_a = 0;
+ unsigned __int128 result_a =
+ __sync_val_compare_and_swap((unsigned __int128*)pointer, zeros_a, zeros_a);
+
+ u128 result;
+ std::memcpy(result.data(), &result_a, sizeof(u128));
+ return result;
+}
+
#endif
} // namespace Common
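
A hedged sketch of how the new overloads that report the observed value can be driven; FetchOr is an illustrative helper, not part of the patch.

#include "common/atomic_ops.h"

u32 FetchOr(volatile u32* word, u32 bits) {
    u32 expected = *word;
    u32 actual{};
    // On failure, 'actual' holds the value the CAS observed, so it becomes the
    // next 'expected' without a separate reload of *word.
    while (!Common::AtomicCompareAndSwap(word, expected | bits, expected, actual)) {
        expected = actual;
    }
    return expected; // value before the bits were set
}
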
diff --git a/src/common/bit_cast.h b/src/common/bit_cast.h
index a32a063d1..535148b4d 100644
--- a/src/common/bit_cast.h
+++ b/src/common/bit_cast.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/bit_field.h b/src/common/bit_field.h
index 0f0661172..7e1df62b1 100644
--- a/src/common/bit_field.h
+++ b/src/common/bit_field.h
@@ -1,39 +1,12 @@
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-// Copyright 2014 Tony Wasserka
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the owner nor the names of its contributors may
-// be used to endorse or promote products derived from this software
-// without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-FileCopyrightText: 2014 Tony Wasserka
+// SPDX-FileCopyrightText: 2014 Dolphin Emulator Project
+// SPDX-License-Identifier: BSD-3-Clause AND GPL-2.0-or-later
#pragma once
#include <cstddef>
#include <limits>
#include <type_traits>
-#include "common/common_funcs.h"
#include "common/swap.h"
/*
@@ -173,7 +146,16 @@ public:
}
constexpr void Assign(const T& value) {
+#ifdef _MSC_VER
storage = static_cast<StorageType>((storage & ~mask) | FormatValue(value));
+#else
+ // Explicitly reload with memcpy to avoid compiler aliasing quirks
+ // regarding optimization: GCC/Clang clobber chained stores to
+ // different bitfields in the same struct with the last value.
+ StorageTypeWithEndian storage_;
+ std::memcpy(&storage_, &storage, sizeof(storage_));
+ storage = static_cast<StorageType>((storage_ & ~mask) | FormatValue(value));
+#endif
}
[[nodiscard]] constexpr T Value() const {
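
A hedged sketch of the pattern the memcpy reload above is guarding against; the union and field names are illustrative only.

#include "common/bit_field.h"
#include "common/common_types.h"

union ControlReg {
    u32 raw;
    BitField<0, 4, u32> mode;
    BitField<4, 1, u32> enabled;
};

void Configure(ControlReg& reg) {
    // Each Assign reads the shared storage, masks in its own bits and writes it
    // back; without the explicit reload, GCC/Clang may let the second store
    // clobber the bits written by the first.
    reg.mode.Assign(3);
    reg.enabled.Assign(1);
}
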
diff --git a/src/common/bit_set.h b/src/common/bit_set.h
index 9235ad412..74754504b 100644
--- a/src/common/bit_set.h
+++ b/src/common/bit_set.h
@@ -1,18 +1,5 @@
-/*
- * Copyright (c) 2018-2020 Atmosphère-NX
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/bit_util.h b/src/common/bit_util.h
index f50d3308a..e4e6287f3 100644
--- a/src/common/bit_util.h
+++ b/src/common/bit_util.h
@@ -1,6 +1,5 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -57,4 +56,11 @@ requires std::is_integral_v<T>
return static_cast<T>(1ULL << ((8U * sizeof(T)) - std::countl_zero(value - 1U)));
}
+template <size_t bit_index, typename T>
+requires std::is_integral_v<T>
+[[nodiscard]] constexpr bool Bit(const T value) {
+ static_assert(bit_index < BitSize<T>(), "bit_index must be smaller than size of T");
+ return ((value >> bit_index) & T(1)) == T(1);
+}
+
} // namespace Common
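
A short usage note (not part of the patch) for the new Bit helper: the index is validated at compile time against the width of T.

#include "common/bit_util.h"
#include "common/common_types.h"

constexpr u32 flags = 0b0100;
static_assert(Common::Bit<2>(flags));  // bit 2 is set
static_assert(!Common::Bit<0>(flags)); // bit 0 is clear
// Common::Bit<32>(flags) would be rejected by the static_assert on bit_index.
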
diff --git a/src/common/bounded_threadsafe_queue.h b/src/common/bounded_threadsafe_queue.h
new file mode 100644
index 000000000..7e465549b
--- /dev/null
+++ b/src/common/bounded_threadsafe_queue.h
@@ -0,0 +1,167 @@
+// SPDX-FileCopyrightText: Copyright (c) 2020 Erik Rigtorp <erik@rigtorp.se>
+// SPDX-License-Identifier: MIT
+
+#pragma once
+
+#include <atomic>
+#include <bit>
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+#include <new>
+#include <stop_token>
+#include <type_traits>
+#include <utility>
+
+namespace Common {
+
+#if defined(__cpp_lib_hardware_interference_size)
+constexpr size_t hardware_interference_size = std::hardware_destructive_interference_size;
+#else
+constexpr size_t hardware_interference_size = 64;
+#endif
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4324)
+#endif
+
+template <typename T, size_t capacity = 0x400>
+class MPSCQueue {
+public:
+ explicit MPSCQueue() : allocator{std::allocator<Slot<T>>()} {
+ // Allocate one extra slot to prevent false sharing on the last slot
+ slots = allocator.allocate(capacity + 1);
+ // Allocators are not required to honor alignment for over-aligned types
+ // (see http://eel.is/c++draft/allocator.requirements#10) so we verify
+ // alignment here
+ if (reinterpret_cast<uintptr_t>(slots) % alignof(Slot<T>) != 0) {
+ allocator.deallocate(slots, capacity + 1);
+ throw std::bad_alloc();
+ }
+ for (size_t i = 0; i < capacity; ++i) {
+ std::construct_at(&slots[i]);
+ }
+ static_assert(std::has_single_bit(capacity), "capacity must be an integer power of 2");
+ static_assert(alignof(Slot<T>) == hardware_interference_size,
+ "Slot must be aligned to cache line boundary to prevent false sharing");
+ static_assert(sizeof(Slot<T>) % hardware_interference_size == 0,
+ "Slot size must be a multiple of cache line size to prevent "
+ "false sharing between adjacent slots");
+ static_assert(sizeof(MPSCQueue) % hardware_interference_size == 0,
+ "Queue size must be a multiple of cache line size to "
+ "prevent false sharing between adjacent queues");
+ }
+
+ ~MPSCQueue() noexcept {
+ for (size_t i = 0; i < capacity; ++i) {
+ std::destroy_at(&slots[i]);
+ }
+ allocator.deallocate(slots, capacity + 1);
+ }
+
+ // The queue must be both non-copyable and non-movable
+ MPSCQueue(const MPSCQueue&) = delete;
+ MPSCQueue& operator=(const MPSCQueue&) = delete;
+
+ MPSCQueue(MPSCQueue&&) = delete;
+ MPSCQueue& operator=(MPSCQueue&&) = delete;
+
+ void Push(const T& v) noexcept {
+ static_assert(std::is_nothrow_copy_constructible_v<T>,
+ "T must be nothrow copy constructible");
+ emplace(v);
+ }
+
+ template <typename P, typename = std::enable_if_t<std::is_nothrow_constructible_v<T, P&&>>>
+ void Push(P&& v) noexcept {
+ emplace(std::forward<P>(v));
+ }
+
+ void Pop(T& v, std::stop_token stop) noexcept {
+ auto const tail = tail_.fetch_add(1);
+ auto& slot = slots[idx(tail)];
+ if (!slot.turn.test()) {
+ std::unique_lock lock{cv_mutex};
+ cv.wait(lock, stop, [&slot] { return slot.turn.test(); });
+ }
+ v = slot.move();
+ slot.destroy();
+ slot.turn.clear();
+ slot.turn.notify_one();
+ }
+
+private:
+ template <typename U = T>
+ struct Slot {
+ ~Slot() noexcept {
+ if (turn.test()) {
+ destroy();
+ }
+ }
+
+ template <typename... Args>
+ void construct(Args&&... args) noexcept {
+ static_assert(std::is_nothrow_constructible_v<U, Args&&...>,
+ "T must be nothrow constructible with Args&&...");
+ std::construct_at(reinterpret_cast<U*>(&storage), std::forward<Args>(args)...);
+ }
+
+ void destroy() noexcept {
+ static_assert(std::is_nothrow_destructible_v<U>, "T must be nothrow destructible");
+ std::destroy_at(reinterpret_cast<U*>(&storage));
+ }
+
+ U&& move() noexcept {
+ return reinterpret_cast<U&&>(storage);
+ }
+
+ // Align to avoid false sharing between adjacent slots
+ alignas(hardware_interference_size) std::atomic_flag turn{};
+ struct aligned_store {
+ struct type {
+ alignas(U) unsigned char data[sizeof(U)];
+ };
+ };
+ typename aligned_store::type storage;
+ };
+
+ template <typename... Args>
+ void emplace(Args&&... args) noexcept {
+ static_assert(std::is_nothrow_constructible_v<T, Args&&...>,
+ "T must be nothrow constructible with Args&&...");
+ auto const head = head_.fetch_add(1);
+ auto& slot = slots[idx(head)];
+ slot.turn.wait(true);
+ slot.construct(std::forward<Args>(args)...);
+ slot.turn.test_and_set();
+ cv.notify_one();
+ }
+
+ constexpr size_t idx(size_t i) const noexcept {
+ return i & mask;
+ }
+
+ static constexpr size_t mask = capacity - 1;
+
+ // Align to avoid false sharing between head_ and tail_
+ alignas(hardware_interference_size) std::atomic<size_t> head_{0};
+ alignas(hardware_interference_size) std::atomic<size_t> tail_{0};
+
+ std::mutex cv_mutex;
+ std::condition_variable_any cv;
+
+ Slot<T>* slots;
+ [[no_unique_address]] std::allocator<Slot<T>> allocator;
+
+ static_assert(std::is_nothrow_copy_assignable_v<T> || std::is_nothrow_move_assignable_v<T>,
+ "T must be nothrow copy or move assignable");
+
+ static_assert(std::is_nothrow_destructible_v<T>, "T must be nothrow destructible");
+};
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+} // namespace Common
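
A hedged sketch (not part of the patch) of the intended multi-producer/single-consumer usage; names are illustrative.

#include <stop_token>
#include <thread>
#include "common/bounded_threadsafe_queue.h"

void Example(std::stop_token stop) {
    Common::MPSCQueue<int> queue;
    std::jthread producer([&] { queue.Push(42); }); // any number of threads may Push
    int value{};
    // The single consumer blocks on the slot's turn flag / condition variable
    // until a producer publishes a value or stop is requested.
    queue.Pop(value, stop);
}
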
diff --git a/src/common/cityhash.cpp b/src/common/cityhash.cpp
index 66218fc21..d50ac9e6c 100644
--- a/src/common/cityhash.cpp
+++ b/src/common/cityhash.cpp
@@ -1,23 +1,8 @@
-// Copyright (c) 2011 Google, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-//
+// SPDX-FileCopyrightText: 2011 Google, Inc.
+// SPDX-FileContributor: Geoff Pike
+// SPDX-FileContributor: Jyrki Alakuijala
+// SPDX-License-Identifier: MIT
+
// CityHash, by Geoff Pike and Jyrki Alakuijala
//
// This file provides CityHash64() and related functions.
diff --git a/src/common/cityhash.h b/src/common/cityhash.h
index d74fc7639..627ba81cd 100644
--- a/src/common/cityhash.h
+++ b/src/common/cityhash.h
@@ -1,23 +1,8 @@
-// Copyright (c) 2011 Google, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-//
+// SPDX-FileCopyrightText: 2011 Google, Inc.
+// SPDX-FileContributor: Geoff Pike
+// SPDX-FileContributor: Jyrki Alakuijala
+// SPDX-License-Identifier: MIT
+
// CityHash, by Geoff Pike and Jyrki Alakuijala
//
// http://code.google.com/p/cityhash/
diff --git a/src/common/common_funcs.h b/src/common/common_funcs.h
index 4c1e29de6..e1e2a90fc 100644
--- a/src/common/common_funcs.h
+++ b/src/common/common_funcs.h
@@ -1,6 +1,5 @@
-// Copyright 2019 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -19,14 +18,16 @@
/// Helper macros to insert unused bytes or words to properly align structs. These values will be
/// zero-initialized.
#define INSERT_PADDING_BYTES(num_bytes) \
- std::array<u8, num_bytes> CONCAT2(pad, __LINE__) {}
+ [[maybe_unused]] std::array<u8, num_bytes> CONCAT2(pad, __LINE__) {}
#define INSERT_PADDING_WORDS(num_words) \
- std::array<u32, num_words> CONCAT2(pad, __LINE__) {}
+ [[maybe_unused]] std::array<u32, num_words> CONCAT2(pad, __LINE__) {}
/// These are similar to the INSERT_PADDING_* macros but do not zero-initialize the contents.
/// This keeps the structure trivial to construct.
-#define INSERT_PADDING_BYTES_NOINIT(num_bytes) std::array<u8, num_bytes> CONCAT2(pad, __LINE__)
-#define INSERT_PADDING_WORDS_NOINIT(num_words) std::array<u32, num_words> CONCAT2(pad, __LINE__)
+#define INSERT_PADDING_BYTES_NOINIT(num_bytes) \
+ [[maybe_unused]] std::array<u8, num_bytes> CONCAT2(pad, __LINE__)
+#define INSERT_PADDING_WORDS_NOINIT(num_words) \
+ [[maybe_unused]] std::array<u32, num_words> CONCAT2(pad, __LINE__)
#ifndef _MSC_VER
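
A hedged example of the padding macros in a hardware-style struct; the added [[maybe_unused]] keeps unused-field warnings quiet for the generated pad members. Regs is illustrative only.

#include "common/common_funcs.h"
#include "common/common_types.h"

struct Regs {
    u32 control;
    INSERT_PADDING_WORDS(3); // three zero-initialized u32 words named pad<line>
    u32 status;
};
static_assert(sizeof(Regs) == 5 * sizeof(u32));
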
diff --git a/src/common/common_types.h b/src/common/common_types.h
index 99bffc460..0fc225aff 100644
--- a/src/common/common_types.h
+++ b/src/common/common_types.h
@@ -1,3 +1,7 @@
+// SPDX-FileCopyrightText: 2012 Gekko Emulator
+// SPDX-FileContributor: ShizZy <shizzy247@gmail.com>
+// SPDX-License-Identifier: GPL-2.0-or-later
+
/**
* Copyright (C) 2005-2012 Gekko Emulator
*
diff --git a/src/common/concepts.h b/src/common/concepts.h
index aa08065a7..a97555f6a 100644
--- a/src/common/concepts.h
+++ b/src/common/concepts.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/detached_tasks.cpp b/src/common/detached_tasks.cpp
index c1362631e..da64848da 100644
--- a/src/common/detached_tasks.cpp
+++ b/src/common/detached_tasks.cpp
@@ -1,6 +1,5 @@
-// Copyright 2018 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2018 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <thread>
#include "common/assert.h"
@@ -33,9 +32,9 @@ void DetachedTasks::AddTask(std::function<void()> task) {
++instance->count;
std::thread([task{std::move(task)}]() {
task();
- std::unique_lock lock{instance->mutex};
+ std::unique_lock thread_lock{instance->mutex};
--instance->count;
- std::notify_all_at_thread_exit(instance->cv, std::move(lock));
+ std::notify_all_at_thread_exit(instance->cv, std::move(thread_lock));
}).detach();
}
diff --git a/src/common/detached_tasks.h b/src/common/detached_tasks.h
index 5dd8fc27b..416a2d7f3 100644
--- a/src/common/detached_tasks.h
+++ b/src/common/detached_tasks.h
@@ -1,6 +1,5 @@
-// Copyright 2018 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2018 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/div_ceil.h b/src/common/div_ceil.h
index e1db35464..eebc279c2 100644
--- a/src/common/div_ceil.h
+++ b/src/common/div_ceil.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/dynamic_library.cpp b/src/common/dynamic_library.cpp
index 7f0a10521..054277a2b 100644
--- a/src/common/dynamic_library.cpp
+++ b/src/common/dynamic_library.cpp
@@ -1,8 +1,6 @@
-// Copyright 2019 Dolphin Emulator Project
-// Licensed under GPLv2+
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2019 Dolphin Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
-#include <cstring>
#include <string>
#include <utility>
diff --git a/src/common/dynamic_library.h b/src/common/dynamic_library.h
index 3512da940..f42bdf441 100644
--- a/src/common/dynamic_library.h
+++ b/src/common/dynamic_library.h
@@ -1,6 +1,5 @@
-// Copyright 2019 Dolphin Emulator Project
-// Licensed under GPLv2+
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2019 Dolphin Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/elf.h b/src/common/elf.h
new file mode 100644
index 000000000..14a5e9597
--- /dev/null
+++ b/src/common/elf.h
@@ -0,0 +1,333 @@
+// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <array>
+#include <cstddef>
+
+#include "common_types.h"
+
+namespace Common {
+namespace ELF {
+
+/* Type for a 16-bit quantity. */
+using Elf32_Half = u16;
+using Elf64_Half = u16;
+
+/* Types for signed and unsigned 32-bit quantities. */
+using Elf32_Word = u32;
+using Elf32_Sword = s32;
+using Elf64_Word = u32;
+using Elf64_Sword = s32;
+
+/* Types for signed and unsigned 64-bit quantities. */
+using Elf32_Xword = u64;
+using Elf32_Sxword = s64;
+using Elf64_Xword = u64;
+using Elf64_Sxword = s64;
+
+/* Type of addresses. */
+using Elf32_Addr = u32;
+using Elf64_Addr = u64;
+
+/* Type of file offsets. */
+using Elf32_Off = u32;
+using Elf64_Off = u64;
+
+/* Type for section indices, which are 16-bit quantities. */
+using Elf32_Section = u16;
+using Elf64_Section = u16;
+
+/* Type for version symbol information. */
+using Elf32_Versym = Elf32_Half;
+using Elf64_Versym = Elf64_Half;
+
+constexpr size_t ElfIdentSize = 16;
+
+/* The ELF file header. This appears at the start of every ELF file. */
+
+struct Elf32_Ehdr {
+ std::array<u8, ElfIdentSize> e_ident; /* Magic number and other info */
+ Elf32_Half e_type; /* Object file type */
+ Elf32_Half e_machine; /* Architecture */
+ Elf32_Word e_version; /* Object file version */
+ Elf32_Addr e_entry; /* Entry point virtual address */
+ Elf32_Off e_phoff; /* Program header table file offset */
+ Elf32_Off e_shoff; /* Section header table file offset */
+ Elf32_Word e_flags; /* Processor-specific flags */
+ Elf32_Half e_ehsize; /* ELF header size in bytes */
+ Elf32_Half e_phentsize; /* Program header table entry size */
+ Elf32_Half e_phnum; /* Program header table entry count */
+ Elf32_Half e_shentsize; /* Section header table entry size */
+ Elf32_Half e_shnum; /* Section header table entry count */
+ Elf32_Half e_shstrndx; /* Section header string table index */
+};
+
+struct Elf64_Ehdr {
+ std::array<u8, ElfIdentSize> e_ident; /* Magic number and other info */
+ Elf64_Half e_type; /* Object file type */
+ Elf64_Half e_machine; /* Architecture */
+ Elf64_Word e_version; /* Object file version */
+ Elf64_Addr e_entry; /* Entry point virtual address */
+ Elf64_Off e_phoff; /* Program header table file offset */
+ Elf64_Off e_shoff; /* Section header table file offset */
+ Elf64_Word e_flags; /* Processor-specific flags */
+ Elf64_Half e_ehsize; /* ELF header size in bytes */
+ Elf64_Half e_phentsize; /* Program header table entry size */
+ Elf64_Half e_phnum; /* Program header table entry count */
+ Elf64_Half e_shentsize; /* Section header table entry size */
+ Elf64_Half e_shnum; /* Section header table entry count */
+ Elf64_Half e_shstrndx; /* Section header string table index */
+};
+
+constexpr u8 ElfClass32 = 1; /* 32-bit objects */
+constexpr u8 ElfClass64 = 2; /* 64-bit objects */
+constexpr u8 ElfData2Lsb = 1; /* 2's complement, little endian */
+constexpr u8 ElfVersionCurrent = 1; /* EV_CURRENT */
+constexpr u8 ElfOsAbiNone = 0; /* System V ABI */
+
+constexpr u16 ElfTypeNone = 0; /* No file type */
+constexpr u16 ElfTypeRel = 1;  /* Relocatable file */
+constexpr u16 ElfTypeExec = 2; /* Executable file */
+constexpr u16 ElfTypeDyn = 3;  /* Shared object file */
+
+constexpr u16 ElfMachineArm = 40; /* ARM */
+constexpr u16 ElfMachineAArch64 = 183; /* ARM AARCH64 */
+
+constexpr std::array<u8, ElfIdentSize> Elf32Ident{
+ 0x7f, 'E', 'L', 'F', ElfClass32, ElfData2Lsb, ElfVersionCurrent, ElfOsAbiNone};
+
+constexpr std::array<u8, ElfIdentSize> Elf64Ident{
+ 0x7f, 'E', 'L', 'F', ElfClass64, ElfData2Lsb, ElfVersionCurrent, ElfOsAbiNone};
+
+/* Section header. */
+
+struct Elf32_Shdr {
+ Elf32_Word sh_name; /* Section name (string tbl index) */
+ Elf32_Word sh_type; /* Section type */
+ Elf32_Word sh_flags; /* Section flags */
+ Elf32_Addr sh_addr; /* Section virtual addr at execution */
+ Elf32_Off sh_offset; /* Section file offset */
+ Elf32_Word sh_size; /* Section size in bytes */
+ Elf32_Word sh_link; /* Link to another section */
+ Elf32_Word sh_info; /* Additional section information */
+ Elf32_Word sh_addralign; /* Section alignment */
+ Elf32_Word sh_entsize; /* Entry size if section holds table */
+};
+
+struct Elf64_Shdr {
+ Elf64_Word sh_name; /* Section name (string tbl index) */
+ Elf64_Word sh_type; /* Section type */
+ Elf64_Xword sh_flags; /* Section flags */
+ Elf64_Addr sh_addr; /* Section virtual addr at execution */
+ Elf64_Off sh_offset; /* Section file offset */
+ Elf64_Xword sh_size; /* Section size in bytes */
+ Elf64_Word sh_link; /* Link to another section */
+ Elf64_Word sh_info; /* Additional section information */
+ Elf64_Xword sh_addralign; /* Section alignment */
+ Elf64_Xword sh_entsize; /* Entry size if section holds table */
+};
+
+constexpr u32 ElfShnUndef = 0; /* Undefined section */
+
+constexpr u32 ElfShtNull = 0; /* Section header table entry unused */
+constexpr u32 ElfShtProgBits = 1; /* Program data */
+constexpr u32 ElfShtSymtab = 2; /* Symbol table */
+constexpr u32 ElfShtStrtab = 3; /* String table */
+constexpr u32 ElfShtRela = 4; /* Relocation entries with addends */
+constexpr u32 ElfShtDynamic = 6; /* Dynamic linking information */
+constexpr u32 ElfShtNobits = 8; /* Program space with no data (bss) */
+constexpr u32 ElfShtRel = 9; /* Relocation entries, no addends */
+constexpr u32 ElfShtDynsym = 11; /* Dynamic linker symbol table */
+
+/* Symbol table entry. */
+
+struct Elf32_Sym {
+ Elf32_Word st_name; /* Symbol name (string tbl index) */
+ Elf32_Addr st_value; /* Symbol value */
+ Elf32_Word st_size; /* Symbol size */
+ u8 st_info; /* Symbol type and binding */
+ u8 st_other; /* Symbol visibility */
+ Elf32_Section st_shndx; /* Section index */
+};
+
+struct Elf64_Sym {
+ Elf64_Word st_name; /* Symbol name (string tbl index) */
+ u8 st_info; /* Symbol type and binding */
+ u8 st_other; /* Symbol visibility */
+ Elf64_Section st_shndx; /* Section index */
+ Elf64_Addr st_value; /* Symbol value */
+ Elf64_Xword st_size; /* Symbol size */
+};
+
+/* How to extract and insert information held in the st_info field. */
+
+static inline u8 ElfStBind(u8 st_info) {
+ return st_info >> 4;
+}
+static inline u8 ElfStType(u8 st_info) {
+ return st_info & 0xf;
+}
+static inline u8 ElfStInfo(u8 st_bind, u8 st_type) {
+ return static_cast<u8>((st_bind << 4) + (st_type & 0xf));
+}
+
+constexpr u8 ElfBindLocal = 0; /* Local symbol */
+constexpr u8 ElfBindGlobal = 1; /* Global symbol */
+constexpr u8 ElfBindWeak = 2; /* Weak symbol */
+
+constexpr u8 ElfTypeUnspec = 0; /* Symbol type is unspecified */
+constexpr u8 ElfTypeObject = 1; /* Symbol is a data object */
+constexpr u8 ElfTypeFunc = 2; /* Symbol is a code object */
+
+static inline u8 ElfStVisibility(u8 st_other) {
+ return static_cast<u8>(st_other & 0x3);
+}
+
+constexpr u8 ElfVisibilityDefault = 0; /* Default symbol visibility rules */
+constexpr u8 ElfVisibilityInternal = 1; /* Processor specific hidden class */
+constexpr u8 ElfVisibilityHidden = 2; /* Sym unavailable in other modules */
+constexpr u8 ElfVisibilityProtected = 3; /* Not preemptible, not exported */
+
+/* Relocation table entry without addend (in section of type ShtRel). */
+
+struct Elf32_Rel {
+ Elf32_Addr r_offset; /* Address */
+ Elf32_Word r_info; /* Relocation type and symbol index */
+};
+
+/* Relocation table entry with addend (in section of type ShtRela). */
+
+struct Elf32_Rela {
+ Elf32_Addr r_offset; /* Address */
+ Elf32_Word r_info; /* Relocation type and symbol index */
+ Elf32_Sword r_addend; /* Addend */
+};
+
+struct Elf64_Rela {
+ Elf64_Addr r_offset; /* Address */
+ Elf64_Xword r_info; /* Relocation type and symbol index */
+ Elf64_Sxword r_addend; /* Addend */
+};
+
+/* How to extract and insert information held in the r_info field. */
+
+static inline u32 Elf32RelSymIndex(Elf32_Word r_info) {
+ return r_info >> 8;
+}
+static inline u8 Elf32RelType(Elf32_Word r_info) {
+ return static_cast<u8>(r_info & 0xff);
+}
+static inline Elf32_Word Elf32RelInfo(u32 sym_index, u8 type) {
+ return (sym_index << 8) + type;
+}
+static inline u32 Elf64RelSymIndex(Elf64_Xword r_info) {
+ return static_cast<u32>(r_info >> 32);
+}
+static inline u32 Elf64RelType(Elf64_Xword r_info) {
+ return static_cast<u32>(r_info & 0xffffffff);
+}
+static inline Elf64_Xword Elf64RelInfo(u32 sym_index, u32 type) {
+ return (static_cast<Elf64_Xword>(sym_index) << 32) + type;
+}
+
+constexpr u32 ElfArmCopy = 20; /* Copy symbol at runtime */
+constexpr u32 ElfArmGlobDat = 21; /* Create GOT entry */
+constexpr u32 ElfArmJumpSlot = 22; /* Create PLT entry */
+constexpr u32 ElfArmRelative = 23; /* Adjust by program base */
+
+constexpr u32 ElfAArch64Copy = 1024; /* Copy symbol at runtime */
+constexpr u32 ElfAArch64GlobDat = 1025; /* Create GOT entry */
+constexpr u32 ElfAArch64JumpSlot = 1026; /* Create PLT entry */
+constexpr u32 ElfAArch64Relative = 1027; /* Adjust by program base */
+
+/* Program segment header. */
+
+struct Elf32_Phdr {
+ Elf32_Word p_type; /* Segment type */
+ Elf32_Off p_offset; /* Segment file offset */
+ Elf32_Addr p_vaddr; /* Segment virtual address */
+ Elf32_Addr p_paddr; /* Segment physical address */
+ Elf32_Word p_filesz; /* Segment size in file */
+ Elf32_Word p_memsz; /* Segment size in memory */
+ Elf32_Word p_flags; /* Segment flags */
+ Elf32_Word p_align; /* Segment alignment */
+};
+
+struct Elf64_Phdr {
+ Elf64_Word p_type; /* Segment type */
+ Elf64_Word p_flags; /* Segment flags */
+ Elf64_Off p_offset; /* Segment file offset */
+ Elf64_Addr p_vaddr; /* Segment virtual address */
+ Elf64_Addr p_paddr; /* Segment physical address */
+ Elf64_Xword p_filesz; /* Segment size in file */
+ Elf64_Xword p_memsz; /* Segment size in memory */
+ Elf64_Xword p_align; /* Segment alignment */
+};
+
+/* Legal values for p_type (segment type). */
+
+constexpr u32 ElfPtNull = 0; /* Program header table entry unused */
+constexpr u32 ElfPtLoad = 1; /* Loadable program segment */
+constexpr u32 ElfPtDynamic = 2; /* Dynamic linking information */
+constexpr u32 ElfPtInterp = 3; /* Program interpreter */
+constexpr u32 ElfPtNote = 4; /* Auxiliary information */
+constexpr u32 ElfPtPhdr = 6; /* Entry for header table itself */
+constexpr u32 ElfPtTls = 7; /* Thread-local storage segment */
+
+/* Legal values for p_flags (segment flags). */
+
+constexpr u32 ElfPfExec = 1;  /* Segment is executable */
+constexpr u32 ElfPfWrite = 2; /* Segment is writable */
+constexpr u32 ElfPfRead = 4;  /* Segment is readable */
+
+/* Dynamic section entry. */
+
+struct Elf32_Dyn {
+ Elf32_Sword d_tag; /* Dynamic entry type */
+ union {
+ Elf32_Word d_val; /* Integer value */
+ Elf32_Addr d_ptr; /* Address value */
+ } d_un;
+};
+
+struct Elf64_Dyn {
+ Elf64_Sxword d_tag; /* Dynamic entry type */
+ union {
+ Elf64_Xword d_val; /* Integer value */
+ Elf64_Addr d_ptr; /* Address value */
+ } d_un;
+};
+
+/* Legal values for d_tag (dynamic entry type). */
+
+constexpr u32 ElfDtNull = 0; /* Marks end of dynamic section */
+constexpr u32 ElfDtNeeded = 1; /* Name of needed library */
+constexpr u32 ElfDtPltRelSz = 2; /* Size in bytes of PLT relocs */
+constexpr u32 ElfDtPltGot = 3; /* Processor defined value */
+constexpr u32 ElfDtHash = 4; /* Address of symbol hash table */
+constexpr u32 ElfDtStrtab = 5; /* Address of string table */
+constexpr u32 ElfDtSymtab = 6; /* Address of symbol table */
+constexpr u32 ElfDtRela = 7; /* Address of Rela relocs */
+constexpr u32 ElfDtRelasz = 8; /* Total size of Rela relocs */
+constexpr u32 ElfDtRelaent = 9; /* Size of one Rela reloc */
+constexpr u32 ElfDtStrsz = 10; /* Size of string table */
+constexpr u32 ElfDtSyment = 11; /* Size of one symbol table entry */
+constexpr u32 ElfDtInit = 12; /* Address of init function */
+constexpr u32 ElfDtFini = 13; /* Address of termination function */
+constexpr u32 ElfDtRel = 17; /* Address of Rel relocs */
+constexpr u32 ElfDtRelsz = 18; /* Total size of Rel relocs */
+constexpr u32 ElfDtRelent = 19; /* Size of one Rel reloc */
+constexpr u32 ElfDtPltRel = 20; /* Type of reloc in PLT */
+constexpr u32 ElfDtTextRel = 22; /* Reloc might modify .text */
+constexpr u32 ElfDtJmpRel = 23; /* Address of PLT relocs */
+constexpr u32 ElfDtBindNow = 24; /* Process relocations of object */
+constexpr u32 ElfDtInitArray = 25; /* Array with addresses of init fct */
+constexpr u32 ElfDtFiniArray = 26; /* Array with addresses of fini fct */
+constexpr u32 ElfDtInitArraySz = 27; /* Size in bytes of DT_INIT_ARRAY */
+constexpr u32 ElfDtFiniArraySz = 28; /* Size in bytes of DT_FINI_ARRAY */
+constexpr u32 ElfDtSymtabShndx = 34; /* Address of SYMTAB_SHNDX section */
+
+} // namespace ELF
+} // namespace Common
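
A hedged sanity check (not part of the patch) showing how the r_info helpers above pack and unpack a 64-bit relocation entry.

#include <cassert>
#include "common/elf.h"

void CheckRelInfo() {
    using namespace Common::ELF;
    const u32 sym_index = 7;
    const Elf64_Xword info = Elf64RelInfo(sym_index, ElfAArch64Relative);
    assert(Elf64RelSymIndex(info) == sym_index);      // upper 32 bits carry the symbol index
    assert(Elf64RelType(info) == ElfAArch64Relative); // lower 32 bits carry the type
}
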
diff --git a/src/common/error.cpp b/src/common/error.cpp
index d4455e310..ddb03bd45 100644
--- a/src/common/error.cpp
+++ b/src/common/error.cpp
@@ -1,6 +1,6 @@
-// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2013 Dolphin Emulator Project
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <cstddef>
#ifdef _WIN32
diff --git a/src/common/error.h b/src/common/error.h
index e084d4b0f..62a3bd835 100644
--- a/src/common/error.h
+++ b/src/common/error.h
@@ -1,6 +1,6 @@
-// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2013 Dolphin Emulator Project
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/expected.h b/src/common/expected.h
index c8d8579c1..6e6c86ee7 100644
--- a/src/common/expected.h
+++ b/src/common/expected.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
// This is based on the proposed implementation of std::expected (P0323)
// https://github.com/TartanLlama/expected/blob/master/include/tl/expected.hpp
diff --git a/src/common/fiber.cpp b/src/common/fiber.cpp
index 81b212e4b..bc92b360b 100644
--- a/src/common/fiber.cpp
+++ b/src/common/fiber.cpp
@@ -1,10 +1,10 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <mutex>
#include "common/assert.h"
#include "common/fiber.h"
-#include "common/spin_lock.h"
#include "common/virtual_buffer.h"
#include <boost/context/detail/fcontext.hpp>
@@ -19,11 +19,9 @@ struct Fiber::FiberImpl {
VirtualBuffer<u8> stack;
VirtualBuffer<u8> rewind_stack;
- SpinLock guard{};
- std::function<void(void*)> entry_point;
- std::function<void(void*)> rewind_point;
- void* rewind_parameter{};
- void* start_parameter{};
+ std::mutex guard;
+ std::function<void()> entry_point;
+ std::function<void()> rewind_point;
std::shared_ptr<Fiber> previous_fiber;
bool is_thread_fiber{};
bool released{};
@@ -34,13 +32,8 @@ struct Fiber::FiberImpl {
boost::context::detail::fcontext_t rewind_context{};
};
-void Fiber::SetStartParameter(void* new_parameter) {
- impl->start_parameter = new_parameter;
-}
-
-void Fiber::SetRewindPoint(std::function<void(void*)>&& rewind_func, void* rewind_param) {
+void Fiber::SetRewindPoint(std::function<void()>&& rewind_func) {
impl->rewind_point = std::move(rewind_func);
- impl->rewind_parameter = rewind_param;
}
void Fiber::Start(boost::context::detail::transfer_t& transfer) {
@@ -48,7 +41,7 @@ void Fiber::Start(boost::context::detail::transfer_t& transfer) {
impl->previous_fiber->impl->context = transfer.fctx;
impl->previous_fiber->impl->guard.unlock();
impl->previous_fiber.reset();
- impl->entry_point(impl->start_parameter);
+ impl->entry_point();
UNREACHABLE();
}
@@ -59,7 +52,7 @@ void Fiber::OnRewind([[maybe_unused]] boost::context::detail::transfer_t& transf
u8* tmp = impl->stack_limit;
impl->stack_limit = impl->rewind_stack_limit;
impl->rewind_stack_limit = tmp;
- impl->rewind_point(impl->rewind_parameter);
+ impl->rewind_point();
UNREACHABLE();
}
@@ -73,10 +66,8 @@ void Fiber::RewindStartFunc(boost::context::detail::transfer_t transfer) {
fiber->OnRewind(transfer);
}
-Fiber::Fiber(std::function<void(void*)>&& entry_point_func, void* start_parameter)
- : impl{std::make_unique<FiberImpl>()} {
+Fiber::Fiber(std::function<void()>&& entry_point_func) : impl{std::make_unique<FiberImpl>()} {
impl->entry_point = std::move(entry_point_func);
- impl->start_parameter = start_parameter;
impl->stack_limit = impl->stack.data();
impl->rewind_stack_limit = impl->rewind_stack.data();
u8* stack_base = impl->stack_limit + default_stack_size;
diff --git a/src/common/fiber.h b/src/common/fiber.h
index f2a8ff29a..f24d333a3 100644
--- a/src/common/fiber.h
+++ b/src/common/fiber.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -30,7 +29,7 @@ namespace Common {
*/
class Fiber {
public:
- Fiber(std::function<void(void*)>&& entry_point_func, void* start_parameter);
+ Fiber(std::function<void()>&& entry_point_func);
~Fiber();
Fiber(const Fiber&) = delete;
@@ -44,16 +43,13 @@ public:
static void YieldTo(std::weak_ptr<Fiber> weak_from, Fiber& to);
[[nodiscard]] static std::shared_ptr<Fiber> ThreadToFiber();
- void SetRewindPoint(std::function<void(void*)>&& rewind_func, void* rewind_param);
+ void SetRewindPoint(std::function<void()>&& rewind_func);
void Rewind();
/// Only call from main thread's fiber
void Exit();
- /// Changes the start parameter of the fiber. Has no effect if the fiber already started
- void SetStartParameter(void* new_parameter);
-
private:
Fiber();
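
A hedged sketch of the new entry-point style: state that previously travelled through the void* start parameter is now captured directly by the lambda. MakeWorker and core_id are illustrative names.

#include <memory>
#include "common/fiber.h"

std::shared_ptr<Common::Fiber> MakeWorker(int core_id) {
    return std::make_shared<Common::Fiber>([core_id] {
        // ... run the scheduler loop for core_id ...
    });
}
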
diff --git a/src/common/fixed_point.h b/src/common/fixed_point.h
new file mode 100644
index 000000000..4a0f72cc9
--- /dev/null
+++ b/src/common/fixed_point.h
@@ -0,0 +1,706 @@
+// SPDX-FileCopyrightText: 2015 Evan Teran
+// SPDX-License-Identifier: MIT
+
+// From: https://github.com/eteran/cpp-utilities/blob/master/fixed/include/cpp-utilities/fixed.h
+// See also: http://stackoverflow.com/questions/79677/whats-the-best-way-to-do-fixed-point-math
+
+#ifndef FIXED_H_
+#define FIXED_H_
+
+#if __cplusplus >= 201402L
+#define CONSTEXPR14 constexpr
+#else
+#define CONSTEXPR14
+#endif
+
+#include <cstddef> // for size_t
+#include <cstdint>
+#include <exception>
+#include <ostream>
+#include <type_traits>
+
+namespace Common {
+
+template <size_t I, size_t F>
+class FixedPoint;
+
+namespace detail {
+
+// helper templates to make magic with types :)
+// these allow us to determine reasonable types from
+// a desired size; they also let us infer the next largest type
+// from a type, which is nice for the division op
+template <size_t T>
+struct type_from_size {
+ using value_type = void;
+ using unsigned_type = void;
+ using signed_type = void;
+ static constexpr bool is_specialized = false;
+};
+
+#if defined(__GNUC__) && defined(__x86_64__) && !defined(__STRICT_ANSI__)
+template <>
+struct type_from_size<128> {
+ static constexpr bool is_specialized = true;
+ static constexpr size_t size = 128;
+
+ using value_type = __int128;
+ using unsigned_type = unsigned __int128;
+ using signed_type = __int128;
+ using next_size = type_from_size<256>;
+};
+#endif
+
+template <>
+struct type_from_size<64> {
+ static constexpr bool is_specialized = true;
+ static constexpr size_t size = 64;
+
+ using value_type = int64_t;
+ using unsigned_type = std::make_unsigned<value_type>::type;
+ using signed_type = std::make_signed<value_type>::type;
+ using next_size = type_from_size<128>;
+};
+
+template <>
+struct type_from_size<32> {
+ static constexpr bool is_specialized = true;
+ static constexpr size_t size = 32;
+
+ using value_type = int32_t;
+ using unsigned_type = std::make_unsigned<value_type>::type;
+ using signed_type = std::make_signed<value_type>::type;
+ using next_size = type_from_size<64>;
+};
+
+template <>
+struct type_from_size<16> {
+ static constexpr bool is_specialized = true;
+ static constexpr size_t size = 16;
+
+ using value_type = int16_t;
+ using unsigned_type = std::make_unsigned<value_type>::type;
+ using signed_type = std::make_signed<value_type>::type;
+ using next_size = type_from_size<32>;
+};
+
+template <>
+struct type_from_size<8> {
+ static constexpr bool is_specialized = true;
+ static constexpr size_t size = 8;
+
+ using value_type = int8_t;
+ using unsigned_type = std::make_unsigned<value_type>::type;
+ using signed_type = std::make_signed<value_type>::type;
+ using next_size = type_from_size<16>;
+};
+
+// this is to assist in adding support for non-native base
+// types (for adding big-int support), this should be fine
+// unless your bit-int class doesn't nicely support casting
+template <class B, class N>
+constexpr B next_to_base(N rhs) {
+ return static_cast<B>(rhs);
+}
+
+struct divide_by_zero : std::exception {};
+
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> divide(
+ FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
+ typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+
+ using next_type = typename FixedPoint<I, F>::next_type;
+ using base_type = typename FixedPoint<I, F>::base_type;
+ constexpr size_t fractional_bits = FixedPoint<I, F>::fractional_bits;
+
+ next_type t(numerator.to_raw());
+ t <<= fractional_bits;
+
+ FixedPoint<I, F> quotient;
+
+ quotient = FixedPoint<I, F>::from_base(next_to_base<base_type>(t / denominator.to_raw()));
+ remainder = FixedPoint<I, F>::from_base(next_to_base<base_type>(t % denominator.to_raw()));
+
+ return quotient;
+}
+
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> divide(
+ FixedPoint<I, F> numerator, FixedPoint<I, F> denominator, FixedPoint<I, F>& remainder,
+ typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+
+ using unsigned_type = typename FixedPoint<I, F>::unsigned_type;
+
+ constexpr int bits = FixedPoint<I, F>::total_bits;
+
+ if (denominator == 0) {
+ throw divide_by_zero();
+ } else {
+
+ int sign = 0;
+
+ FixedPoint<I, F> quotient;
+
+ if (numerator < 0) {
+ sign ^= 1;
+ numerator = -numerator;
+ }
+
+ if (denominator < 0) {
+ sign ^= 1;
+ denominator = -denominator;
+ }
+
+ unsigned_type n = numerator.to_raw();
+ unsigned_type d = denominator.to_raw();
+ unsigned_type x = 1;
+ unsigned_type answer = 0;
+
+ // egyptian division algorithm
+ while ((n >= d) && (((d >> (bits - 1)) & 1) == 0)) {
+ x <<= 1;
+ d <<= 1;
+ }
+
+ while (x != 0) {
+ if (n >= d) {
+ n -= d;
+ answer += x;
+ }
+
+ x >>= 1;
+ d >>= 1;
+ }
+
+ unsigned_type l1 = n;
+ unsigned_type l2 = denominator.to_raw();
+
+ // calculate the lower bits (needs to be unsigned)
+ while (l1 >> (bits - F) > 0) {
+ l1 >>= 1;
+ l2 >>= 1;
+ }
+ const unsigned_type lo = (l1 << F) / l2;
+
+ quotient = FixedPoint<I, F>::from_base((answer << F) | lo);
+ remainder = n;
+
+ if (sign) {
+ quotient = -quotient;
+ }
+
+ return quotient;
+ }
+}
+
+// this is the usual implementation of multiplication
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> multiply(
+ FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
+ typename std::enable_if<type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+
+ using next_type = typename FixedPoint<I, F>::next_type;
+ using base_type = typename FixedPoint<I, F>::base_type;
+
+ constexpr size_t fractional_bits = FixedPoint<I, F>::fractional_bits;
+
+ next_type t(static_cast<next_type>(lhs.to_raw()) * static_cast<next_type>(rhs.to_raw()));
+ t >>= fractional_bits;
+
+ return FixedPoint<I, F>::from_base(next_to_base<base_type>(t));
+}
+
+// this is the fallback version we use when we don't have a next size
+// it is slightly slower, but is more robust since it doesn't
+// require an upgraded type
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> multiply(
+ FixedPoint<I, F> lhs, FixedPoint<I, F> rhs,
+ typename std::enable_if<!type_from_size<I + F>::next_size::is_specialized>::type* = nullptr) {
+
+ using base_type = typename FixedPoint<I, F>::base_type;
+
+ constexpr size_t fractional_bits = FixedPoint<I, F>::fractional_bits;
+ constexpr base_type integer_mask = FixedPoint<I, F>::integer_mask;
+ constexpr base_type fractional_mask = FixedPoint<I, F>::fractional_mask;
+
+ // more costly but doesn't need a larger type
+ const base_type a_hi = (lhs.to_raw() & integer_mask) >> fractional_bits;
+ const base_type b_hi = (rhs.to_raw() & integer_mask) >> fractional_bits;
+ const base_type a_lo = (lhs.to_raw() & fractional_mask);
+ const base_type b_lo = (rhs.to_raw() & fractional_mask);
+
+ const base_type x1 = a_hi * b_hi;
+ const base_type x2 = a_hi * b_lo;
+ const base_type x3 = a_lo * b_hi;
+ const base_type x4 = a_lo * b_lo;
+
+ return FixedPoint<I, F>::from_base((x1 << fractional_bits) + (x3 + x2) +
+ (x4 >> fractional_bits));
+}
+} // namespace detail
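
A worked numeric illustration (not in the file) of the split-multiply identity the fallback above relies on, with F = 16 fractional bits and 2.5 x 1.5; the fallback path is only selected when no larger intermediate type exists.

raw(2.5) = 0x28000 -> a_hi = 2, a_lo = 0x8000
raw(1.5) = 0x18000 -> b_hi = 1, b_lo = 0x8000
x1 = a_hi*b_hi = 2, contributing (x1 << 16)         = 0x20000
x2 + x3 = a_hi*b_lo + a_lo*b_hi = 0x10000 + 0x8000  = 0x18000
x4 >> 16 = (0x8000 * 0x8000) >> 16                  = 0x4000
sum = 0x3C000 = raw(3.75), i.e. the correct product without forming the full double-width intermediate.
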
+
+template <size_t I, size_t F>
+class FixedPoint {
+ static_assert(detail::type_from_size<I + F>::is_specialized, "invalid combination of sizes");
+
+public:
+ static constexpr size_t fractional_bits = F;
+ static constexpr size_t integer_bits = I;
+ static constexpr size_t total_bits = I + F;
+
+ using base_type_info = detail::type_from_size<total_bits>;
+
+ using base_type = typename base_type_info::value_type;
+ using next_type = typename base_type_info::next_size::value_type;
+ using unsigned_type = typename base_type_info::unsigned_type;
+
+public:
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Woverflow"
+#endif
+ static constexpr base_type fractional_mask =
+ ~(static_cast<unsigned_type>(~base_type(0)) << fractional_bits);
+ static constexpr base_type integer_mask = ~fractional_mask;
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
+
+public:
+ static constexpr base_type one = base_type(1) << fractional_bits;
+
+public: // constructors
+ FixedPoint() = default;
+ FixedPoint(const FixedPoint&) = default;
+ FixedPoint(FixedPoint&&) = default;
+ FixedPoint& operator=(const FixedPoint&) = default;
+
+ template <class Number>
+ constexpr FixedPoint(
+ Number n, typename std::enable_if<std::is_arithmetic<Number>::value>::type* = nullptr)
+ : data_(static_cast<base_type>(n * one)) {}
+
+public: // conversion
+ template <size_t I2, size_t F2>
+ CONSTEXPR14 explicit FixedPoint(FixedPoint<I2, F2> other) {
+ static_assert(I2 <= I && F2 <= F, "Scaling conversion can only upgrade types");
+ using T = FixedPoint<I2, F2>;
+
+ const base_type fractional = (other.data_ & T::fractional_mask);
+ const base_type integer = (other.data_ & T::integer_mask) >> T::fractional_bits;
+ data_ =
+ (integer << fractional_bits) | (fractional << (fractional_bits - T::fractional_bits));
+ }
+
+private:
+ // this makes it simpler to create a FixedPoint object from
+ // a native type without scaling;
+ // use "FixedPoint::from_base" in order to perform this.
+ struct NoScale {};
+
+ constexpr FixedPoint(base_type n, const NoScale&) : data_(n) {}
+
+public:
+ static constexpr FixedPoint from_base(base_type n) {
+ return FixedPoint(n, NoScale());
+ }
+
+public: // comparison operators
+ constexpr bool operator==(FixedPoint rhs) const {
+ return data_ == rhs.data_;
+ }
+
+ constexpr bool operator!=(FixedPoint rhs) const {
+ return data_ != rhs.data_;
+ }
+
+ constexpr bool operator<(FixedPoint rhs) const {
+ return data_ < rhs.data_;
+ }
+
+ constexpr bool operator>(FixedPoint rhs) const {
+ return data_ > rhs.data_;
+ }
+
+ constexpr bool operator<=(FixedPoint rhs) const {
+ return data_ <= rhs.data_;
+ }
+
+ constexpr bool operator>=(FixedPoint rhs) const {
+ return data_ >= rhs.data_;
+ }
+
+public: // unary operators
+ constexpr bool operator!() const {
+ return !data_;
+ }
+
+ constexpr FixedPoint operator~() const {
+ // NOTE(eteran): this will often appear to "just negate" the value;
+ // that is not an error, it is because -x == (~x+1)
+ // and that "+1" is adding an infinitesimally small fraction to the
+ // complemented value
+ return FixedPoint::from_base(~data_);
+ }
+
+ constexpr FixedPoint operator-() const {
+ return FixedPoint::from_base(-data_);
+ }
+
+ constexpr FixedPoint operator+() const {
+ return FixedPoint::from_base(+data_);
+ }
+
+ CONSTEXPR14 FixedPoint& operator++() {
+ data_ += one;
+ return *this;
+ }
+
+ CONSTEXPR14 FixedPoint& operator--() {
+ data_ -= one;
+ return *this;
+ }
+
+ CONSTEXPR14 FixedPoint operator++(int) {
+ FixedPoint tmp(*this);
+ data_ += one;
+ return tmp;
+ }
+
+ CONSTEXPR14 FixedPoint operator--(int) {
+ FixedPoint tmp(*this);
+ data_ -= one;
+ return tmp;
+ }
+
+public: // basic math operators
+ CONSTEXPR14 FixedPoint& operator+=(FixedPoint n) {
+ data_ += n.data_;
+ return *this;
+ }
+
+ CONSTEXPR14 FixedPoint& operator-=(FixedPoint n) {
+ data_ -= n.data_;
+ return *this;
+ }
+
+ CONSTEXPR14 FixedPoint& operator*=(FixedPoint n) {
+ return assign(detail::multiply(*this, n));
+ }
+
+ CONSTEXPR14 FixedPoint& operator/=(FixedPoint n) {
+ FixedPoint temp;
+ return assign(detail::divide(*this, n, temp));
+ }
+
+private:
+ CONSTEXPR14 FixedPoint& assign(FixedPoint rhs) {
+ data_ = rhs.data_;
+ return *this;
+ }
+
+public: // binary math operators, effects underlying bit pattern since these
+ // don't really typically make sense for non-integer values
+ CONSTEXPR14 FixedPoint& operator&=(FixedPoint n) {
+ data_ &= n.data_;
+ return *this;
+ }
+
+ CONSTEXPR14 FixedPoint& operator|=(FixedPoint n) {
+ data_ |= n.data_;
+ return *this;
+ }
+
+ CONSTEXPR14 FixedPoint& operator^=(FixedPoint n) {
+ data_ ^= n.data_;
+ return *this;
+ }
+
+ template <class Integer,
+ class = typename std::enable_if<std::is_integral<Integer>::value>::type>
+ CONSTEXPR14 FixedPoint& operator>>=(Integer n) {
+ data_ >>= n;
+ return *this;
+ }
+
+ template <class Integer,
+ class = typename std::enable_if<std::is_integral<Integer>::value>::type>
+ CONSTEXPR14 FixedPoint& operator<<=(Integer n) {
+ data_ <<= n;
+ return *this;
+ }
+
+public: // conversion to basic types
+ constexpr void round_up() {
+ data_ += (data_ & fractional_mask) >> 1;
+ }
+
+ constexpr int to_int() {
+ round_up();
+ return static_cast<int>((data_ & integer_mask) >> fractional_bits);
+ }
+
+ constexpr unsigned int to_uint() {
+ round_up();
+ return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
+ }
+
+ constexpr int64_t to_long() {
+ round_up();
+ return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
+ }
+
+ constexpr int to_int_floor() const {
+ return static_cast<int>((data_ & integer_mask) >> fractional_bits);
+ }
+
+ constexpr int64_t to_long_floor() {
+ return static_cast<int64_t>((data_ & integer_mask) >> fractional_bits);
+ }
+
+ constexpr unsigned int to_uint_floor() const {
+ return static_cast<unsigned int>((data_ & integer_mask) >> fractional_bits);
+ }
+
+ constexpr float to_float() const {
+ return static_cast<float>(data_) / FixedPoint::one;
+ }
+
+ constexpr double to_double() const {
+ return static_cast<double>(data_) / FixedPoint::one;
+ }
+
+ constexpr base_type to_raw() const {
+ return data_;
+ }
+
+ constexpr void clear_int() {
+ data_ &= fractional_mask;
+ }
+
+ constexpr base_type get_frac() const {
+ return data_ & fractional_mask;
+ }
+
+public:
+ CONSTEXPR14 void swap(FixedPoint& rhs) {
+ using std::swap;
+ swap(data_, rhs.data_);
+ }
+
+public:
+ base_type data_;
+};
+
+// if we have the same fractional portion, but differing integer portions, we trivially upgrade the
+// smaller type
+template <size_t I1, size_t I2, size_t F>
+CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
+operator+(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+
+ using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+
+ const T l = T::from_base(lhs.to_raw());
+ const T r = T::from_base(rhs.to_raw());
+ return l + r;
+}
+
+template <size_t I1, size_t I2, size_t F>
+CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
+operator-(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+
+ using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+
+ const T l = T::from_base(lhs.to_raw());
+ const T r = T::from_base(rhs.to_raw());
+ return l - r;
+}
+
+template <size_t I1, size_t I2, size_t F>
+CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
+operator*(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+
+ using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+
+ const T l = T::from_base(lhs.to_raw());
+ const T r = T::from_base(rhs.to_raw());
+ return l * r;
+}
+
+template <size_t I1, size_t I2, size_t F>
+CONSTEXPR14 typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type
+operator/(FixedPoint<I1, F> lhs, FixedPoint<I2, F> rhs) {
+
+ using T = typename std::conditional<I1 >= I2, FixedPoint<I1, F>, FixedPoint<I2, F>>::type;
+
+ const T l = T::from_base(lhs.to_raw());
+ const T r = T::from_base(rhs.to_raw());
+ return l / r;
+}
+
+template <size_t I, size_t F>
+std::ostream& operator<<(std::ostream& os, FixedPoint<I, F> f) {
+ os << f.to_double();
+ return os;
+}
+
+// basic math operators
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+ lhs += rhs;
+ return lhs;
+}
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+ lhs -= rhs;
+ return lhs;
+}
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+ lhs *= rhs;
+ return lhs;
+}
+template <size_t I, size_t F>
+CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, FixedPoint<I, F> rhs) {
+ lhs /= rhs;
+ return lhs;
+}
+
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator+(FixedPoint<I, F> lhs, Number rhs) {
+ lhs += FixedPoint<I, F>(rhs);
+ return lhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator-(FixedPoint<I, F> lhs, Number rhs) {
+ lhs -= FixedPoint<I, F>(rhs);
+ return lhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator*(FixedPoint<I, F> lhs, Number rhs) {
+ lhs *= FixedPoint<I, F>(rhs);
+ return lhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator/(FixedPoint<I, F> lhs, Number rhs) {
+ lhs /= FixedPoint<I, F>(rhs);
+ return lhs;
+}
+
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator+(Number lhs, FixedPoint<I, F> rhs) {
+ FixedPoint<I, F> tmp(lhs);
+ tmp += rhs;
+ return tmp;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator-(Number lhs, FixedPoint<I, F> rhs) {
+ FixedPoint<I, F> tmp(lhs);
+ tmp -= rhs;
+ return tmp;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator*(Number lhs, FixedPoint<I, F> rhs) {
+ FixedPoint<I, F> tmp(lhs);
+ tmp *= rhs;
+ return tmp;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator/(Number lhs, FixedPoint<I, F> rhs) {
+ FixedPoint<I, F> tmp(lhs);
+ tmp /= rhs;
+ return tmp;
+}
+
+// shift operators
+template <size_t I, size_t F, class Integer,
+ class = typename std::enable_if<std::is_integral<Integer>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator<<(FixedPoint<I, F> lhs, Integer rhs) {
+ lhs <<= rhs;
+ return lhs;
+}
+template <size_t I, size_t F, class Integer,
+ class = typename std::enable_if<std::is_integral<Integer>::value>::type>
+CONSTEXPR14 FixedPoint<I, F> operator>>(FixedPoint<I, F> lhs, Integer rhs) {
+ lhs >>= rhs;
+ return lhs;
+}
+
+// comparison operators
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator>(FixedPoint<I, F> lhs, Number rhs) {
+ return lhs > FixedPoint<I, F>(rhs);
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator<(FixedPoint<I, F> lhs, Number rhs) {
+ return lhs < FixedPoint<I, F>(rhs);
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator>=(FixedPoint<I, F> lhs, Number rhs) {
+ return lhs >= FixedPoint<I, F>(rhs);
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator<=(FixedPoint<I, F> lhs, Number rhs) {
+ return lhs <= FixedPoint<I, F>(rhs);
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator==(FixedPoint<I, F> lhs, Number rhs) {
+ return lhs == FixedPoint<I, F>(rhs);
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator!=(FixedPoint<I, F> lhs, Number rhs) {
+ return lhs != FixedPoint<I, F>(rhs);
+}
+
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator>(Number lhs, FixedPoint<I, F> rhs) {
+ return FixedPoint<I, F>(lhs) > rhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator<(Number lhs, FixedPoint<I, F> rhs) {
+ return FixedPoint<I, F>(lhs) < rhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator>=(Number lhs, FixedPoint<I, F> rhs) {
+ return FixedPoint<I, F>(lhs) >= rhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator<=(Number lhs, FixedPoint<I, F> rhs) {
+ return FixedPoint<I, F>(lhs) <= rhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator==(Number lhs, FixedPoint<I, F> rhs) {
+ return FixedPoint<I, F>(lhs) == rhs;
+}
+template <size_t I, size_t F, class Number,
+ class = typename std::enable_if<std::is_arithmetic<Number>::value>::type>
+constexpr bool operator!=(Number lhs, FixedPoint<I, F> rhs) {
+ return FixedPoint<I, F>(lhs) != rhs;
+}
+
+} // namespace Common
+
+#undef CONSTEXPR14
+
+#endif
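Note on the fixed_point.h additions above: when two FixedPoint values share the fractional width but differ in integer width, the heterogeneous operators promote to the wider type before doing the arithmetic. A minimal usage sketch (not part of the patch; the include path and the floating-point constructor are assumed from the earlier, unchanged part of the header):

#include <cassert>
#include <type_traits>
#include "common/fixed_point.h" // assumed include path for this header

void FixedPointPromotionExample() {
    // Assumes the arithmetic-type constructor declared earlier in the header.
    const Common::FixedPoint<16, 16> narrow{1.5};
    const Common::FixedPoint<48, 16> wide{2.25};
    // The heterogeneous operator+ above picks FixedPoint<48, 16>, the wider integer part.
    const auto sum = narrow + wide;
    static_assert(std::is_same_v<std::remove_const_t<decltype(sum)>, Common::FixedPoint<48, 16>>);
    assert(sum.to_double() == 3.75); // both operands are exact in 16 fractional bits
}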
diff --git a/src/common/fs/file.cpp b/src/common/fs/file.cpp
index 274f57659..fa8422c41 100644
--- a/src/common/fs/file.cpp
+++ b/src/common/fs/file.cpp
@@ -1,10 +1,8 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/fs/file.h"
#include "common/fs/fs.h"
-#include "common/fs/path_util.h"
#include "common/logging/log.h"
#ifdef _WIN32
diff --git a/src/common/fs/file.h b/src/common/fs/file.h
index a4f7944cd..69b53384c 100644
--- a/src/common/fs/file.h
+++ b/src/common/fs/file.h
@@ -1,15 +1,12 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <cstdio>
#include <filesystem>
-#include <fstream>
#include <span>
#include <type_traits>
-#include <vector>
#include "common/concepts.h"
#include "common/fs/fs_types.h"
diff --git a/src/common/fs/fs.cpp b/src/common/fs/fs.cpp
index 9089cad67..e1716c62d 100644
--- a/src/common/fs/fs.cpp
+++ b/src/common/fs/fs.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/fs/file.h"
#include "common/fs/fs.h"
diff --git a/src/common/fs/fs.h b/src/common/fs/fs.h
index 183126de3..ce3eb309a 100644
--- a/src/common/fs/fs.h
+++ b/src/common/fs/fs.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/fs/fs_paths.h b/src/common/fs/fs_paths.h
index 5d447f108..c77c112f1 100644
--- a/src/common/fs/fs_paths.h
+++ b/src/common/fs/fs_paths.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/fs/fs_types.h b/src/common/fs/fs_types.h
index 089980aee..5a4090c19 100644
--- a/src/common/fs/fs_types.h
+++ b/src/common/fs/fs_types.h
@@ -1,13 +1,11 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <functional>
#include "common/common_funcs.h"
-#include "common/common_types.h"
namespace Common::FS {
diff --git a/src/common/fs/fs_util.cpp b/src/common/fs/fs_util.cpp
index 9f8671982..eb4ac1deb 100644
--- a/src/common/fs/fs_util.cpp
+++ b/src/common/fs/fs_util.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
@@ -16,6 +15,10 @@ std::u8string BufferToU8String(std::span<const u8> buffer) {
return std::u8string{buffer.begin(), std::ranges::find(buffer, u8{0})};
}
+std::u8string_view BufferToU8StringView(std::span<const u8> buffer) {
+ return std::u8string_view{reinterpret_cast<const char8_t*>(buffer.data())};
+}
+
std::string ToUTF8String(std::u8string_view u8_string) {
return std::string{u8_string.begin(), u8_string.end()};
}
@@ -24,6 +27,10 @@ std::string BufferToUTF8String(std::span<const u8> buffer) {
return std::string{buffer.begin(), std::ranges::find(buffer, u8{0})};
}
+std::string_view BufferToUTF8StringView(std::span<const u8> buffer) {
+ return std::string_view{reinterpret_cast<const char*>(buffer.data())};
+}
+
std::string PathToUTF8String(const std::filesystem::path& path) {
return ToUTF8String(path.u8string());
}
diff --git a/src/common/fs/fs_util.h b/src/common/fs/fs_util.h
index 1ec82eb35..2492a9f94 100644
--- a/src/common/fs/fs_util.h
+++ b/src/common/fs/fs_util.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -8,7 +7,6 @@
#include <filesystem>
#include <span>
#include <string>
-#include <string_view>
#include "common/common_types.h"
@@ -38,6 +36,15 @@ concept IsChar = std::same_as<T, char>;
[[nodiscard]] std::u8string BufferToU8String(std::span<const u8> buffer);
/**
+ * Same as BufferToU8String, but returns a string view over the buffer. The buffer must contain a
+ * null terminator, since the view is constructed from a raw pointer.
+ *
+ * @param buffer Buffer of bytes
+ *
+ * @returns UTF-8 encoded std::u8string_view.
+ */
+[[nodiscard]] std::u8string_view BufferToU8StringView(std::span<const u8> buffer);
+
+/**
* Converts a std::u8string or std::u8string_view to a UTF-8 encoded std::string.
*
* @param u8_string UTF-8 encoded u8string
@@ -58,6 +65,15 @@ concept IsChar = std::same_as<T, char>;
[[nodiscard]] std::string BufferToUTF8String(std::span<const u8> buffer);
/**
+ * Same as BufferToUTF8String, but returns a string view over the buffer. The buffer must contain
+ * a null terminator, since the view is constructed from a raw pointer.
+ *
+ * @param buffer Buffer of bytes
+ *
+ * @returns UTF-8 encoded std::string_view.
+ */
+[[nodiscard]] std::string_view BufferToUTF8StringView(std::span<const u8> buffer);
+
+/**
* Converts a filesystem path to a UTF-8 encoded std::string.
*
* @param path Filesystem path
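A brief usage sketch of the new view helpers added above (illustrative only; unlike BufferToUTF8String, the *StringView variants build the view from a raw pointer, so the buffer has to contain a null terminator):

#include <array>
#include <cstdio>
#include <string>
#include <string_view>
#include "common/fs/fs_util.h" // assumed include path

void BufferToStringViewExample() {
    const std::array<u8, 8> buffer{'y', 'u', 'z', 'u', '\0'}; // remaining bytes are zero
    const std::string copied = Common::FS::BufferToUTF8String(buffer);          // owns a copy of the text
    const std::string_view viewed = Common::FS::BufferToUTF8StringView(buffer); // borrows the buffer's storage
    std::printf("%s / %.*s\n", copied.c_str(), static_cast<int>(viewed.size()), viewed.data());
}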
diff --git a/src/common/fs/path_util.cpp b/src/common/fs/path_util.cpp
index 1bcb897b5..1074f2421 100644
--- a/src/common/fs/path_util.cpp
+++ b/src/common/fs/path_util.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <unordered_map>
@@ -233,9 +232,7 @@ void SetYuzuPath(YuzuPath yuzu_path, const fs::path& new_path) {
fs::path GetExeDirectory() {
wchar_t exe_path[MAX_PATH];
- GetModuleFileNameW(nullptr, exe_path, MAX_PATH);
-
- if (!exe_path) {
+ if (GetModuleFileNameW(nullptr, exe_path, MAX_PATH) == 0) {
LOG_ERROR(Common_Filesystem,
"Failed to get the path to the executable of the current process");
}
diff --git a/src/common/fs/path_util.h b/src/common/fs/path_util.h
index 0a9e3a145..13d713f1e 100644
--- a/src/common/fs/path_util.h
+++ b/src/common/fs/path_util.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/hash.h b/src/common/hash.h
index 298930702..e8fe78b07 100644
--- a/src/common/hash.h
+++ b/src/common/hash.h
@@ -1,6 +1,5 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -19,4 +18,11 @@ struct PairHash {
}
};
+template <typename T>
+struct IdentityHash {
+ [[nodiscard]] size_t operator()(T value) const noexcept {
+ return static_cast<size_t>(value);
+ }
+};
+
} // namespace Common
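The new IdentityHash above is meant for keys that are already hashes or otherwise well distributed, so the container can use the key value itself as the bucket index. Illustrative only; the map and key are made up:

#include <cstdint>
#include <unordered_map>
#include "common/hash.h" // assumed include path

// The key (e.g. a precomputed hash or a page address) is passed through unchanged.
std::unordered_map<std::uint64_t, int, Common::IdentityHash<std::uint64_t>> page_flags;

void IdentityHashExample() {
    page_flags[0xDEADBEEF00ULL] = 1;
}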
diff --git a/src/common/hex_util.cpp b/src/common/hex_util.cpp
index 74f52dd11..07053295c 100644
--- a/src/common/hex_util.cpp
+++ b/src/common/hex_util.cpp
@@ -1,6 +1,6 @@
-// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2013 Dolphin Emulator Project
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/hex_util.h"
diff --git a/src/common/hex_util.h b/src/common/hex_util.h
index 5e9b6ef8b..a00904939 100644
--- a/src/common/hex_util.h
+++ b/src/common/hex_util.h
@@ -1,13 +1,12 @@
-// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2013 Dolphin Emulator Project
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <cstddef>
#include <string>
-#include <type_traits>
#include <vector>
#include <fmt/format.h>
#include "common/common_types.h"
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index 28949fe5e..7f9659612 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#ifdef _WIN32
@@ -18,6 +17,7 @@
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
+#include "common/scope_exit.h"
#endif // ^^^ Linux ^^^
@@ -27,7 +27,6 @@
#include "common/assert.h"
#include "common/host_memory.h"
#include "common/logging/log.h"
-#include "common/scope_exit.h"
namespace Common {
@@ -149,7 +148,7 @@ public:
}
void Unmap(size_t virtual_offset, size_t length) {
- std::lock_guard lock{placeholder_mutex};
+ std::scoped_lock lock{placeholder_mutex};
// Unmap until there are no more placeholders
while (UnmapOnePlaceholder(virtual_offset, length)) {
@@ -169,7 +168,7 @@ public:
}
const size_t virtual_end = virtual_offset + length;
- std::lock_guard lock{placeholder_mutex};
+ std::scoped_lock lock{placeholder_mutex};
auto [it, end] = placeholders.equal_range({virtual_offset, virtual_end});
while (it != end) {
const size_t offset = std::max(it->lower(), virtual_offset);
@@ -327,8 +326,8 @@ private:
bool IsNiechePlaceholder(size_t virtual_offset, size_t length) const {
const auto it = placeholders.upper_bound({virtual_offset, virtual_offset + length});
if (it != placeholders.end() && it->lower() == virtual_offset + length) {
- const bool is_root = it == placeholders.begin() && virtual_offset == 0;
- return is_root || std::prev(it)->upper() == virtual_offset;
+ return it == placeholders.begin() ? virtual_offset == 0
+ : std::prev(it)->upper() == virtual_offset;
}
return false;
}
diff --git a/src/common/host_memory.h b/src/common/host_memory.h
index 9b8326d0f..447975ded 100644
--- a/src/common/host_memory.h
+++ b/src/common/host_memory.h
@@ -1,6 +1,5 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/input.h b/src/common/input.h
index 54fcb24b0..bfa0639f5 100644
--- a/src/common/input.h
+++ b/src/common/input.h
@@ -1,6 +1,5 @@
-// Copyright 2017 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2017 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -28,7 +27,7 @@ enum class InputType {
Color,
Vibration,
Nfc,
- Ir,
+ IrSensor,
};
// Internal battery charge level
@@ -53,6 +52,15 @@ enum class PollingMode {
IR,
};
+enum class CameraFormat {
+ Size320x240,
+ Size160x120,
+ Size80x60,
+ Size40x30,
+ Size20x15,
+ None,
+};
+
// Vibration reply from the controller
enum class VibrationError {
None,
@@ -68,10 +76,31 @@ enum class PollingError {
Unknown,
};
+// Nfc reply from the controller
+enum class NfcState {
+ Success,
+ NewAmiibo,
+ WaitingForAmiibo,
+ AmiiboRemoved,
+ NotAnAmiibo,
+ NotSupported,
+ WrongDeviceState,
+ WriteFailed,
+ Unknown,
+};
+
+// Ir camera reply from the controller
+enum class CameraError {
+ None,
+ NotSupported,
+ Unknown,
+};
+
// Hint for amplification curve to be used
enum class VibrationAmplificationType {
Linear,
Exponential,
+ Test,
};
// Analog properties for calibration
@@ -86,6 +115,8 @@ struct AnalogProperties {
float offset{};
// Invert direction of the sensor data
bool inverted{};
+ // Press once to activate, press again to release
+ bool toggle{};
};
// Single analog sensor data
@@ -99,8 +130,11 @@ struct AnalogStatus {
struct ButtonStatus {
Common::UUID uuid{};
bool value{};
+ // Invert value of the button
bool inverted{};
+ // Press once to activate, press again to release
bool toggle{};
+ // Internal lock for the toggle status
bool locked{};
};
@@ -175,6 +209,17 @@ struct LedStatus {
bool led_4{};
};
+// Raw data from camera
+struct CameraStatus {
+ CameraFormat format{CameraFormat::None};
+ std::vector<u8> data{};
+};
+
+struct NfcStatus {
+ NfcState state{};
+ std::vector<u8> data{};
+};
+
// List of buttons to be passed to Qt that can be translated
enum class ButtonNames {
Undefined,
@@ -232,6 +277,8 @@ struct CallbackStatus {
BodyColorStatus color_status{};
BatteryStatus battery_status{};
VibrationStatus vibration_status{};
+ CameraStatus camera_status{};
+ NfcStatus nfc_status{};
};
// Triggered once every input change
@@ -280,6 +327,18 @@ public:
virtual PollingError SetPollingMode([[maybe_unused]] PollingMode polling_mode) {
return PollingError::NotSupported;
}
+
+ virtual CameraError SetCameraFormat([[maybe_unused]] CameraFormat camera_format) {
+ return CameraError::NotSupported;
+ }
+
+ virtual NfcState SupportsNfc() const {
+ return NfcState::NotSupported;
+ }
+
+ virtual NfcState WriteNfcData([[maybe_unused]] const std::vector<u8>& data) {
+ return NfcState::NotSupported;
+ }
};
/// An abstract class template for a factory that can create input devices.
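Sketch of how an input backend might implement the new camera/NFC hooks added above. The base class name is taken from this header; the derived class and its behaviour are illustrative only:

#include <vector>
#include "common/common_types.h"
#include "common/input.h" // assumed include path

class ExampleIrNfcDevice : public Common::Input::InputDevice {
public:
    Common::Input::CameraError SetCameraFormat(Common::Input::CameraFormat camera_format) override {
        current_format = camera_format;
        return Common::Input::CameraError::None;
    }

    Common::Input::NfcState SupportsNfc() const override {
        return Common::Input::NfcState::Success;
    }

    Common::Input::NfcState WriteNfcData(const std::vector<u8>& data) override {
        return data.empty() ? Common::Input::NfcState::WriteFailed
                            : Common::Input::NfcState::Success;
    }

private:
    Common::Input::CameraFormat current_format{Common::Input::CameraFormat::None};
};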
diff --git a/src/common/intrusive_red_black_tree.h b/src/common/intrusive_red_black_tree.h
index 3173cc449..93046615e 100644
--- a/src/common/intrusive_red_black_tree.h
+++ b/src/common/intrusive_red_black_tree.h
@@ -1,9 +1,9 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
+#include "common/common_funcs.h"
#include "common/parent_of_member.h"
#include "common/tree.h"
@@ -15,32 +15,33 @@ class IntrusiveRedBlackTreeImpl;
}
+#pragma pack(push, 4)
struct IntrusiveRedBlackTreeNode {
+ YUZU_NON_COPYABLE(IntrusiveRedBlackTreeNode);
+
public:
- using EntryType = RBEntry<IntrusiveRedBlackTreeNode>;
+ using RBEntry = freebsd::RBEntry<IntrusiveRedBlackTreeNode>;
- constexpr IntrusiveRedBlackTreeNode() = default;
+private:
+ RBEntry m_entry;
- void SetEntry(const EntryType& new_entry) {
- entry = new_entry;
- }
+public:
+ explicit IntrusiveRedBlackTreeNode() = default;
- [[nodiscard]] EntryType& GetEntry() {
- return entry;
+ [[nodiscard]] constexpr RBEntry& GetRBEntry() {
+ return m_entry;
}
-
- [[nodiscard]] const EntryType& GetEntry() const {
- return entry;
+ [[nodiscard]] constexpr const RBEntry& GetRBEntry() const {
+ return m_entry;
}
-private:
- EntryType entry{};
-
- friend class impl::IntrusiveRedBlackTreeImpl;
-
- template <class, class, class>
- friend class IntrusiveRedBlackTree;
+ constexpr void SetRBEntry(const RBEntry& entry) {
+ m_entry = entry;
+ }
};
+static_assert(sizeof(IntrusiveRedBlackTreeNode) ==
+ 3 * sizeof(void*) + std::max<size_t>(sizeof(freebsd::RBColor), 4));
+#pragma pack(pop)
template <class T, class Traits, class Comparator>
class IntrusiveRedBlackTree;
@@ -48,12 +49,17 @@ class IntrusiveRedBlackTree;
namespace impl {
class IntrusiveRedBlackTreeImpl {
+ YUZU_NON_COPYABLE(IntrusiveRedBlackTreeImpl);
+
private:
template <class, class, class>
friend class ::Common::IntrusiveRedBlackTree;
- using RootType = RBHead<IntrusiveRedBlackTreeNode>;
- RootType root;
+private:
+ using RootType = freebsd::RBHead<IntrusiveRedBlackTreeNode>;
+
+private:
+ RootType m_root;
public:
template <bool Const>
@@ -81,149 +87,150 @@ public:
IntrusiveRedBlackTreeImpl::reference>;
private:
- pointer node;
+ pointer m_node;
public:
- explicit Iterator(pointer n) : node(n) {}
+ constexpr explicit Iterator(pointer n) : m_node(n) {}
- bool operator==(const Iterator& rhs) const {
- return this->node == rhs.node;
+ constexpr bool operator==(const Iterator& rhs) const {
+ return m_node == rhs.m_node;
}
- bool operator!=(const Iterator& rhs) const {
+ constexpr bool operator!=(const Iterator& rhs) const {
return !(*this == rhs);
}
- pointer operator->() const {
- return this->node;
+ constexpr pointer operator->() const {
+ return m_node;
}
- reference operator*() const {
- return *this->node;
+ constexpr reference operator*() const {
+ return *m_node;
}
- Iterator& operator++() {
- this->node = GetNext(this->node);
+ constexpr Iterator& operator++() {
+ m_node = GetNext(m_node);
return *this;
}
- Iterator& operator--() {
- this->node = GetPrev(this->node);
+ constexpr Iterator& operator--() {
+ m_node = GetPrev(m_node);
return *this;
}
- Iterator operator++(int) {
+ constexpr Iterator operator++(int) {
const Iterator it{*this};
++(*this);
return it;
}
- Iterator operator--(int) {
+ constexpr Iterator operator--(int) {
const Iterator it{*this};
--(*this);
return it;
}
- operator Iterator<true>() const {
- return Iterator<true>(this->node);
+ constexpr operator Iterator<true>() const {
+ return Iterator<true>(m_node);
}
};
private:
- // Define accessors using RB_* functions.
- bool EmptyImpl() const {
- return root.IsEmpty();
+ constexpr bool EmptyImpl() const {
+ return m_root.IsEmpty();
}
- IntrusiveRedBlackTreeNode* GetMinImpl() const {
- return RB_MIN(const_cast<RootType*>(&root));
+ constexpr IntrusiveRedBlackTreeNode* GetMinImpl() const {
+ return freebsd::RB_MIN(const_cast<RootType&>(m_root));
}
- IntrusiveRedBlackTreeNode* GetMaxImpl() const {
- return RB_MAX(const_cast<RootType*>(&root));
+ constexpr IntrusiveRedBlackTreeNode* GetMaxImpl() const {
+ return freebsd::RB_MAX(const_cast<RootType&>(m_root));
}
- IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) {
- return RB_REMOVE(&root, node);
+ constexpr IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) {
+ return freebsd::RB_REMOVE(m_root, node);
}
public:
- static IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) {
- return RB_NEXT(node);
+ static constexpr IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) {
+ return freebsd::RB_NEXT(node);
}
- static IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) {
- return RB_PREV(node);
+ static constexpr IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) {
+ return freebsd::RB_PREV(node);
}
- static const IntrusiveRedBlackTreeNode* GetNext(const IntrusiveRedBlackTreeNode* node) {
+ static constexpr IntrusiveRedBlackTreeNode const* GetNext(
+ IntrusiveRedBlackTreeNode const* node) {
return static_cast<const IntrusiveRedBlackTreeNode*>(
GetNext(const_cast<IntrusiveRedBlackTreeNode*>(node)));
}
- static const IntrusiveRedBlackTreeNode* GetPrev(const IntrusiveRedBlackTreeNode* node) {
+ static constexpr IntrusiveRedBlackTreeNode const* GetPrev(
+ IntrusiveRedBlackTreeNode const* node) {
return static_cast<const IntrusiveRedBlackTreeNode*>(
GetPrev(const_cast<IntrusiveRedBlackTreeNode*>(node)));
}
public:
- constexpr IntrusiveRedBlackTreeImpl() {}
+ constexpr IntrusiveRedBlackTreeImpl() = default;
// Iterator accessors.
- iterator begin() {
+ constexpr iterator begin() {
return iterator(this->GetMinImpl());
}
- const_iterator begin() const {
+ constexpr const_iterator begin() const {
return const_iterator(this->GetMinImpl());
}
- iterator end() {
+ constexpr iterator end() {
return iterator(static_cast<IntrusiveRedBlackTreeNode*>(nullptr));
}
- const_iterator end() const {
+ constexpr const_iterator end() const {
return const_iterator(static_cast<const IntrusiveRedBlackTreeNode*>(nullptr));
}
- const_iterator cbegin() const {
+ constexpr const_iterator cbegin() const {
return this->begin();
}
- const_iterator cend() const {
+ constexpr const_iterator cend() const {
return this->end();
}
- iterator iterator_to(reference ref) {
- return iterator(&ref);
+ constexpr iterator iterator_to(reference ref) {
+ return iterator(std::addressof(ref));
}
- const_iterator iterator_to(const_reference ref) const {
- return const_iterator(&ref);
+ constexpr const_iterator iterator_to(const_reference ref) const {
+ return const_iterator(std::addressof(ref));
}
// Content management.
- bool empty() const {
+ constexpr bool empty() const {
return this->EmptyImpl();
}
- reference back() {
+ constexpr reference back() {
return *this->GetMaxImpl();
}
- const_reference back() const {
+ constexpr const_reference back() const {
return *this->GetMaxImpl();
}
- reference front() {
+ constexpr reference front() {
return *this->GetMinImpl();
}
- const_reference front() const {
+ constexpr const_reference front() const {
return *this->GetMinImpl();
}
- iterator erase(iterator it) {
+ constexpr iterator erase(iterator it) {
auto cur = std::addressof(*it);
auto next = GetNext(cur);
this->RemoveImpl(cur);
@@ -234,16 +241,16 @@ public:
} // namespace impl
template <typename T>
-concept HasLightCompareType = requires {
- { std::is_same<typename T::LightCompareType, void>::value } -> std::convertible_to<bool>;
+concept HasRedBlackKeyType = requires {
+ { std::is_same<typename T::RedBlackKeyType, void>::value } -> std::convertible_to<bool>;
};
namespace impl {
template <typename T, typename Default>
- consteval auto* GetLightCompareType() {
- if constexpr (HasLightCompareType<T>) {
- return static_cast<typename T::LightCompareType*>(nullptr);
+ consteval auto* GetRedBlackKeyType() {
+ if constexpr (HasRedBlackKeyType<T>) {
+ return static_cast<typename T::RedBlackKeyType*>(nullptr);
} else {
return static_cast<Default*>(nullptr);
}
@@ -252,16 +259,17 @@ namespace impl {
} // namespace impl
template <typename T, typename Default>
-using LightCompareType = std::remove_pointer_t<decltype(impl::GetLightCompareType<T, Default>())>;
+using RedBlackKeyType = std::remove_pointer_t<decltype(impl::GetRedBlackKeyType<T, Default>())>;
template <class T, class Traits, class Comparator>
class IntrusiveRedBlackTree {
+ YUZU_NON_COPYABLE(IntrusiveRedBlackTree);
public:
using ImplType = impl::IntrusiveRedBlackTreeImpl;
private:
- ImplType impl{};
+ ImplType m_impl;
public:
template <bool Const>
@@ -277,9 +285,9 @@ public:
using iterator = Iterator<false>;
using const_iterator = Iterator<true>;
- using light_value_type = LightCompareType<Comparator, value_type>;
- using const_light_pointer = const light_value_type*;
- using const_light_reference = const light_value_type&;
+ using key_type = RedBlackKeyType<Comparator, value_type>;
+ using const_key_pointer = const key_type*;
+ using const_key_reference = const key_type&;
template <bool Const>
class Iterator {
@@ -298,183 +306,201 @@ public:
IntrusiveRedBlackTree::reference>;
private:
- ImplIterator iterator;
+ ImplIterator m_impl;
private:
- explicit Iterator(ImplIterator it) : iterator(it) {}
+ constexpr explicit Iterator(ImplIterator it) : m_impl(it) {}
- explicit Iterator(typename std::conditional<Const, ImplType::const_iterator,
- ImplType::iterator>::type::pointer ptr)
- : iterator(ptr) {}
+ constexpr explicit Iterator(typename ImplIterator::pointer p) : m_impl(p) {}
- ImplIterator GetImplIterator() const {
- return this->iterator;
+ constexpr ImplIterator GetImplIterator() const {
+ return m_impl;
}
public:
- bool operator==(const Iterator& rhs) const {
- return this->iterator == rhs.iterator;
+ constexpr bool operator==(const Iterator& rhs) const {
+ return m_impl == rhs.m_impl;
}
- bool operator!=(const Iterator& rhs) const {
+ constexpr bool operator!=(const Iterator& rhs) const {
return !(*this == rhs);
}
- pointer operator->() const {
- return Traits::GetParent(std::addressof(*this->iterator));
+ constexpr pointer operator->() const {
+ return Traits::GetParent(std::addressof(*m_impl));
}
- reference operator*() const {
- return *Traits::GetParent(std::addressof(*this->iterator));
+ constexpr reference operator*() const {
+ return *Traits::GetParent(std::addressof(*m_impl));
}
- Iterator& operator++() {
- ++this->iterator;
+ constexpr Iterator& operator++() {
+ ++m_impl;
return *this;
}
- Iterator& operator--() {
- --this->iterator;
+ constexpr Iterator& operator--() {
+ --m_impl;
return *this;
}
- Iterator operator++(int) {
+ constexpr Iterator operator++(int) {
const Iterator it{*this};
- ++this->iterator;
+ ++m_impl;
return it;
}
- Iterator operator--(int) {
+ constexpr Iterator operator--(int) {
const Iterator it{*this};
- --this->iterator;
+ --m_impl;
return it;
}
- operator Iterator<true>() const {
- return Iterator<true>(this->iterator);
+ constexpr operator Iterator<true>() const {
+ return Iterator<true>(m_impl);
}
};
private:
- static int CompareImpl(const IntrusiveRedBlackTreeNode* lhs,
- const IntrusiveRedBlackTreeNode* rhs) {
+ static constexpr int CompareImpl(const IntrusiveRedBlackTreeNode* lhs,
+ const IntrusiveRedBlackTreeNode* rhs) {
return Comparator::Compare(*Traits::GetParent(lhs), *Traits::GetParent(rhs));
}
- static int LightCompareImpl(const void* elm, const IntrusiveRedBlackTreeNode* rhs) {
- return Comparator::Compare(*static_cast<const_light_pointer>(elm), *Traits::GetParent(rhs));
+ static constexpr int CompareKeyImpl(const_key_reference key,
+ const IntrusiveRedBlackTreeNode* rhs) {
+ return Comparator::Compare(key, *Traits::GetParent(rhs));
}
// Define accessors using RB_* functions.
- IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) {
- return RB_INSERT(&impl.root, node, CompareImpl);
+ constexpr IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) {
+ return freebsd::RB_INSERT(m_impl.m_root, node, CompareImpl);
}
- IntrusiveRedBlackTreeNode* FindImpl(const IntrusiveRedBlackTreeNode* node) const {
- return RB_FIND(const_cast<ImplType::RootType*>(&impl.root),
- const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
+ constexpr IntrusiveRedBlackTreeNode* FindImpl(IntrusiveRedBlackTreeNode const* node) const {
+ return freebsd::RB_FIND(const_cast<ImplType::RootType&>(m_impl.m_root),
+ const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
}
- IntrusiveRedBlackTreeNode* NFindImpl(const IntrusiveRedBlackTreeNode* node) const {
- return RB_NFIND(const_cast<ImplType::RootType*>(&impl.root),
- const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
+ constexpr IntrusiveRedBlackTreeNode* NFindImpl(IntrusiveRedBlackTreeNode const* node) const {
+ return freebsd::RB_NFIND(const_cast<ImplType::RootType&>(m_impl.m_root),
+ const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
}
- IntrusiveRedBlackTreeNode* FindLightImpl(const_light_pointer lelm) const {
- return RB_FIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root),
- static_cast<const void*>(lelm), LightCompareImpl);
+ constexpr IntrusiveRedBlackTreeNode* FindKeyImpl(const_key_reference key) const {
+ return freebsd::RB_FIND_KEY(const_cast<ImplType::RootType&>(m_impl.m_root), key,
+ CompareKeyImpl);
}
- IntrusiveRedBlackTreeNode* NFindLightImpl(const_light_pointer lelm) const {
- return RB_NFIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root),
- static_cast<const void*>(lelm), LightCompareImpl);
+ constexpr IntrusiveRedBlackTreeNode* NFindKeyImpl(const_key_reference key) const {
+ return freebsd::RB_NFIND_KEY(const_cast<ImplType::RootType&>(m_impl.m_root), key,
+ CompareKeyImpl);
+ }
+
+ constexpr IntrusiveRedBlackTreeNode* FindExistingImpl(
+ IntrusiveRedBlackTreeNode const* node) const {
+ return freebsd::RB_FIND_EXISTING(const_cast<ImplType::RootType&>(m_impl.m_root),
+ const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
+ }
+
+ constexpr IntrusiveRedBlackTreeNode* FindExistingKeyImpl(const_key_reference key) const {
+ return freebsd::RB_FIND_EXISTING_KEY(const_cast<ImplType::RootType&>(m_impl.m_root), key,
+ CompareKeyImpl);
}
public:
constexpr IntrusiveRedBlackTree() = default;
// Iterator accessors.
- iterator begin() {
- return iterator(this->impl.begin());
+ constexpr iterator begin() {
+ return iterator(m_impl.begin());
}
- const_iterator begin() const {
- return const_iterator(this->impl.begin());
+ constexpr const_iterator begin() const {
+ return const_iterator(m_impl.begin());
}
- iterator end() {
- return iterator(this->impl.end());
+ constexpr iterator end() {
+ return iterator(m_impl.end());
}
- const_iterator end() const {
- return const_iterator(this->impl.end());
+ constexpr const_iterator end() const {
+ return const_iterator(m_impl.end());
}
- const_iterator cbegin() const {
+ constexpr const_iterator cbegin() const {
return this->begin();
}
- const_iterator cend() const {
+ constexpr const_iterator cend() const {
return this->end();
}
- iterator iterator_to(reference ref) {
- return iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
+ constexpr iterator iterator_to(reference ref) {
+ return iterator(m_impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
}
- const_iterator iterator_to(const_reference ref) const {
- return const_iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
+ constexpr const_iterator iterator_to(const_reference ref) const {
+ return const_iterator(m_impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
}
// Content management.
- bool empty() const {
- return this->impl.empty();
+ constexpr bool empty() const {
+ return m_impl.empty();
}
- reference back() {
- return *Traits::GetParent(std::addressof(this->impl.back()));
+ constexpr reference back() {
+ return *Traits::GetParent(std::addressof(m_impl.back()));
}
- const_reference back() const {
- return *Traits::GetParent(std::addressof(this->impl.back()));
+ constexpr const_reference back() const {
+ return *Traits::GetParent(std::addressof(m_impl.back()));
}
- reference front() {
- return *Traits::GetParent(std::addressof(this->impl.front()));
+ constexpr reference front() {
+ return *Traits::GetParent(std::addressof(m_impl.front()));
}
- const_reference front() const {
- return *Traits::GetParent(std::addressof(this->impl.front()));
+ constexpr const_reference front() const {
+ return *Traits::GetParent(std::addressof(m_impl.front()));
}
- iterator erase(iterator it) {
- return iterator(this->impl.erase(it.GetImplIterator()));
+ constexpr iterator erase(iterator it) {
+ return iterator(m_impl.erase(it.GetImplIterator()));
}
- iterator insert(reference ref) {
+ constexpr iterator insert(reference ref) {
ImplType::pointer node = Traits::GetNode(std::addressof(ref));
this->InsertImpl(node);
return iterator(node);
}
- iterator find(const_reference ref) const {
+ constexpr iterator find(const_reference ref) const {
return iterator(this->FindImpl(Traits::GetNode(std::addressof(ref))));
}
- iterator nfind(const_reference ref) const {
+ constexpr iterator nfind(const_reference ref) const {
return iterator(this->NFindImpl(Traits::GetNode(std::addressof(ref))));
}
- iterator find_light(const_light_reference ref) const {
- return iterator(this->FindLightImpl(std::addressof(ref)));
+ constexpr iterator find_key(const_key_reference ref) const {
+ return iterator(this->FindKeyImpl(ref));
+ }
+
+ constexpr iterator nfind_key(const_key_reference ref) const {
+ return iterator(this->NFindKeyImpl(ref));
+ }
+
+ constexpr iterator find_existing(const_reference ref) const {
+ return iterator(this->FindExistingImpl(Traits::GetNode(std::addressof(ref))));
}
- iterator nfind_light(const_light_reference ref) const {
- return iterator(this->NFindLightImpl(std::addressof(ref)));
+ constexpr iterator find_existing_key(const_key_reference ref) const {
+ return iterator(this->FindExistingKeyImpl(ref));
}
};
-template <auto T, class Derived = impl::GetParentType<T>>
+template <auto T, class Derived = Common::impl::GetParentType<T>>
class IntrusiveRedBlackTreeMemberTraits;
template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
@@ -498,19 +524,16 @@ private:
return std::addressof(parent->*Member);
}
- static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
- return GetParentPointer<Member, Derived>(node);
+ static Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
+ return Common::GetParentPointer<Member, Derived>(node);
}
- static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
- return GetParentPointer<Member, Derived>(node);
+ static Derived const* GetParent(IntrusiveRedBlackTreeNode const* node) {
+ return Common::GetParentPointer<Member, Derived>(node);
}
-
-private:
- static constexpr TypedStorage<Derived> DerivedStorage = {};
};
-template <auto T, class Derived = impl::GetParentType<T>>
+template <auto T, class Derived = Common::impl::GetParentType<T>>
class IntrusiveRedBlackTreeMemberTraitsDeferredAssert;
template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
@@ -521,11 +544,6 @@ public:
IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraitsDeferredAssert, Comparator>;
using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;
- static constexpr bool IsValid() {
- TypedStorage<Derived> DerivedStorage = {};
- return GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage);
- }
-
private:
template <class, class, class>
friend class IntrusiveRedBlackTree;
@@ -540,30 +558,36 @@ private:
return std::addressof(parent->*Member);
}
- static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
- return GetParentPointer<Member, Derived>(node);
+ static Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
+ return Common::GetParentPointer<Member, Derived>(node);
}
- static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
- return GetParentPointer<Member, Derived>(node);
+ static Derived const* GetParent(IntrusiveRedBlackTreeNode const* node) {
+ return Common::GetParentPointer<Member, Derived>(node);
}
};
template <class Derived>
-class IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode {
+class alignas(void*) IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode {
public:
+ using IntrusiveRedBlackTreeNode::IntrusiveRedBlackTreeNode;
+
constexpr Derived* GetPrev() {
- return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
+ return static_cast<Derived*>(static_cast<IntrusiveRedBlackTreeBaseNode*>(
+ impl::IntrusiveRedBlackTreeImpl::GetPrev(this)));
}
constexpr const Derived* GetPrev() const {
- return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
+ return static_cast<const Derived*>(static_cast<const IntrusiveRedBlackTreeBaseNode*>(
+ impl::IntrusiveRedBlackTreeImpl::GetPrev(this)));
}
constexpr Derived* GetNext() {
- return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
+ return static_cast<Derived*>(static_cast<IntrusiveRedBlackTreeBaseNode*>(
+ impl::IntrusiveRedBlackTreeImpl::GetNext(this)));
}
constexpr const Derived* GetNext() const {
- return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
+ return static_cast<const Derived*>(static_cast<const IntrusiveRedBlackTreeBaseNode*>(
+ impl::IntrusiveRedBlackTreeImpl::GetNext(this)));
}
};
@@ -581,19 +605,22 @@ private:
friend class impl::IntrusiveRedBlackTreeImpl;
static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
- return static_cast<IntrusiveRedBlackTreeNode*>(parent);
+ return static_cast<IntrusiveRedBlackTreeNode*>(
+ static_cast<IntrusiveRedBlackTreeBaseNode<Derived>*>(parent));
}
static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
- return static_cast<const IntrusiveRedBlackTreeNode*>(parent);
+ return static_cast<const IntrusiveRedBlackTreeNode*>(
+ static_cast<const IntrusiveRedBlackTreeBaseNode<Derived>*>(parent));
}
static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
- return static_cast<Derived*>(node);
+ return static_cast<Derived*>(static_cast<IntrusiveRedBlackTreeBaseNode<Derived>*>(node));
}
- static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
- return static_cast<const Derived*>(node);
+ static constexpr Derived const* GetParent(IntrusiveRedBlackTreeNode const* node) {
+ return static_cast<const Derived*>(
+ static_cast<const IntrusiveRedBlackTreeBaseNode<Derived>*>(node));
}
};
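The old find_light/LightCompareType pair is renamed to find_key/RedBlackKeyType above; a comparator that exposes RedBlackKeyType can be searched by key without building a temporary element. Sketch only: the Process type and Comparator are made up, and the TreeType alias on the base-node traits is assumed from the unchanged part of this header:

#include "common/common_types.h"
#include "common/intrusive_red_black_tree.h"

struct Process : Common::IntrusiveRedBlackTreeBaseNode<Process> {
    u64 process_id{};

    struct Comparator {
        using RedBlackKeyType = u64; // enables find_key()/nfind_key()

        static int Compare(const Process& lhs, const Process& rhs) {
            return Compare(lhs.process_id, rhs);
        }
        static int Compare(u64 key, const Process& rhs) {
            if (key < rhs.process_id) {
                return -1;
            }
            return key > rhs.process_id ? 1 : 0;
        }
    };
};

using ProcessTree = Common::IntrusiveRedBlackTreeBaseTraits<Process>::TreeType<Process::Comparator>;

void FindByKeyExample(ProcessTree& tree, u64 pid) {
    if (auto it = tree.find_key(pid); it != tree.end()) {
        // *it is the element with the given id; no temporary Process was needed for the lookup
    }
}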
diff --git a/src/common/literals.h b/src/common/literals.h
index d55fed40b..0ad314afb 100644
--- a/src/common/literals.h
+++ b/src/common/literals.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp
index c51c05b28..15d92505e 100644
--- a/src/common/logging/backend.cpp
+++ b/src/common/logging/backend.cpp
@@ -1,14 +1,11 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <atomic>
#include <chrono>
#include <climits>
-#include <exception>
#include <stop_token>
#include <thread>
-#include <vector>
#include <fmt/format.h>
@@ -218,19 +215,17 @@ private:
Impl(const std::filesystem::path& file_backend_filename, const Filter& filter_)
: filter{filter_}, file_backend{file_backend_filename} {}
- ~Impl() {
- StopBackendThread();
- }
+ ~Impl() = default;
void StartBackendThread() {
- backend_thread = std::thread([this] {
- Common::SetCurrentThreadName("yuzu:Log");
+ backend_thread = std::jthread([this](std::stop_token stop_token) {
+ Common::SetCurrentThreadName("Logger");
Entry entry;
const auto write_logs = [this, &entry]() {
ForEachBackend([&entry](Backend& backend) { backend.Write(entry); });
};
- while (!stop.stop_requested()) {
- entry = message_queue.PopWait(stop.get_token());
+ while (!stop_token.stop_requested()) {
+ entry = message_queue.PopWait(stop_token);
if (entry.filename != nullptr) {
write_logs();
}
@@ -244,11 +239,6 @@ private:
});
}
- void StopBackendThread() {
- stop.request_stop();
- backend_thread.join();
- }
-
Entry CreateEntry(Class log_class, Level log_level, const char* filename, unsigned int line_nr,
const char* function, std::string&& message) const {
using std::chrono::duration_cast;
@@ -283,10 +273,9 @@ private:
ColorConsoleBackend color_console_backend{};
FileBackend file_backend;
- std::stop_source stop;
- std::thread backend_thread;
MPSCQueue<Entry, true> message_queue{};
std::chrono::steady_clock::time_point time_origin{std::chrono::steady_clock::now()};
+ std::jthread backend_thread;
};
} // namespace
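The backend.cpp change above drops the manual stop_source/StopBackendThread pair because std::jthread already requests stop and joins in its destructor; keeping the jthread as the last data member means the worker exits before the queue and backends it reads from are torn down. A minimal illustration of that behaviour (not yuzu code):

#include <stop_token>
#include <thread>

struct Worker {
    // ... members the thread uses would be declared before the jthread ...
    std::jthread thread{[](std::stop_token token) {
        while (!token.stop_requested()) {
            // drain work here
        }
    }};
}; // ~Worker() calls thread.request_stop() and then join() automatically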
diff --git a/src/common/logging/backend.h b/src/common/logging/backend.h
index bf785f402..12e5e2498 100644
--- a/src/common/logging/backend.h
+++ b/src/common/logging/backend.h
@@ -1,10 +1,8 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
-#include <filesystem>
#include "common/logging/filter.h"
namespace Common::Log {
diff --git a/src/common/logging/filter.cpp b/src/common/logging/filter.cpp
index b898a652c..a959acb74 100644
--- a/src/common/logging/filter.cpp
+++ b/src/common/logging/filter.cpp
@@ -1,6 +1,5 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include "common/logging/filter.h"
@@ -101,6 +100,7 @@ bool ParseFilterRule(Filter& instance, Iterator begin, Iterator end) {
SUB(Service, GRC) \
SUB(Service, HID) \
SUB(Service, IRS) \
+ SUB(Service, JIT) \
SUB(Service, LBL) \
SUB(Service, LDN) \
SUB(Service, LDR) \
@@ -108,6 +108,7 @@ bool ParseFilterRule(Filter& instance, Iterator begin, Iterator end) {
SUB(Service, Migration) \
SUB(Service, Mii) \
SUB(Service, MM) \
+ SUB(Service, MNPP) \
SUB(Service, NCM) \
SUB(Service, NFC) \
SUB(Service, NFP) \
@@ -118,6 +119,7 @@ bool ParseFilterRule(Filter& instance, Iterator begin, Iterator end) {
SUB(Service, NPNS) \
SUB(Service, NS) \
SUB(Service, NVDRV) \
+ SUB(Service, NVFlinger) \
SUB(Service, OLSC) \
SUB(Service, PCIE) \
SUB(Service, PCTL) \
@@ -125,7 +127,7 @@ bool ParseFilterRule(Filter& instance, Iterator begin, Iterator end) {
SUB(Service, PM) \
SUB(Service, PREPO) \
SUB(Service, PSC) \
- SUB(Service, PSM) \
+ SUB(Service, PTM) \
SUB(Service, SET) \
SUB(Service, SM) \
SUB(Service, SPL) \
diff --git a/src/common/logging/filter.h b/src/common/logging/filter.h
index 1a3074e04..54d172cc0 100644
--- a/src/common/logging/filter.h
+++ b/src/common/logging/filter.h
@@ -1,13 +1,11 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <chrono>
#include <cstddef>
-#include <string_view>
#include "common/logging/log.h"
namespace Common::Log {
diff --git a/src/common/logging/formatter.h b/src/common/logging/formatter.h
index 552cde75a..88e55505d 100644
--- a/src/common/logging/formatter.h
+++ b/src/common/logging/formatter.h
@@ -1,6 +1,5 @@
-// Copyright 2022 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/logging/log.h b/src/common/logging/log.h
index 0c80d01ee..c00c01a9e 100644
--- a/src/common/logging/log.h
+++ b/src/common/logging/log.h
@@ -1,6 +1,5 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/logging/log_entry.h b/src/common/logging/log_entry.h
index b28570071..d8d7daf76 100644
--- a/src/common/logging/log_entry.h
+++ b/src/common/logging/log_entry.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/logging/text_formatter.cpp b/src/common/logging/text_formatter.cpp
index 10b2281db..09398ea64 100644
--- a/src/common/logging/text_formatter.cpp
+++ b/src/common/logging/text_formatter.cpp
@@ -1,6 +1,5 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <array>
#include <cstdio>
@@ -10,12 +9,10 @@
#endif
#include "common/assert.h"
-#include "common/common_funcs.h"
#include "common/logging/filter.h"
#include "common/logging/log.h"
#include "common/logging/log_entry.h"
#include "common/logging/text_formatter.h"
-#include "common/string_util.h"
namespace Common::Log {
diff --git a/src/common/logging/text_formatter.h b/src/common/logging/text_formatter.h
index 171e74cfe..0d0ec4370 100644
--- a/src/common/logging/text_formatter.h
+++ b/src/common/logging/text_formatter.h
@@ -1,10 +1,8 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
-#include <cstddef>
#include <string>
namespace Common::Log {
diff --git a/src/common/logging/types.h b/src/common/logging/types.h
index 9ed0c7ad6..595c15ada 100644
--- a/src/common/logging/types.h
+++ b/src/common/logging/types.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -69,6 +68,7 @@ enum class Class : u8 {
Service_GRC, ///< The game recording service
Service_HID, ///< The HID (Human interface device) service
Service_IRS, ///< The IRS service
+ Service_JIT, ///< The JIT service
Service_LBL, ///< The LBL (LCD backlight) service
Service_LDN, ///< The LDN (Local domain network) service
Service_LDR, ///< The loader service
@@ -76,6 +76,7 @@ enum class Class : u8 {
Service_Migration, ///< The migration service
Service_Mii, ///< The Mii service
Service_MM, ///< The MM (Multimedia) service
+ Service_MNPP, ///< The MNPP service
Service_NCM, ///< The NCM service
Service_NFC, ///< The NFC (Near-field communication) service
Service_NFP, ///< The NFP service
@@ -86,6 +87,7 @@ enum class Class : u8 {
Service_NPNS, ///< The NPNS service
Service_NS, ///< The NS services
Service_NVDRV, ///< The NVDRV (Nvidia driver) service
+ Service_NVFlinger, ///< The NVFlinger service
Service_OLSC, ///< The OLSC service
Service_PCIE, ///< The PCIe service
Service_PCTL, ///< The PCTL (Parental control) service
@@ -93,7 +95,7 @@ enum class Class : u8 {
Service_PM, ///< The PM service
Service_PREPO, ///< The PREPO (Play report) service
Service_PSC, ///< The PSC service
- Service_PSM, ///< The PSM service
+ Service_PTM, ///< The PTM service
Service_SET, ///< The SET (Settings) service
Service_SM, ///< The SM (Service manager) service
Service_SPL, ///< The SPL service
diff --git a/src/common/lru_cache.h b/src/common/lru_cache.h
index 365488ba5..36cea5d27 100644
--- a/src/common/lru_cache.h
+++ b/src/common/lru_cache.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2+ or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/lz4_compression.cpp b/src/common/lz4_compression.cpp
index dbb40da7c..ffb32fecf 100644
--- a/src/common/lz4_compression.cpp
+++ b/src/common/lz4_compression.cpp
@@ -1,6 +1,5 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <lz4hc.h>
diff --git a/src/common/lz4_compression.h b/src/common/lz4_compression.h
index 1b4717595..7fd53a960 100644
--- a/src/common/lz4_compression.h
+++ b/src/common/lz4_compression.h
@@ -1,6 +1,5 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/math_util.h b/src/common/math_util.h
index 510c4e56d..1f5928c15 100644
--- a/src/common/math_util.h
+++ b/src/common/math_util.h
@@ -1,9 +1,10 @@
-// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2013 Dolphin Emulator Project
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
+#include <algorithm>
#include <cstdlib>
#include <type_traits>
@@ -20,10 +21,32 @@ struct Rectangle {
constexpr Rectangle() = default;
+ constexpr Rectangle(T width, T height) : right(width), bottom(height) {}
+
constexpr Rectangle(T left_, T top_, T right_, T bottom_)
: left(left_), top(top_), right(right_), bottom(bottom_) {}
- [[nodiscard]] T GetWidth() const {
+ [[nodiscard]] constexpr T Left() const {
+ return left;
+ }
+
+ [[nodiscard]] constexpr T Top() const {
+ return top;
+ }
+
+ [[nodiscard]] constexpr T Right() const {
+ return right;
+ }
+
+ [[nodiscard]] constexpr T Bottom() const {
+ return bottom;
+ }
+
+ [[nodiscard]] constexpr bool IsEmpty() const {
+ return (GetWidth() <= 0) || (GetHeight() <= 0);
+ }
+
+ [[nodiscard]] constexpr T GetWidth() const {
if constexpr (std::is_floating_point_v<T>) {
return std::abs(right - left);
} else {
@@ -31,7 +54,7 @@ struct Rectangle {
}
}
- [[nodiscard]] T GetHeight() const {
+ [[nodiscard]] constexpr T GetHeight() const {
if constexpr (std::is_floating_point_v<T>) {
return std::abs(bottom - top);
} else {
@@ -39,18 +62,35 @@ struct Rectangle {
}
}
- [[nodiscard]] Rectangle<T> TranslateX(const T x) const {
+ [[nodiscard]] constexpr Rectangle<T> TranslateX(const T x) const {
return Rectangle{left + x, top, right + x, bottom};
}
- [[nodiscard]] Rectangle<T> TranslateY(const T y) const {
+ [[nodiscard]] constexpr Rectangle<T> TranslateY(const T y) const {
return Rectangle{left, top + y, right, bottom + y};
}
- [[nodiscard]] Rectangle<T> Scale(const float s) const {
+ [[nodiscard]] constexpr Rectangle<T> Scale(const float s) const {
return Rectangle{left, top, static_cast<T>(static_cast<float>(left + GetWidth()) * s),
static_cast<T>(static_cast<float>(top + GetHeight()) * s)};
}
+
+ [[nodiscard]] constexpr bool operator==(const Rectangle<T>& rhs) const {
+ return (left == rhs.left) && (top == rhs.top) && (right == rhs.right) &&
+ (bottom == rhs.bottom);
+ }
+
+ [[nodiscard]] constexpr bool operator!=(const Rectangle<T>& rhs) const {
+ return !operator==(rhs);
+ }
+
+ [[nodiscard]] constexpr bool Intersect(const Rectangle<T>& with, Rectangle<T>* result) const {
+ result->left = std::max(left, with.left);
+ result->top = std::max(top, with.top);
+ result->right = std::min(right, with.right);
+ result->bottom = std::min(bottom, with.bottom);
+ return !result->IsEmpty();
+ }
};
template <typename T>
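Illustrative use of the new Rectangle helpers added above (the width/height constructor, operator==, and Intersect); the values are arbitrary:

#include "common/math_util.h"

void RectangleIntersectExample() {
    const Common::Rectangle<int> screen{1280, 720};          // left/top default to 0
    const Common::Rectangle<int> window{100, 50, 1500, 800}; // left, top, right, bottom
    Common::Rectangle<int> visible{};
    if (screen.Intersect(window, &visible)) {
        // visible == Common::Rectangle<int>{100, 50, 1280, 720}: the on-screen part of the window
    }
}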
diff --git a/src/common/memory_detect.cpp b/src/common/memory_detect.cpp
index 8cff6ec37..86a3abcc6 100644
--- a/src/common/memory_detect.cpp
+++ b/src/common/memory_detect.cpp
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#ifdef _WIN32
// clang-format off
@@ -70,4 +69,4 @@ const MemoryInfo& GetMemInfo() {
return mem_info;
}
-} // namespace Common \ No newline at end of file
+} // namespace Common
diff --git a/src/common/memory_detect.h b/src/common/memory_detect.h
index 0f73751c8..a345e6d28 100644
--- a/src/common/memory_detect.h
+++ b/src/common/memory_detect.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/microprofile.cpp b/src/common/microprofile.cpp
index ee25dd37f..e6657c82f 100644
--- a/src/common/microprofile.cpp
+++ b/src/common/microprofile.cpp
@@ -1,6 +1,5 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
// Includes the MicroProfile implementation in this file for compilation
#define MICROPROFILE_IMPL 1
diff --git a/src/common/microprofile.h b/src/common/microprofile.h
index 54e7f3cc4..56ef0a2dc 100644
--- a/src/common/microprofile.h
+++ b/src/common/microprofile.h
@@ -1,6 +1,5 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -23,12 +22,3 @@ typedef void* HANDLE;
#include <microprofile.h>
#define MP_RGB(r, g, b) ((r) << 16 | (g) << 8 | (b) << 0)
-
-// On OS X, some Mach header included by MicroProfile defines these as macros, conflicting with
-// identifiers we use.
-#ifdef PAGE_SIZE
-#undef PAGE_SIZE
-#endif
-#ifdef PAGE_MASK
-#undef PAGE_MASK
-#endif
diff --git a/src/common/microprofileui.h b/src/common/microprofileui.h
index 41abe6b75..39ed18ffa 100644
--- a/src/common/microprofileui.h
+++ b/src/common/microprofileui.h
@@ -1,6 +1,5 @@
-// Copyright 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2015 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/multi_level_page_table.cpp b/src/common/multi_level_page_table.cpp
new file mode 100644
index 000000000..46e362f3b
--- /dev/null
+++ b/src/common/multi_level_page_table.cpp
@@ -0,0 +1,9 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/multi_level_page_table.inc"
+
+namespace Common {
+template class Common::MultiLevelPageTable<u64>;
+template class Common::MultiLevelPageTable<u32>;
+} // namespace Common
diff --git a/src/common/multi_level_page_table.h b/src/common/multi_level_page_table.h
new file mode 100644
index 000000000..31f6676a0
--- /dev/null
+++ b/src/common/multi_level_page_table.h
@@ -0,0 +1,78 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Common {
+
+template <typename BaseAddr>
+class MultiLevelPageTable final {
+public:
+ constexpr MultiLevelPageTable() = default;
+ explicit MultiLevelPageTable(std::size_t address_space_bits, std::size_t first_level_bits,
+ std::size_t page_bits);
+
+ ~MultiLevelPageTable() noexcept;
+
+ MultiLevelPageTable(const MultiLevelPageTable&) = delete;
+ MultiLevelPageTable& operator=(const MultiLevelPageTable&) = delete;
+
+    MultiLevelPageTable(MultiLevelPageTable&& other) noexcept
+        : address_space_bits{std::exchange(other.address_space_bits, 0)},
+          first_level_bits{std::exchange(other.first_level_bits, 0)},
+          page_bits{std::exchange(other.page_bits, 0)},
+          first_level_shift{std::exchange(other.first_level_shift, 0)},
+          first_level_chunk_size{std::exchange(other.first_level_chunk_size, 0)},
+          alloc_size{std::exchange(other.alloc_size, 0)},
+          first_level_map{std::move(other.first_level_map)},
+          base_ptr{std::exchange(other.base_ptr, nullptr)} {}
+
+ MultiLevelPageTable& operator=(MultiLevelPageTable&& other) noexcept {
+ address_space_bits = std::exchange(other.address_space_bits, 0);
+ first_level_bits = std::exchange(other.first_level_bits, 0);
+ page_bits = std::exchange(other.page_bits, 0);
+ first_level_shift = std::exchange(other.first_level_shift, 0);
+ first_level_chunk_size = std::exchange(other.first_level_chunk_size, 0);
+ alloc_size = std::exchange(other.alloc_size, 0);
+ first_level_map = std::move(other.first_level_map);
+ base_ptr = std::exchange(other.base_ptr, nullptr);
+ return *this;
+ }
+
+ void ReserveRange(u64 start, std::size_t size);
+
+ [[nodiscard]] const BaseAddr& operator[](std::size_t index) const {
+ return base_ptr[index];
+ }
+
+ [[nodiscard]] BaseAddr& operator[](std::size_t index) {
+ return base_ptr[index];
+ }
+
+ [[nodiscard]] BaseAddr* data() {
+ return base_ptr;
+ }
+
+ [[nodiscard]] const BaseAddr* data() const {
+ return base_ptr;
+ }
+
+private:
+ void AllocateLevel(u64 level);
+
+ std::size_t address_space_bits{};
+ std::size_t first_level_bits{};
+ std::size_t page_bits{};
+ std::size_t first_level_shift{};
+ std::size_t first_level_chunk_size{};
+ std::size_t alloc_size{};
+ std::vector<void*> first_level_map{};
+ BaseAddr* base_ptr{};
+};
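+
+// Illustrative usage sketch (not part of the original header); the bit widths and names below
+// are hypothetical:
+//   Common::MultiLevelPageTable<u64> table{34, 10, 14}; // 34-bit space, 16 KiB (2^14) pages
+//   table.ReserveRange(0, 1ULL << 30);                  // commit entries for the first 1 GiB
+//   table[addr >> 14] = backing_address;                // one entry per page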
+
+} // namespace Common
diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc
new file mode 100644
index 000000000..8ac506fa0
--- /dev/null
+++ b/src/common/multi_level_page_table.inc
@@ -0,0 +1,84 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sys/mman.h>
+#endif
+
+#include "common/assert.h"
+#include "common/multi_level_page_table.h"
+
+namespace Common {
+
+template <typename BaseAddr>
+MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_,
+ std::size_t first_level_bits_,
+ std::size_t page_bits_)
+ : address_space_bits{address_space_bits_},
+ first_level_bits{first_level_bits_}, page_bits{page_bits_} {
+ if (page_bits == 0) {
+ return;
+ }
+ first_level_shift = address_space_bits - first_level_bits;
+ first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr);
+ alloc_size = (1ULL << (address_space_bits - page_bits)) * sizeof(BaseAddr);
+ std::size_t first_level_size = 1ULL << first_level_bits;
+ first_level_map.resize(first_level_size, nullptr);
+#ifdef _WIN32
+ void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)};
+#else
+ void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)};
+
+ if (base == MAP_FAILED) {
+ base = nullptr;
+ }
+#endif
+
+ ASSERT(base);
+ base_ptr = reinterpret_cast<BaseAddr*>(base);
+}
+
+template <typename BaseAddr>
+MultiLevelPageTable<BaseAddr>::~MultiLevelPageTable() noexcept {
+ if (!base_ptr) {
+ return;
+ }
+#ifdef _WIN32
+ ASSERT(VirtualFree(base_ptr, 0, MEM_RELEASE));
+#else
+ ASSERT(munmap(base_ptr, alloc_size) == 0);
+#endif
+}
+
+template <typename BaseAddr>
+void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) {
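+    // Commit backing memory for every first-level chunk that overlaps [start, start + size).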
+ const u64 new_start = start >> first_level_shift;
+ const u64 new_end = (start + size) >> first_level_shift;
+ for (u64 i = new_start; i <= new_end; i++) {
+ if (!first_level_map[i]) {
+ AllocateLevel(i);
+ }
+ }
+}
+
+template <typename BaseAddr>
+void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 level) {
+    void* ptr = reinterpret_cast<char*>(base_ptr) + level * first_level_chunk_size;
+#ifdef _WIN32
+ void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)};
+#else
+ void* base{mmap(ptr, first_level_chunk_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)};
+
+ if (base == MAP_FAILED) {
+ base = nullptr;
+ }
+#endif
+ ASSERT(base);
+
+ first_level_map[level] = base;
+}
+
+} // namespace Common
diff --git a/src/common/nvidia_flags.cpp b/src/common/nvidia_flags.cpp
index d1afd1f1d..7ed7690ee 100644
--- a/src/common/nvidia_flags.cpp
+++ b/src/common/nvidia_flags.cpp
@@ -1,12 +1,10 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <cstdlib>
#include <fmt/format.h>
-#include "common/fs/file.h"
#include "common/fs/fs.h"
#include "common/fs/path_util.h"
#include "common/nvidia_flags.h"
diff --git a/src/common/nvidia_flags.h b/src/common/nvidia_flags.h
index 8930efcec..8c3b1bfb9 100644
--- a/src/common/nvidia_flags.h
+++ b/src/common/nvidia_flags.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/page_table.cpp b/src/common/page_table.cpp
index 9fffd816f..b744b68ce 100644
--- a/src/common/page_table.cpp
+++ b/src/common/page_table.cpp
@@ -1,6 +1,5 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/page_table.h"
@@ -10,11 +9,65 @@ PageTable::PageTable() = default;
PageTable::~PageTable() noexcept = default;
-void PageTable::Resize(size_t address_space_width_in_bits, size_t page_size_in_bits) {
- const size_t num_page_table_entries{1ULL << (address_space_width_in_bits - page_size_in_bits)};
+bool PageTable::BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
+ u64 address) const {
+    // Set up invalid defaults.
+ out_entry.phys_addr = 0;
+ out_entry.block_size = page_size;
+ out_context.next_page = 0;
+
+ // Validate that we can read the actual entry.
+ const auto page = address / page_size;
+ if (page >= backing_addr.size()) {
+ return false;
+ }
+
+ // Validate that the entry is mapped.
+ const auto phys_addr = backing_addr[page];
+ if (phys_addr == 0) {
+ return false;
+ }
+
+ // Populate the results.
+ out_entry.phys_addr = phys_addr + address;
+ out_context.next_page = page + 1;
+ out_context.next_offset = address + page_size;
+
+ return true;
+}
+
+bool PageTable::ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const {
+    // Set up invalid defaults.
+ out_entry.phys_addr = 0;
+ out_entry.block_size = page_size;
+
+ // Validate that we can read the actual entry.
+ const auto page = context.next_page;
+ if (page >= backing_addr.size()) {
+ return false;
+ }
+
+ // Validate that the entry is mapped.
+ const auto phys_addr = backing_addr[page];
+ if (phys_addr == 0) {
+ return false;
+ }
+
+ // Populate the results.
+ out_entry.phys_addr = phys_addr + context.next_offset;
+ context.next_page = page + 1;
+ context.next_offset += page_size;
+
+ return true;
+}
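+
+// Illustrative traversal loop (not part of the original patch); `table` and `address` are
+// hypothetical:
+//   PageTable::TraversalEntry entry{};
+//   PageTable::TraversalContext context{};
+//   for (bool valid = table.BeginTraversal(entry, context, address); valid;
+//        valid = table.ContinueTraversal(entry, context)) {
+//       // entry.phys_addr holds the backing address for the current page
+//   }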
+
+void PageTable::Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits) {
+ const std::size_t num_page_table_entries{1ULL
+ << (address_space_width_in_bits - page_size_in_bits)};
pointers.resize(num_page_table_entries);
backing_addr.resize(num_page_table_entries);
current_address_space_width_in_bits = address_space_width_in_bits;
+ page_size = 1ULL << page_size_in_bits;
}
} // namespace Common
diff --git a/src/common/page_table.h b/src/common/page_table.h
index 8267e8b4d..1ad3a9f8b 100644
--- a/src/common/page_table.h
+++ b/src/common/page_table.h
@@ -1,11 +1,9 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <atomic>
-#include <tuple>
#include "common/common_types.h"
#include "common/virtual_buffer.h"
@@ -17,6 +15,9 @@ enum class PageType : u8 {
Unmapped,
/// Page is mapped to regular memory. This is the only type you can get pointers to.
Memory,
+ /// Page is mapped to regular memory, but inaccessible from CPU fastmem and must use
+ /// the callbacks.
+ DebugMemory,
/// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and
/// invalidation
RasterizerCachedMemory,
@@ -27,6 +28,16 @@ enum class PageType : u8 {
* mimics the way a real CPU page table works.
*/
struct PageTable {
+ struct TraversalEntry {
+ u64 phys_addr{};
+ std::size_t block_size{};
+ };
+
+ struct TraversalContext {
+ u64 next_page{};
+ u64 next_offset{};
+ };
+
/// Number of bits reserved for attribute tagging.
/// This can be at most the guaranteed alignment of the pointers in the page table.
static constexpr int ATTRIBUTE_BITS = 2;
@@ -89,6 +100,10 @@ struct PageTable {
PageTable(PageTable&&) noexcept = default;
PageTable& operator=(PageTable&&) noexcept = default;
+ bool BeginTraversal(TraversalEntry& out_entry, TraversalContext& out_context,
+ u64 address) const;
+ bool ContinueTraversal(TraversalEntry& out_entry, TraversalContext& context) const;
+
/**
* Resizes the page table to be able to accommodate enough pages within
* a given address space.
@@ -96,9 +111,9 @@ struct PageTable {
* @param address_space_width_in_bits The address size width in bits.
* @param page_size_in_bits The page size in bits.
*/
- void Resize(size_t address_space_width_in_bits, size_t page_size_in_bits);
+ void Resize(std::size_t address_space_width_in_bits, std::size_t page_size_in_bits);
- size_t GetAddressSpaceBits() const {
+ std::size_t GetAddressSpaceBits() const {
return current_address_space_width_in_bits;
}
@@ -110,9 +125,11 @@ struct PageTable {
VirtualBuffer<u64> backing_addr;
- size_t current_address_space_width_in_bits;
+ std::size_t current_address_space_width_in_bits{};
+
+ u8* fastmem_arena{};
- u8* fastmem_arena;
+ std::size_t page_size{};
};
} // namespace Common
diff --git a/src/common/param_package.cpp b/src/common/param_package.cpp
index bbf20f5eb..629babb81 100644
--- a/src/common/param_package.cpp
+++ b/src/common/param_package.cpp
@@ -1,6 +1,5 @@
-// Copyright 2017 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2017 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <array>
#include <stdexcept>
@@ -76,7 +75,7 @@ std::string ParamPackage::Serialize() const {
std::string ParamPackage::Get(const std::string& key, const std::string& default_value) const {
auto pair = data.find(key);
if (pair == data.end()) {
- LOG_DEBUG(Common, "key '{}' not found", key);
+ LOG_TRACE(Common, "key '{}' not found", key);
return default_value;
}
@@ -86,7 +85,7 @@ std::string ParamPackage::Get(const std::string& key, const std::string& default
int ParamPackage::Get(const std::string& key, int default_value) const {
auto pair = data.find(key);
if (pair == data.end()) {
- LOG_DEBUG(Common, "key '{}' not found", key);
+ LOG_TRACE(Common, "key '{}' not found", key);
return default_value;
}
@@ -101,7 +100,7 @@ int ParamPackage::Get(const std::string& key, int default_value) const {
float ParamPackage::Get(const std::string& key, float default_value) const {
auto pair = data.find(key);
if (pair == data.end()) {
- LOG_DEBUG(Common, "key {} not found", key);
+ LOG_TRACE(Common, "key {} not found", key);
return default_value;
}
diff --git a/src/common/param_package.h b/src/common/param_package.h
index c13e45479..d7c13cb1f 100644
--- a/src/common/param_package.h
+++ b/src/common/param_package.h
@@ -1,6 +1,5 @@
-// Copyright 2017 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2017 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/parent_of_member.h b/src/common/parent_of_member.h
index 58c70b0e7..8e03f17d8 100644
--- a/src/common/parent_of_member.h
+++ b/src/common/parent_of_member.h
@@ -1,19 +1,17 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <type_traits>
#include "common/assert.h"
-#include "common/common_types.h"
namespace Common {
namespace detail {
template <typename T, size_t Size, size_t Align>
struct TypedStorageImpl {
- std::aligned_storage_t<Size, Align> storage_;
+ alignas(Align) u8 storage_[Size];
};
} // namespace detail
diff --git a/src/common/point.h b/src/common/point.h
index c0a52ad8d..6491856ea 100644
--- a/src/common/point.h
+++ b/src/common/point.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/quaternion.h b/src/common/quaternion.h
index 4d0871eb4..5bb5f2af0 100644
--- a/src/common/quaternion.h
+++ b/src/common/quaternion.h
@@ -1,6 +1,5 @@
-// Copyright 2016 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2016 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/reader_writer_queue.h b/src/common/reader_writer_queue.h
new file mode 100644
index 000000000..60c41a8cb
--- /dev/null
+++ b/src/common/reader_writer_queue.h
@@ -0,0 +1,940 @@
+// SPDX-FileCopyrightText: 2013-2020 Cameron Desrochers
+// SPDX-License-Identifier: BSD-2-Clause
+
+#pragma once
+
+#include <cassert>
+#include <cstdint>
+#include <cstdlib> // For malloc/free/abort & size_t
+#include <memory>
+#include <new>
+#include <stdexcept>
+#include <type_traits>
+#include <utility>
+
+#include "common/atomic_helpers.h"
+
+#if __cplusplus > 199711L || _MSC_VER >= 1700 // C++11 or VS2012
+#include <chrono>
+#endif
+
+// A lock-free queue for a single-consumer, single-producer architecture.
+// The queue is also wait-free in the common path (except if more memory
+// needs to be allocated, in which case malloc is called).
+// Allocates memory sparingly, and only once if the original maximum size
+// estimate is never exceeded.
+// Tested on x86/x64 processors, but semantics should be correct for all
+// architectures (given the right implementations in atomic_helpers.h), provided
+// that aligned integer and pointer accesses are naturally atomic.
+// Note that there should only be one consumer thread and producer thread;
+// Switching roles of the threads, or using multiple consecutive threads for
+// one role, is not safe unless properly synchronized.
+// Using the queue exclusively from one thread is fine, though a bit silly.
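+//
+// Illustrative usage (not part of the original header): one producer thread, one consumer thread.
+//   Common::ReaderWriterQueue<int> q{128};
+//   q.enqueue(42);               // producer: may allocate a new block if the queue is full
+//   q.try_enqueue(43);           // producer: never allocates, fails if full
+//   int value;
+//   if (q.try_dequeue(value)) {} // consumer: false if the queue appeared empty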
+
+#ifndef MOODYCAMEL_CACHE_LINE_SIZE
+#define MOODYCAMEL_CACHE_LINE_SIZE 64
+#endif
+
+#ifndef MOODYCAMEL_EXCEPTIONS_ENABLED
+#if (defined(_MSC_VER) && defined(_CPPUNWIND)) || (defined(__GNUC__) && defined(__EXCEPTIONS)) || \
+ (!defined(_MSC_VER) && !defined(__GNUC__))
+#define MOODYCAMEL_EXCEPTIONS_ENABLED
+#endif
+#endif
+
+#ifndef MOODYCAMEL_HAS_EMPLACE
+#if !defined(_MSC_VER) || \
+ _MSC_VER >= 1800 // variadic templates: either a non-MS compiler or VS >= 2013
+#define MOODYCAMEL_HAS_EMPLACE 1
+#endif
+#endif
+
+#ifndef MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE
+#if defined(__APPLE__) && defined(__MACH__) && __cplusplus >= 201703L
+// This is required to find out what deployment target we are using
+#include <CoreFoundation/CoreFoundation.h>
+#if !defined(MAC_OS_X_VERSION_MIN_REQUIRED) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_14
+// C++17 new(size_t, align_val_t) is not backwards-compatible with older versions of macOS, so we
+// can't support over-alignment in this case
+#define MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE
+#endif
+#endif
+#endif
+
+#ifndef MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE
+#define MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE AE_ALIGN(MOODYCAMEL_CACHE_LINE_SIZE)
+#endif
+
+#ifdef AE_VCPP
+#pragma warning(push)
+#pragma warning(disable : 4324) // structure was padded due to __declspec(align())
+#pragma warning(disable : 4820) // padding was added
+#pragma warning(disable : 4127) // conditional expression is constant
+#endif
+
+namespace Common {
+
+template <typename T, size_t MAX_BLOCK_SIZE = 512>
+class MOODYCAMEL_MAYBE_ALIGN_TO_CACHELINE ReaderWriterQueue {
+ // Design: Based on a queue-of-queues. The low-level queues are just
+ // circular buffers with front and tail indices indicating where the
+ // next element to dequeue is and where the next element can be enqueued,
+ // respectively. Each low-level queue is called a "block". Each block
+ // wastes exactly one element's worth of space to keep the design simple
+ // (if front == tail then the queue is empty, and can't be full).
+ // The high-level queue is a circular linked list of blocks; again there
+ // is a front and tail, but this time they are pointers to the blocks.
+ // The front block is where the next element to be dequeued is, provided
+ // the block is not empty. The back block is where elements are to be
+ // enqueued, provided the block is not full.
+ // The producer thread owns all the tail indices/pointers. The consumer
+ // thread owns all the front indices/pointers. Both threads read each
+ // other's variables, but only the owning thread updates them. E.g. After
+ // the consumer reads the producer's tail, the tail may change before the
+ // consumer is done dequeuing an object, but the consumer knows the tail
+ // will never go backwards, only forwards.
+ // If there is no room to enqueue an object, an additional block (of
+ // equal size to the last block) is added. Blocks are never removed.
+
+public:
+ typedef T value_type;
+
+ // Constructs a queue that can hold at least `size` elements without further
+ // allocations. If more than MAX_BLOCK_SIZE elements are requested,
+ // then several blocks of MAX_BLOCK_SIZE each are reserved (including
+ // at least one extra buffer block).
+ AE_NO_TSAN explicit ReaderWriterQueue(size_t size = 15)
+#ifndef NDEBUG
+ : enqueuing(false), dequeuing(false)
+#endif
+ {
+ assert(MAX_BLOCK_SIZE == ceilToPow2(MAX_BLOCK_SIZE) &&
+ "MAX_BLOCK_SIZE must be a power of 2");
+ assert(MAX_BLOCK_SIZE >= 2 && "MAX_BLOCK_SIZE must be at least 2");
+
+ Block* firstBlock = nullptr;
+
+ largestBlockSize =
+ ceilToPow2(size + 1); // We need a spare slot to fit size elements in the block
+ if (largestBlockSize > MAX_BLOCK_SIZE * 2) {
+ // We need a spare block in case the producer is writing to a different block the
+ // consumer is reading from, and wants to enqueue the maximum number of elements. We
+ // also need a spare element in each block to avoid the ambiguity between front == tail
+ // meaning "empty" and "full". So the effective number of slots that are guaranteed to
+ // be usable at any time is the block size - 1 times the number of blocks - 1. Solving
+ // for size and applying a ceiling to the division gives us (after simplifying):
+ size_t initialBlockCount = (size + MAX_BLOCK_SIZE * 2 - 3) / (MAX_BLOCK_SIZE - 1);
+ largestBlockSize = MAX_BLOCK_SIZE;
+ Block* lastBlock = nullptr;
+ for (size_t i = 0; i != initialBlockCount; ++i) {
+ auto block = make_block(largestBlockSize);
+ if (block == nullptr) {
+#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED
+ throw std::bad_alloc();
+#else
+ abort();
+#endif
+ }
+ if (firstBlock == nullptr) {
+ firstBlock = block;
+ } else {
+ lastBlock->next = block;
+ }
+ lastBlock = block;
+ block->next = firstBlock;
+ }
+ } else {
+ firstBlock = make_block(largestBlockSize);
+ if (firstBlock == nullptr) {
+#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED
+ throw std::bad_alloc();
+#else
+ abort();
+#endif
+ }
+ firstBlock->next = firstBlock;
+ }
+ frontBlock = firstBlock;
+ tailBlock = firstBlock;
+
+ // Make sure the reader/writer threads will have the initialized memory setup above:
+ fence(memory_order_sync);
+ }
+
+ // Note: The queue should not be accessed concurrently while it's
+ // being moved. It's up to the user to synchronize this.
+ AE_NO_TSAN ReaderWriterQueue(ReaderWriterQueue&& other)
+ : frontBlock(other.frontBlock.load()), tailBlock(other.tailBlock.load()),
+ largestBlockSize(other.largestBlockSize)
+#ifndef NDEBUG
+ ,
+ enqueuing(false), dequeuing(false)
+#endif
+ {
+ other.largestBlockSize = 32;
+ Block* b = other.make_block(other.largestBlockSize);
+ if (b == nullptr) {
+#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED
+ throw std::bad_alloc();
+#else
+ abort();
+#endif
+ }
+ b->next = b;
+ other.frontBlock = b;
+ other.tailBlock = b;
+ }
+
+ // Note: The queue should not be accessed concurrently while it's
+ // being moved. It's up to the user to synchronize this.
+ ReaderWriterQueue& operator=(ReaderWriterQueue&& other) AE_NO_TSAN {
+ Block* b = frontBlock.load();
+ frontBlock = other.frontBlock.load();
+ other.frontBlock = b;
+ b = tailBlock.load();
+ tailBlock = other.tailBlock.load();
+ other.tailBlock = b;
+ std::swap(largestBlockSize, other.largestBlockSize);
+ return *this;
+ }
+
+ // Note: The queue should not be accessed concurrently while it's
+ // being deleted. It's up to the user to synchronize this.
+ AE_NO_TSAN ~ReaderWriterQueue() {
+ // Make sure we get the latest version of all variables from other CPUs:
+ fence(memory_order_sync);
+
+ // Destroy any remaining objects in queue and free memory
+ Block* frontBlock_ = frontBlock;
+ Block* block = frontBlock_;
+ do {
+ Block* nextBlock = block->next;
+ size_t blockFront = block->front;
+ size_t blockTail = block->tail;
+
+ for (size_t i = blockFront; i != blockTail; i = (i + 1) & block->sizeMask) {
+ auto element = reinterpret_cast<T*>(block->data + i * sizeof(T));
+ element->~T();
+ (void)element;
+ }
+
+ auto rawBlock = block->rawThis;
+ block->~Block();
+ std::free(rawBlock);
+ block = nextBlock;
+ } while (block != frontBlock_);
+ }
+
+ // Enqueues a copy of element if there is room in the queue.
+ // Returns true if the element was enqueued, false otherwise.
+ // Does not allocate memory.
+ AE_FORCEINLINE bool try_enqueue(T const& element) AE_NO_TSAN {
+ return inner_enqueue<CannotAlloc>(element);
+ }
+
+ // Enqueues a moved copy of element if there is room in the queue.
+ // Returns true if the element was enqueued, false otherwise.
+ // Does not allocate memory.
+ AE_FORCEINLINE bool try_enqueue(T&& element) AE_NO_TSAN {
+ return inner_enqueue<CannotAlloc>(std::forward<T>(element));
+ }
+
+#if MOODYCAMEL_HAS_EMPLACE
+ // Like try_enqueue() but with emplace semantics (i.e. construct-in-place).
+ template <typename... Args>
+ AE_FORCEINLINE bool try_emplace(Args&&... args) AE_NO_TSAN {
+ return inner_enqueue<CannotAlloc>(std::forward<Args>(args)...);
+ }
+#endif
+
+ // Enqueues a copy of element on the queue.
+ // Allocates an additional block of memory if needed.
+ // Only fails (returns false) if memory allocation fails.
+ AE_FORCEINLINE bool enqueue(T const& element) AE_NO_TSAN {
+ return inner_enqueue<CanAlloc>(element);
+ }
+
+ // Enqueues a moved copy of element on the queue.
+ // Allocates an additional block of memory if needed.
+ // Only fails (returns false) if memory allocation fails.
+ AE_FORCEINLINE bool enqueue(T&& element) AE_NO_TSAN {
+ return inner_enqueue<CanAlloc>(std::forward<T>(element));
+ }
+
+#if MOODYCAMEL_HAS_EMPLACE
+ // Like enqueue() but with emplace semantics (i.e. construct-in-place).
+ template <typename... Args>
+ AE_FORCEINLINE bool emplace(Args&&... args) AE_NO_TSAN {
+ return inner_enqueue<CanAlloc>(std::forward<Args>(args)...);
+ }
+#endif
+
+ // Attempts to dequeue an element; if the queue is empty,
+ // returns false instead. If the queue has at least one element,
+ // moves front to result using operator=, then returns true.
+ template <typename U>
+ bool try_dequeue(U& result) AE_NO_TSAN {
+#ifndef NDEBUG
+ ReentrantGuard guard(this->dequeuing);
+#endif
+
+ // High-level pseudocode:
+ // Remember where the tail block is
+ // If the front block has an element in it, dequeue it
+ // Else
+ // If front block was the tail block when we entered the function, return false
+ // Else advance to next block and dequeue the item there
+
+ // Note that we have to use the value of the tail block from before we check if the front
+ // block is full or not, in case the front block is empty and then, before we check if the
+ // tail block is at the front block or not, the producer fills up the front block *and
+ // moves on*, which would make us skip a filled block. Seems unlikely, but was consistently
+ // reproducible in practice.
+ // In order to avoid overhead in the common case, though, we do a double-checked pattern
+ // where we have the fast path if the front block is not empty, then read the tail block,
+ // then re-read the front block and check if it's not empty again, then check if the tail
+ // block has advanced.
+
+ Block* frontBlock_ = frontBlock.load();
+ size_t blockTail = frontBlock_->localTail;
+ size_t blockFront = frontBlock_->front.load();
+
+ if (blockFront != blockTail ||
+ blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
+ fence(memory_order_acquire);
+
+ non_empty_front_block:
+ // Front block not empty, dequeue from here
+ auto element = reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T));
+ result = std::move(*element);
+ element->~T();
+
+ blockFront = (blockFront + 1) & frontBlock_->sizeMask;
+
+ fence(memory_order_release);
+ frontBlock_->front = blockFront;
+ } else if (frontBlock_ != tailBlock.load()) {
+ fence(memory_order_acquire);
+
+ frontBlock_ = frontBlock.load();
+ blockTail = frontBlock_->localTail = frontBlock_->tail.load();
+ blockFront = frontBlock_->front.load();
+ fence(memory_order_acquire);
+
+ if (blockFront != blockTail) {
+ // Oh look, the front block isn't empty after all
+ goto non_empty_front_block;
+ }
+
+ // Front block is empty but there's another block ahead, advance to it
+ Block* nextBlock = frontBlock_->next;
+ // Don't need an acquire fence here since next can only ever be set on the tailBlock,
+ // and we're not the tailBlock, and we did an acquire earlier after reading tailBlock
+ // which ensures next is up-to-date on this CPU in case we recently were at tailBlock.
+
+ size_t nextBlockFront = nextBlock->front.load();
+ size_t nextBlockTail = nextBlock->localTail = nextBlock->tail.load();
+ fence(memory_order_acquire);
+
+ // Since the tailBlock is only ever advanced after being written to,
+ // we know there's for sure an element to dequeue on it
+ assert(nextBlockFront != nextBlockTail);
+ AE_UNUSED(nextBlockTail);
+
+ // We're done with this block, let the producer use it if it needs
+ fence(memory_order_release); // Expose possibly pending changes to frontBlock->front
+ // from last dequeue
+ frontBlock = frontBlock_ = nextBlock;
+
+ compiler_fence(memory_order_release); // Not strictly needed
+
+ auto element = reinterpret_cast<T*>(frontBlock_->data + nextBlockFront * sizeof(T));
+
+ result = std::move(*element);
+ element->~T();
+
+ nextBlockFront = (nextBlockFront + 1) & frontBlock_->sizeMask;
+
+ fence(memory_order_release);
+ frontBlock_->front = nextBlockFront;
+ } else {
+ // No elements in current block and no other block to advance to
+ return false;
+ }
+
+ return true;
+ }
+
+ // Returns a pointer to the front element in the queue (the one that
+ // would be removed next by a call to `try_dequeue` or `pop`). If the
+ // queue appears empty at the time the method is called, nullptr is
+ // returned instead.
+ // Must be called only from the consumer thread.
+ T* peek() const AE_NO_TSAN {
+#ifndef NDEBUG
+ ReentrantGuard guard(this->dequeuing);
+#endif
+ // See try_dequeue() for reasoning
+
+ Block* frontBlock_ = frontBlock.load();
+ size_t blockTail = frontBlock_->localTail;
+ size_t blockFront = frontBlock_->front.load();
+
+ if (blockFront != blockTail ||
+ blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
+ fence(memory_order_acquire);
+ non_empty_front_block:
+ return reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T));
+ } else if (frontBlock_ != tailBlock.load()) {
+ fence(memory_order_acquire);
+ frontBlock_ = frontBlock.load();
+ blockTail = frontBlock_->localTail = frontBlock_->tail.load();
+ blockFront = frontBlock_->front.load();
+ fence(memory_order_acquire);
+
+ if (blockFront != blockTail) {
+ goto non_empty_front_block;
+ }
+
+ Block* nextBlock = frontBlock_->next;
+
+ size_t nextBlockFront = nextBlock->front.load();
+ fence(memory_order_acquire);
+
+ assert(nextBlockFront != nextBlock->tail.load());
+ return reinterpret_cast<T*>(nextBlock->data + nextBlockFront * sizeof(T));
+ }
+
+ return nullptr;
+ }
+
+ // Removes the front element from the queue, if any, without returning it.
+ // Returns true on success, or false if the queue appeared empty at the time
+ // `pop` was called.
+ bool pop() AE_NO_TSAN {
+#ifndef NDEBUG
+ ReentrantGuard guard(this->dequeuing);
+#endif
+ // See try_dequeue() for reasoning
+
+ Block* frontBlock_ = frontBlock.load();
+ size_t blockTail = frontBlock_->localTail;
+ size_t blockFront = frontBlock_->front.load();
+
+ if (blockFront != blockTail ||
+ blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
+ fence(memory_order_acquire);
+
+ non_empty_front_block:
+ auto element = reinterpret_cast<T*>(frontBlock_->data + blockFront * sizeof(T));
+ element->~T();
+
+ blockFront = (blockFront + 1) & frontBlock_->sizeMask;
+
+ fence(memory_order_release);
+ frontBlock_->front = blockFront;
+ } else if (frontBlock_ != tailBlock.load()) {
+ fence(memory_order_acquire);
+ frontBlock_ = frontBlock.load();
+ blockTail = frontBlock_->localTail = frontBlock_->tail.load();
+ blockFront = frontBlock_->front.load();
+ fence(memory_order_acquire);
+
+ if (blockFront != blockTail) {
+ goto non_empty_front_block;
+ }
+
+ // Front block is empty but there's another block ahead, advance to it
+ Block* nextBlock = frontBlock_->next;
+
+ size_t nextBlockFront = nextBlock->front.load();
+ size_t nextBlockTail = nextBlock->localTail = nextBlock->tail.load();
+ fence(memory_order_acquire);
+
+ assert(nextBlockFront != nextBlockTail);
+ AE_UNUSED(nextBlockTail);
+
+ fence(memory_order_release);
+ frontBlock = frontBlock_ = nextBlock;
+
+ compiler_fence(memory_order_release);
+
+ auto element = reinterpret_cast<T*>(frontBlock_->data + nextBlockFront * sizeof(T));
+ element->~T();
+
+ nextBlockFront = (nextBlockFront + 1) & frontBlock_->sizeMask;
+
+ fence(memory_order_release);
+ frontBlock_->front = nextBlockFront;
+ } else {
+ // No elements in current block and no other block to advance to
+ return false;
+ }
+
+ return true;
+ }
+
+ // Returns the approximate number of items currently in the queue.
+ // Safe to call from both the producer and consumer threads.
+ inline size_t size_approx() const AE_NO_TSAN {
+ size_t result = 0;
+ Block* frontBlock_ = frontBlock.load();
+ Block* block = frontBlock_;
+ do {
+ fence(memory_order_acquire);
+ size_t blockFront = block->front.load();
+ size_t blockTail = block->tail.load();
+ result += (blockTail - blockFront) & block->sizeMask;
+ block = block->next.load();
+ } while (block != frontBlock_);
+ return result;
+ }
+
+ // Returns the total number of items that could be enqueued without incurring
+ // an allocation when this queue is empty.
+ // Safe to call from both the producer and consumer threads.
+ //
+ // NOTE: The actual capacity during usage may be different depending on the consumer.
+ // If the consumer is removing elements concurrently, the producer cannot add to
+ // the block the consumer is removing from until it's completely empty, except in
+ // the case where the producer was writing to the same block the consumer was
+ // reading from the whole time.
+ inline size_t max_capacity() const {
+ size_t result = 0;
+ Block* frontBlock_ = frontBlock.load();
+ Block* block = frontBlock_;
+ do {
+ fence(memory_order_acquire);
+ result += block->sizeMask;
+ block = block->next.load();
+ } while (block != frontBlock_);
+ return result;
+ }
+
+private:
+ enum AllocationMode { CanAlloc, CannotAlloc };
+
+#if MOODYCAMEL_HAS_EMPLACE
+ template <AllocationMode canAlloc, typename... Args>
+ bool inner_enqueue(Args&&... args) AE_NO_TSAN
+#else
+ template <AllocationMode canAlloc, typename U>
+ bool inner_enqueue(U&& element) AE_NO_TSAN
+#endif
+ {
+#ifndef NDEBUG
+ ReentrantGuard guard(this->enqueuing);
+#endif
+
+ // High-level pseudocode (assuming we're allowed to alloc a new block):
+ // If room in tail block, add to tail
+ // Else check next block
+ // If next block is not the head block, enqueue on next block
+ // Else create a new block and enqueue there
+ // Advance tail to the block we just enqueued to
+
+ Block* tailBlock_ = tailBlock.load();
+ size_t blockFront = tailBlock_->localFront;
+ size_t blockTail = tailBlock_->tail.load();
+
+ size_t nextBlockTail = (blockTail + 1) & tailBlock_->sizeMask;
+ if (nextBlockTail != blockFront ||
+ nextBlockTail != (tailBlock_->localFront = tailBlock_->front.load())) {
+ fence(memory_order_acquire);
+ // This block has room for at least one more element
+ char* location = tailBlock_->data + blockTail * sizeof(T);
+#if MOODYCAMEL_HAS_EMPLACE
+ new (location) T(std::forward<Args>(args)...);
+#else
+ new (location) T(std::forward<U>(element));
+#endif
+
+ fence(memory_order_release);
+ tailBlock_->tail = nextBlockTail;
+ } else {
+ fence(memory_order_acquire);
+ if (tailBlock_->next.load() != frontBlock) {
+ // Note that the reason we can't advance to the frontBlock and start adding new
+ // entries there is because if we did, then dequeue would stay in that block,
+ // eventually reading the new values, instead of advancing to the next full block
+ // (whose values were enqueued first and so should be consumed first).
+
+ fence(memory_order_acquire); // Ensure we get latest writes if we got the latest
+ // frontBlock
+
+ // tailBlock is full, but there's a free block ahead, use it
+ Block* tailBlockNext = tailBlock_->next.load();
+ size_t nextBlockFront = tailBlockNext->localFront = tailBlockNext->front.load();
+ nextBlockTail = tailBlockNext->tail.load();
+ fence(memory_order_acquire);
+
+ // This block must be empty since it's not the head block and we
+ // go through the blocks in a circle
+ assert(nextBlockFront == nextBlockTail);
+ tailBlockNext->localFront = nextBlockFront;
+
+ char* location = tailBlockNext->data + nextBlockTail * sizeof(T);
+#if MOODYCAMEL_HAS_EMPLACE
+ new (location) T(std::forward<Args>(args)...);
+#else
+ new (location) T(std::forward<U>(element));
+#endif
+
+ tailBlockNext->tail = (nextBlockTail + 1) & tailBlockNext->sizeMask;
+
+ fence(memory_order_release);
+ tailBlock = tailBlockNext;
+ } else if (canAlloc == CanAlloc) {
+ // tailBlock is full and there's no free block ahead; create a new block
+ auto newBlockSize =
+ largestBlockSize >= MAX_BLOCK_SIZE ? largestBlockSize : largestBlockSize * 2;
+ auto newBlock = make_block(newBlockSize);
+ if (newBlock == nullptr) {
+ // Could not allocate a block!
+ return false;
+ }
+ largestBlockSize = newBlockSize;
+
+#if MOODYCAMEL_HAS_EMPLACE
+ new (newBlock->data) T(std::forward<Args>(args)...);
+#else
+ new (newBlock->data) T(std::forward<U>(element));
+#endif
+ assert(newBlock->front == 0);
+ newBlock->tail = newBlock->localTail = 1;
+
+ newBlock->next = tailBlock_->next.load();
+ tailBlock_->next = newBlock;
+
+ // Might be possible for the dequeue thread to see the new tailBlock->next
+ // *without* seeing the new tailBlock value, but this is OK since it can't
+ // advance to the next block until tailBlock is set anyway (because the only
+ // case where it could try to read the next is if it's already at the tailBlock,
+ // and it won't advance past tailBlock in any circumstance).
+
+ fence(memory_order_release);
+ tailBlock = newBlock;
+ } else if (canAlloc == CannotAlloc) {
+ // Would have had to allocate a new block to enqueue, but not allowed
+ return false;
+ } else {
+ assert(false && "Should be unreachable code");
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ // Disable copying
+ ReaderWriterQueue(ReaderWriterQueue const&) {}
+
+ // Disable assignment
+ ReaderWriterQueue& operator=(ReaderWriterQueue const&) {}
+
+ AE_FORCEINLINE static size_t ceilToPow2(size_t x) {
+ // From http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+ --x;
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ for (size_t i = 1; i < sizeof(size_t); i <<= 1) {
+ x |= x >> (i << 3);
+ }
+ ++x;
+ return x;
+ }
+
+ template <typename U>
+ static AE_FORCEINLINE char* align_for(char* ptr) AE_NO_TSAN {
+ const std::size_t alignment = std::alignment_of<U>::value;
+ return ptr + (alignment - (reinterpret_cast<std::uintptr_t>(ptr) % alignment)) % alignment;
+ }
+
+private:
+#ifndef NDEBUG
+ struct ReentrantGuard {
+ AE_NO_TSAN ReentrantGuard(weak_atomic<bool>& _inSection) : inSection(_inSection) {
+ assert(!inSection &&
+ "Concurrent (or re-entrant) enqueue or dequeue operation detected (only one "
+ "thread at a time may hold the producer or consumer role)");
+ inSection = true;
+ }
+
+ AE_NO_TSAN ~ReentrantGuard() {
+ inSection = false;
+ }
+
+ private:
+ ReentrantGuard& operator=(ReentrantGuard const&);
+
+ private:
+ weak_atomic<bool>& inSection;
+ };
+#endif
+
+ struct Block {
+ // Avoid false-sharing by putting highly contended variables on their own cache lines
+ weak_atomic<size_t> front; // (Atomic) Elements are read from here
+ size_t localTail; // An uncontended shadow copy of tail, owned by the consumer
+
+ char cachelineFiller0[MOODYCAMEL_CACHE_LINE_SIZE - sizeof(weak_atomic<size_t>) -
+ sizeof(size_t)];
+ weak_atomic<size_t> tail; // (Atomic) Elements are enqueued here
+ size_t localFront;
+
+ char cachelineFiller1[MOODYCAMEL_CACHE_LINE_SIZE - sizeof(weak_atomic<size_t>) -
+ sizeof(size_t)]; // next isn't very contended, but we don't want it on
+ // the same cache line as tail (which is)
+ weak_atomic<Block*> next; // (Atomic)
+
+ char* data; // Contents (on heap) are aligned to T's alignment
+
+ const size_t sizeMask;
+
+ // size must be a power of two (and greater than 0)
+ AE_NO_TSAN Block(size_t const& _size, char* _rawThis, char* _data)
+ : front(0UL), localTail(0), tail(0UL), localFront(0), next(nullptr), data(_data),
+ sizeMask(_size - 1), rawThis(_rawThis) {}
+
+ private:
+ // C4512 - Assignment operator could not be generated
+ Block& operator=(Block const&);
+
+ public:
+ char* rawThis;
+ };
+
+ static Block* make_block(size_t capacity) AE_NO_TSAN {
+ // Allocate enough memory for the block itself, as well as all the elements it will contain
+ auto size = sizeof(Block) + std::alignment_of<Block>::value - 1;
+ size += sizeof(T) * capacity + std::alignment_of<T>::value - 1;
+ auto newBlockRaw = static_cast<char*>(std::malloc(size));
+ if (newBlockRaw == nullptr) {
+ return nullptr;
+ }
+
+ auto newBlockAligned = align_for<Block>(newBlockRaw);
+ auto newBlockData = align_for<T>(newBlockAligned + sizeof(Block));
+ return new (newBlockAligned) Block(capacity, newBlockRaw, newBlockData);
+ }
+
+private:
+ weak_atomic<Block*> frontBlock; // (Atomic) Elements are dequeued from this block
+
+ char cachelineFiller[MOODYCAMEL_CACHE_LINE_SIZE - sizeof(weak_atomic<Block*>)];
+ weak_atomic<Block*> tailBlock; // (Atomic) Elements are enqueued to this block
+
+ size_t largestBlockSize;
+
+#ifndef NDEBUG
+ weak_atomic<bool> enqueuing;
+ mutable weak_atomic<bool> dequeuing;
+#endif
+};
+
+// Like ReaderWriterQueue, but also provides blocking operations
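+//
+// Illustrative usage (not part of the original header):
+//   Common::BlockingReaderWriterQueue<int> q;
+//   q.enqueue(1);         // producer thread
+//   int item;
+//   q.wait_dequeue(item); // consumer thread; blocks until an element is available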
+template <typename T, size_t MAX_BLOCK_SIZE = 512>
+class BlockingReaderWriterQueue {
+private:
+ typedef ::Common::ReaderWriterQueue<T, MAX_BLOCK_SIZE> ReaderWriterQueue;
+
+public:
+ explicit BlockingReaderWriterQueue(size_t size = 15) AE_NO_TSAN
+ : inner(size),
+ sema(new spsc_sema::LightweightSemaphore()) {}
+
+ BlockingReaderWriterQueue(BlockingReaderWriterQueue&& other) AE_NO_TSAN
+ : inner(std::move(other.inner)),
+ sema(std::move(other.sema)) {}
+
+ BlockingReaderWriterQueue& operator=(BlockingReaderWriterQueue&& other) AE_NO_TSAN {
+ std::swap(sema, other.sema);
+ std::swap(inner, other.inner);
+ return *this;
+ }
+
+ // Enqueues a copy of element if there is room in the queue.
+ // Returns true if the element was enqueued, false otherwise.
+ // Does not allocate memory.
+ AE_FORCEINLINE bool try_enqueue(T const& element) AE_NO_TSAN {
+ if (inner.try_enqueue(element)) {
+ sema->signal();
+ return true;
+ }
+ return false;
+ }
+
+ // Enqueues a moved copy of element if there is room in the queue.
+ // Returns true if the element was enqueued, false otherwise.
+ // Does not allocate memory.
+ AE_FORCEINLINE bool try_enqueue(T&& element) AE_NO_TSAN {
+ if (inner.try_enqueue(std::forward<T>(element))) {
+ sema->signal();
+ return true;
+ }
+ return false;
+ }
+
+#if MOODYCAMEL_HAS_EMPLACE
+ // Like try_enqueue() but with emplace semantics (i.e. construct-in-place).
+ template <typename... Args>
+ AE_FORCEINLINE bool try_emplace(Args&&... args) AE_NO_TSAN {
+ if (inner.try_emplace(std::forward<Args>(args)...)) {
+ sema->signal();
+ return true;
+ }
+ return false;
+ }
+#endif
+
+ // Enqueues a copy of element on the queue.
+ // Allocates an additional block of memory if needed.
+ // Only fails (returns false) if memory allocation fails.
+ AE_FORCEINLINE bool enqueue(T const& element) AE_NO_TSAN {
+ if (inner.enqueue(element)) {
+ sema->signal();
+ return true;
+ }
+ return false;
+ }
+
+ // Enqueues a moved copy of element on the queue.
+ // Allocates an additional block of memory if needed.
+ // Only fails (returns false) if memory allocation fails.
+ AE_FORCEINLINE bool enqueue(T&& element) AE_NO_TSAN {
+ if (inner.enqueue(std::forward<T>(element))) {
+ sema->signal();
+ return true;
+ }
+ return false;
+ }
+
+#if MOODYCAMEL_HAS_EMPLACE
+ // Like enqueue() but with emplace semantics (i.e. construct-in-place).
+ template <typename... Args>
+ AE_FORCEINLINE bool emplace(Args&&... args) AE_NO_TSAN {
+ if (inner.emplace(std::forward<Args>(args)...)) {
+ sema->signal();
+ return true;
+ }
+ return false;
+ }
+#endif
+
+ // Attempts to dequeue an element; if the queue is empty,
+ // returns false instead. If the queue has at least one element,
+ // moves front to result using operator=, then returns true.
+ template <typename U>
+ bool try_dequeue(U& result) AE_NO_TSAN {
+ if (sema->tryWait()) {
+ bool success = inner.try_dequeue(result);
+ assert(success);
+ AE_UNUSED(success);
+ return true;
+ }
+ return false;
+ }
+
+ // Attempts to dequeue an element; if the queue is empty,
+ // waits until an element is available, then dequeues it.
+ template <typename U>
+ void wait_dequeue(U& result) AE_NO_TSAN {
+ while (!sema->wait())
+ ;
+ bool success = inner.try_dequeue(result);
+ AE_UNUSED(result);
+ assert(success);
+ AE_UNUSED(success);
+ }
+
+ // Attempts to dequeue an element; if the queue is empty,
+ // waits until an element is available up to the specified timeout,
+ // then dequeues it and returns true, or returns false if the timeout
+ // expires before an element can be dequeued.
+ // Using a negative timeout indicates an indefinite timeout,
+ // and is thus functionally equivalent to calling wait_dequeue.
+ template <typename U>
+ bool wait_dequeue_timed(U& result, std::int64_t timeout_usecs) AE_NO_TSAN {
+ if (!sema->wait(timeout_usecs)) {
+ return false;
+ }
+ bool success = inner.try_dequeue(result);
+ AE_UNUSED(result);
+ assert(success);
+ AE_UNUSED(success);
+ return true;
+ }
+
+#if __cplusplus > 199711L || _MSC_VER >= 1700
+ // Attempts to dequeue an element; if the queue is empty,
+ // waits until an element is available up to the specified timeout,
+ // then dequeues it and returns true, or returns false if the timeout
+ // expires before an element can be dequeued.
+ // Using a negative timeout indicates an indefinite timeout,
+ // and is thus functionally equivalent to calling wait_dequeue.
+ template <typename U, typename Rep, typename Period>
+ inline bool wait_dequeue_timed(U& result,
+ std::chrono::duration<Rep, Period> const& timeout) AE_NO_TSAN {
+ return wait_dequeue_timed(
+ result, std::chrono::duration_cast<std::chrono::microseconds>(timeout).count());
+ }
+#endif
+
+ // Returns a pointer to the front element in the queue (the one that
+ // would be removed next by a call to `try_dequeue` or `pop`). If the
+ // queue appears empty at the time the method is called, nullptr is
+ // returned instead.
+ // Must be called only from the consumer thread.
+ AE_FORCEINLINE T* peek() const AE_NO_TSAN {
+ return inner.peek();
+ }
+
+ // Removes the front element from the queue, if any, without returning it.
+ // Returns true on success, or false if the queue appeared empty at the time
+ // `pop` was called.
+ AE_FORCEINLINE bool pop() AE_NO_TSAN {
+ if (sema->tryWait()) {
+ bool result = inner.pop();
+ assert(result);
+ AE_UNUSED(result);
+ return true;
+ }
+ return false;
+ }
+
+ // Returns the approximate number of items currently in the queue.
+ // Safe to call from both the producer and consumer threads.
+ AE_FORCEINLINE size_t size_approx() const AE_NO_TSAN {
+ return sema->availableApprox();
+ }
+
+ // Returns the total number of items that could be enqueued without incurring
+ // an allocation when this queue is empty.
+ // Safe to call from both the producer and consumer threads.
+ //
+ // NOTE: The actual capacity during usage may be different depending on the consumer.
+ // If the consumer is removing elements concurrently, the producer cannot add to
+ // the block the consumer is removing from until it's completely empty, except in
+ // the case where the producer was writing to the same block the consumer was
+ // reading from the whole time.
+ AE_FORCEINLINE size_t max_capacity() const {
+ return inner.max_capacity();
+ }
+
+private:
+ // Disable copying & assignment
+ BlockingReaderWriterQueue(BlockingReaderWriterQueue const&) {}
+ BlockingReaderWriterQueue& operator=(BlockingReaderWriterQueue const&) {}
+
+private:
+ ReaderWriterQueue inner;
+ std::unique_ptr<spsc_sema::LightweightSemaphore> sema;
+};
+
+} // namespace Common
+
+#ifdef AE_VCPP
+#pragma warning(pop)
+#endif
diff --git a/src/common/ring_buffer.h b/src/common/ring_buffer.h
index 4a8d09806..4c328ab44 100644
--- a/src/common/ring_buffer.h
+++ b/src/common/ring_buffer.h
@@ -1,6 +1,5 @@
-// Copyright 2018 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -12,7 +11,6 @@
#include <new>
#include <type_traits>
#include <vector>
-#include "common/common_types.h"
namespace Common {
diff --git a/src/common/scm_rev.cpp.in b/src/common/scm_rev.cpp.in
index cc88994c6..f0c124d69 100644
--- a/src/common/scm_rev.cpp.in
+++ b/src/common/scm_rev.cpp.in
@@ -1,6 +1,5 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/scm_rev.h"
diff --git a/src/common/scm_rev.h b/src/common/scm_rev.h
index 563015ec9..88404316a 100644
--- a/src/common/scm_rev.h
+++ b/src/common/scm_rev.h
@@ -1,6 +1,5 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/scope_exit.h b/src/common/scope_exit.h
index 35dac3a8f..e9c789c88 100644
--- a/src/common/scope_exit.h
+++ b/src/common/scope_exit.h
@@ -1,6 +1,5 @@
-// Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/settings.cpp b/src/common/settings.cpp
index 6964a8273..0a560ebb7 100644
--- a/src/common/settings.cpp
+++ b/src/common/settings.cpp
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <string_view>
@@ -11,7 +10,7 @@
namespace Settings {
-Values values = {};
+Values values;
static bool configuring_global = true;
std::string GetTimeZoneString() {
@@ -63,7 +62,8 @@ void LogSettings() {
log_setting("Renderer_UseAsynchronousShaders", values.use_asynchronous_shaders.GetValue());
log_setting("Renderer_AnisotropicFilteringLevel", values.max_anisotropy.GetValue());
log_setting("Audio_OutputEngine", values.sink_id.GetValue());
- log_setting("Audio_OutputDevice", values.audio_device_id.GetValue());
+ log_setting("Audio_OutputDevice", values.audio_output_device_id.GetValue());
+ log_setting("Audio_InputDevice", values.audio_input_device_id.GetValue());
log_setting("DataStorage_UseVirtualSd", values.use_virtual_sd.GetValue());
log_path("DataStorage_CacheDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::CacheDir));
log_path("DataStorage_ConfigDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::ConfigDir));
@@ -71,6 +71,7 @@ void LogSettings() {
log_path("DataStorage_NANDDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::NANDDir));
log_path("DataStorage_SDMCDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::SDMCDir));
log_setting("Debugging_ProgramArgs", values.program_args.GetValue());
+ log_setting("Debugging_GDBStub", values.use_gdbstub.GetValue());
log_setting("Input_EnableMotion", values.motion_enabled.GetValue());
log_setting("Input_EnableVibration", values.vibration_enabled.GetValue());
log_setting("Input_EnableRawInput", values.enable_raw_input.GetValue());
@@ -104,7 +105,7 @@ float Volume() {
if (values.audio_muted) {
return 0.0f;
}
- return values.volume.GetValue() / 100.0f;
+ return values.volume.GetValue() / static_cast<f32>(values.volume.GetDefault());
}
void UpdateRescalingInfo() {
@@ -147,7 +148,7 @@ void UpdateRescalingInfo() {
info.down_shift = 0;
break;
default:
- UNREACHABLE();
+ ASSERT(false);
info.up_scale = 1;
info.down_shift = 0;
}
@@ -167,6 +168,7 @@ void RestoreGlobalState(bool is_powered_on) {
// Core
values.use_multi_core.SetGlobal(true);
+ values.use_extended_memory_layout.SetGlobal(true);
// CPU
values.cpu_accuracy.SetGlobal(true);
@@ -175,6 +177,7 @@ void RestoreGlobalState(bool is_powered_on) {
values.cpuopt_unsafe_ignore_standard_fpcr.SetGlobal(true);
values.cpuopt_unsafe_inaccurate_nan.SetGlobal(true);
values.cpuopt_unsafe_fastmem_check.SetGlobal(true);
+ values.cpuopt_unsafe_ignore_global_monitor.SetGlobal(true);
// Renderer
values.renderer_backend.SetGlobal(true);
@@ -183,7 +186,6 @@ void RestoreGlobalState(bool is_powered_on) {
values.max_anisotropy.SetGlobal(true);
values.use_speed_limit.SetGlobal(true);
values.speed_limit.SetGlobal(true);
- values.fps_cap.SetGlobal(true);
values.use_disk_shader_cache.SetGlobal(true);
values.gpu_accuracy.SetGlobal(true);
values.use_asynchronous_gpu_emulation.SetGlobal(true);
@@ -193,6 +195,7 @@ void RestoreGlobalState(bool is_powered_on) {
values.shader_backend.SetGlobal(true);
values.use_asynchronous_shaders.SetGlobal(true);
values.use_fast_gpu_time.SetGlobal(true);
+ values.use_pessimistic_flushes.SetGlobal(true);
values.bg_red.SetGlobal(true);
values.bg_green.SetGlobal(true);
values.bg_blue.SetGlobal(true);
diff --git a/src/common/settings.h b/src/common/settings.h
index 9bee6e10f..d2452c93b 100644
--- a/src/common/settings.h
+++ b/src/common/settings.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -38,6 +37,7 @@ enum class CPUAccuracy : u32 {
Auto = 0,
Accurate = 1,
Unsafe = 2,
+ Paranoid = 3,
};
enum class FullscreenMode : u32 {
@@ -101,15 +101,15 @@ struct ResolutionScalingInfo {
}
};
-/** The BasicSetting class is a simple resource manager. It defines a label and default value
- * alongside the actual value of the setting for simpler and less-error prone use with frontend
- * configurations. Setting a default value and label is required, though subclasses may deviate from
- * this requirement.
+/** The Setting class is a simple resource manager. It defines a label and default value alongside
+ * the actual value of the setting for simpler and less-error prone use with frontend
+ * configurations. Specifying a default value and label is required. A minimum and maximum range can
+ * be specified for sanitization.
*/
-template <typename Type>
-class BasicSetting {
+template <typename Type, bool ranged = false>
+class Setting {
protected:
- BasicSetting() = default;
+ Setting() = default;
/**
* Only sets the setting to the given initializer, leaving the other members to their default
@@ -117,7 +117,7 @@ protected:
*
* @param global_val Initial value of the setting
*/
- explicit BasicSetting(const Type& global_val) : global{global_val} {}
+ explicit Setting(const Type& val) : value{val} {}
public:
/**
@@ -126,9 +126,22 @@ public:
* @param default_val Intial value of the setting, and default value of the setting
* @param name Label for the setting
*/
- explicit BasicSetting(const Type& default_val, const std::string& name)
- : default_value{default_val}, global{default_val}, label{name} {}
- virtual ~BasicSetting() = default;
+ explicit Setting(const Type& default_val, const std::string& name) requires(!ranged)
+ : value{default_val}, default_value{default_val}, label{name} {}
+ virtual ~Setting() = default;
+
+ /**
+ * Sets a default value, minimum value, maximum value, and label.
+ *
+ * @param default_val Initial value of the setting, and default value of the setting
+ * @param min_val Sets the minimum allowed value of the setting
+ * @param max_val Sets the maximum allowed value of the setting
+ * @param name Label for the setting
+ */
+ explicit Setting(const Type& default_val, const Type& min_val, const Type& max_val,
+ const std::string& name) requires(ranged)
+ : value{default_val},
+ default_value{default_val}, maximum{max_val}, minimum{min_val}, label{name} {}
/**
* Returns a reference to the setting's value.
@@ -136,17 +149,17 @@ public:
* @returns A reference to the setting
*/
[[nodiscard]] virtual const Type& GetValue() const {
- return global;
+ return value;
}
/**
* Sets the setting to the given value.
*
- * @param value The desired value
+ * @param val The desired value
*/
- virtual void SetValue(const Type& value) {
- Type temp{value};
- std::swap(global, temp);
+ virtual void SetValue(const Type& val) {
+ Type temp{ranged ? std::clamp(val, minimum, maximum) : val};
+ std::swap(value, temp);
}
/**
@@ -170,14 +183,14 @@ public:
/**
* Assigns a value to the setting.
*
- * @param value The desired setting value
+ * @param val The desired setting value
*
* @returns A reference to the setting
*/
- virtual const Type& operator=(const Type& value) {
- Type temp{value};
- std::swap(global, temp);
- return global;
+ virtual const Type& operator=(const Type& val) {
+ Type temp{ranged ? std::clamp(val, minimum, maximum) : val};
+ std::swap(value, temp);
+ return value;
}
/**
@@ -186,72 +199,27 @@ public:
* @returns A reference to the setting
*/
explicit virtual operator const Type&() const {
- return global;
+ return value;
}
protected:
+ Type value{}; ///< The setting
const Type default_value{}; ///< The default value
- Type global{}; ///< The setting
+ const Type maximum{}; ///< Maximum allowed value of the setting
+ const Type minimum{}; ///< Minimum allowed value of the setting
const std::string label{}; ///< The setting's label
};
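A brief usage sketch of the merged template, assuming the Settings namespace; the setting names below are hypothetical and only illustrate the clamping behavior implemented above:

    // Ranged settings clamp on SetValue() and assignment; unranged ones store the value as-is.
    Settings::Setting<int, true> example_scale{100, 50, 200, "example_scale"}; // hypothetical
    example_scale.SetValue(500); // stored value becomes 200 (clamped to the maximum)
    example_scale = 10;          // stored value becomes 50 (clamped to the minimum)

    Settings::Setting<bool> example_flag{true, "example_flag"}; // hypothetical, unranged
    example_flag.SetValue(false); // stored unmodified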
/**
- * BasicRangedSetting class is intended for use with quantifiable settings that need a more
- * restrictive range than implicitly defined by its type. Implements a minimum and maximum that is
- * simply used to sanitize SetValue and the assignment overload.
- */
-template <typename Type>
-class BasicRangedSetting : virtual public BasicSetting<Type> {
-public:
- /**
- * Sets a default value, minimum value, maximum value, and label.
- *
- * @param default_val Intial value of the setting, and default value of the setting
- * @param min_val Sets the minimum allowed value of the setting
- * @param max_val Sets the maximum allowed value of the setting
- * @param name Label for the setting
- */
- explicit BasicRangedSetting(const Type& default_val, const Type& min_val, const Type& max_val,
- const std::string& name)
- : BasicSetting<Type>{default_val, name}, minimum{min_val}, maximum{max_val} {}
- virtual ~BasicRangedSetting() = default;
-
- /**
- * Like BasicSetting's SetValue, except value is clamped to the range of the setting.
- *
- * @param value The desired value
- */
- void SetValue(const Type& value) override {
- this->global = std::clamp(value, minimum, maximum);
- }
-
- /**
- * Like BasicSetting's assignment overload, except value is clamped to the range of the setting.
- *
- * @param value The desired value
- * @returns A reference to the setting's value
- */
- const Type& operator=(const Type& value) override {
- this->global = std::clamp(value, minimum, maximum);
- return this->global;
- }
-
- const Type minimum; ///< Minimum allowed value of the setting
- const Type maximum; ///< Maximum allowed value of the setting
-};
-
-/**
- * The Setting class is a slightly more complex version of the BasicSetting class. This adds a
+ * The SwitchableSetting class is a slightly more complex version of the Setting class. This adds a
* custom setting to switch to when a guest application specifically requires it. The effect is that
* other components of the emulator can access the setting's intended value without any need for the
* component to ask whether the custom or global setting is needed at the moment.
*
* By default, the global setting is used.
- *
- * Like the BasicSetting, this requires setting a default value and label to use.
*/
-template <typename Type>
-class Setting : virtual public BasicSetting<Type> {
+template <typename Type, bool ranged = false>
+class SwitchableSetting : virtual public Setting<Type, ranged> {
public:
/**
* Sets a default value, label, and setting value.
@@ -259,9 +227,21 @@ public:
* @param default_val Intial value of the setting, and default value of the setting
* @param name Label for the setting
*/
- explicit Setting(const Type& default_val, const std::string& name)
- : BasicSetting<Type>(default_val, name) {}
- virtual ~Setting() = default;
+ explicit SwitchableSetting(const Type& default_val, const std::string& name) requires(!ranged)
+ : Setting<Type>{default_val, name} {}
+ virtual ~SwitchableSetting() = default;
+
+ /**
+ * Sets a default value, minimum value, maximum value, and label.
+ *
+ * @param default_val Intial value of the setting, and default value of the setting
+ * @param min_val Sets the minimum allowed value of the setting
+ * @param max_val Sets the maximum allowed value of the setting
+ * @param name Label for the setting
+ */
+ explicit SwitchableSetting(const Type& default_val, const Type& min_val, const Type& max_val,
+ const std::string& name) requires(ranged)
+ : Setting<Type, true>{default_val, min_val, max_val, name} {}
/**
* Tells this setting to represent either the global or custom setting when other member
@@ -292,13 +272,13 @@ public:
*/
[[nodiscard]] virtual const Type& GetValue() const override {
if (use_global) {
- return this->global;
+ return this->value;
}
return custom;
}
[[nodiscard]] virtual const Type& GetValue(bool need_global) const {
if (use_global || need_global) {
- return this->global;
+ return this->value;
}
return custom;
}
@@ -306,12 +286,12 @@ public:
/**
* Sets the current setting value depending on the global state.
*
- * @param value The new value
+ * @param val The new value
*/
- void SetValue(const Type& value) override {
- Type temp{value};
+ void SetValue(const Type& val) override {
+ Type temp{ranged ? std::clamp(val, this->minimum, this->maximum) : val};
if (use_global) {
- std::swap(this->global, temp);
+ std::swap(this->value, temp);
} else {
std::swap(custom, temp);
}
@@ -320,15 +300,15 @@ public:
/**
* Assigns the current setting value depending on the global state.
*
- * @param value The new value
+ * @param val The new value
*
* @returns A reference to the current setting value
*/
- const Type& operator=(const Type& value) override {
- Type temp{value};
+ const Type& operator=(const Type& val) override {
+ Type temp{ranged ? std::clamp(val, this->minimum, this->maximum) : val};
if (use_global) {
- std::swap(this->global, temp);
- return this->global;
+ std::swap(this->value, temp);
+ return this->value;
}
std::swap(custom, temp);
return custom;
@@ -341,7 +321,7 @@ public:
*/
virtual explicit operator const Type&() const override {
if (use_global) {
- return this->global;
+ return this->value;
}
return custom;
}
@@ -352,75 +332,6 @@ protected:
};
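A sketch of the global/custom switch described above, assuming the Settings namespace and a hypothetical setting name:

    // use_global starts as true, so writes go to the global value until SetGlobal(false).
    Settings::SwitchableSetting<u16, true> example_limit{100, 0, 9999, "example_limit"};
    example_limit.SetValue(120);    // updates the global value
    example_limit.SetGlobal(false); // switch this setting to its per-game (custom) value
    example_limit.SetValue(60);     // updates the custom value; the global 120 is untouched
    // GetValue() now returns 60, while GetValue(true) still returns the global 120.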
/**
- * RangedSetting is a Setting that implements a maximum and minimum value for its setting. Intended
- * for use with quantifiable settings.
- */
-template <typename Type>
-class RangedSetting final : public BasicRangedSetting<Type>, public Setting<Type> {
-public:
- /**
- * Sets a default value, minimum value, maximum value, and label.
- *
- * @param default_val Intial value of the setting, and default value of the setting
- * @param min_val Sets the minimum allowed value of the setting
- * @param max_val Sets the maximum allowed value of the setting
- * @param name Label for the setting
- */
- explicit RangedSetting(const Type& default_val, const Type& min_val, const Type& max_val,
- const std::string& name)
- : BasicSetting<Type>{default_val, name},
- BasicRangedSetting<Type>{default_val, min_val, max_val, name}, Setting<Type>{default_val,
- name} {}
- virtual ~RangedSetting() = default;
-
- // The following are needed to avoid a MSVC bug
- // (source: https://stackoverflow.com/questions/469508)
- [[nodiscard]] const Type& GetValue() const override {
- return Setting<Type>::GetValue();
- }
- [[nodiscard]] const Type& GetValue(bool need_global) const override {
- return Setting<Type>::GetValue(need_global);
- }
- explicit operator const Type&() const override {
- if (this->use_global) {
- return this->global;
- }
- return this->custom;
- }
-
- /**
- * Like BasicSetting's SetValue, except value is clamped to the range of the setting. Sets the
- * appropriate value depending on the global state.
- *
- * @param value The desired value
- */
- void SetValue(const Type& value) override {
- const Type temp = std::clamp(value, this->minimum, this->maximum);
- if (this->use_global) {
- this->global = temp;
- }
- this->custom = temp;
- }
-
- /**
- * Like BasicSetting's assignment overload, except value is clamped to the range of the setting.
- * Uses the appropriate value depending on the global state.
- *
- * @param value The desired value
- * @returns A reference to the setting's value
- */
- const Type& operator=(const Type& value) override {
- const Type temp = std::clamp(value, this->minimum, this->maximum);
- if (this->use_global) {
- this->global = temp;
- return this->global;
- }
- this->custom = temp;
- return this->custom;
- }
-};
-
-/**
* The InputSetting class allows for getting a reference to either the global or custom members.
* This is required as we cannot easily modify the values of user-defined types within containers
* using the SetValue() member function found in the Setting class. The primary purpose of this
@@ -431,7 +342,7 @@ template <typename Type>
class InputSetting final {
public:
InputSetting() = default;
- explicit InputSetting(Type val) : BasicSetting<Type>(val) {}
+ explicit InputSetting(Type val) : Setting<Type>(val) {}
~InputSetting() = default;
void SetGlobal(bool to_global) {
use_global = to_global;
@@ -459,167 +370,181 @@ struct TouchFromButtonMap {
struct Values {
// Audio
- BasicSetting<std::string> audio_device_id{"auto", "output_device"};
- BasicSetting<std::string> sink_id{"auto", "output_engine"};
- BasicSetting<bool> audio_muted{false, "audio_muted"};
- RangedSetting<u8> volume{100, 0, 100, "volume"};
+ Setting<std::string> sink_id{"auto", "output_engine"};
+ Setting<std::string> audio_output_device_id{"auto", "output_device"};
+ Setting<std::string> audio_input_device_id{"auto", "input_device"};
+ Setting<bool> audio_muted{false, "audio_muted"};
+ SwitchableSetting<u8, true> volume{100, 0, 200, "volume"};
+ Setting<bool> dump_audio_commands{false, "dump_audio_commands"};
// Core
- Setting<bool> use_multi_core{true, "use_multi_core"};
+ SwitchableSetting<bool> use_multi_core{true, "use_multi_core"};
+ SwitchableSetting<bool> use_extended_memory_layout{false, "use_extended_memory_layout"};
// Cpu
- RangedSetting<CPUAccuracy> cpu_accuracy{CPUAccuracy::Auto, CPUAccuracy::Auto,
- CPUAccuracy::Unsafe, "cpu_accuracy"};
+ SwitchableSetting<CPUAccuracy, true> cpu_accuracy{CPUAccuracy::Auto, CPUAccuracy::Auto,
+ CPUAccuracy::Paranoid, "cpu_accuracy"};
// TODO: remove cpu_accuracy_first_time, migration setting added 8 July 2021
- BasicSetting<bool> cpu_accuracy_first_time{true, "cpu_accuracy_first_time"};
- BasicSetting<bool> cpu_debug_mode{false, "cpu_debug_mode"};
-
- BasicSetting<bool> cpuopt_page_tables{true, "cpuopt_page_tables"};
- BasicSetting<bool> cpuopt_block_linking{true, "cpuopt_block_linking"};
- BasicSetting<bool> cpuopt_return_stack_buffer{true, "cpuopt_return_stack_buffer"};
- BasicSetting<bool> cpuopt_fast_dispatcher{true, "cpuopt_fast_dispatcher"};
- BasicSetting<bool> cpuopt_context_elimination{true, "cpuopt_context_elimination"};
- BasicSetting<bool> cpuopt_const_prop{true, "cpuopt_const_prop"};
- BasicSetting<bool> cpuopt_misc_ir{true, "cpuopt_misc_ir"};
- BasicSetting<bool> cpuopt_reduce_misalign_checks{true, "cpuopt_reduce_misalign_checks"};
- BasicSetting<bool> cpuopt_fastmem{true, "cpuopt_fastmem"};
-
- Setting<bool> cpuopt_unsafe_unfuse_fma{true, "cpuopt_unsafe_unfuse_fma"};
- Setting<bool> cpuopt_unsafe_reduce_fp_error{true, "cpuopt_unsafe_reduce_fp_error"};
- Setting<bool> cpuopt_unsafe_ignore_standard_fpcr{true, "cpuopt_unsafe_ignore_standard_fpcr"};
- Setting<bool> cpuopt_unsafe_inaccurate_nan{true, "cpuopt_unsafe_inaccurate_nan"};
- Setting<bool> cpuopt_unsafe_fastmem_check{true, "cpuopt_unsafe_fastmem_check"};
+ Setting<bool> cpu_accuracy_first_time{true, "cpu_accuracy_first_time"};
+ Setting<bool> cpu_debug_mode{false, "cpu_debug_mode"};
+
+ Setting<bool> cpuopt_page_tables{true, "cpuopt_page_tables"};
+ Setting<bool> cpuopt_block_linking{true, "cpuopt_block_linking"};
+ Setting<bool> cpuopt_return_stack_buffer{true, "cpuopt_return_stack_buffer"};
+ Setting<bool> cpuopt_fast_dispatcher{true, "cpuopt_fast_dispatcher"};
+ Setting<bool> cpuopt_context_elimination{true, "cpuopt_context_elimination"};
+ Setting<bool> cpuopt_const_prop{true, "cpuopt_const_prop"};
+ Setting<bool> cpuopt_misc_ir{true, "cpuopt_misc_ir"};
+ Setting<bool> cpuopt_reduce_misalign_checks{true, "cpuopt_reduce_misalign_checks"};
+ Setting<bool> cpuopt_fastmem{true, "cpuopt_fastmem"};
+ Setting<bool> cpuopt_fastmem_exclusives{true, "cpuopt_fastmem_exclusives"};
+ Setting<bool> cpuopt_recompile_exclusives{true, "cpuopt_recompile_exclusives"};
+
+ SwitchableSetting<bool> cpuopt_unsafe_unfuse_fma{true, "cpuopt_unsafe_unfuse_fma"};
+ SwitchableSetting<bool> cpuopt_unsafe_reduce_fp_error{true, "cpuopt_unsafe_reduce_fp_error"};
+ SwitchableSetting<bool> cpuopt_unsafe_ignore_standard_fpcr{
+ true, "cpuopt_unsafe_ignore_standard_fpcr"};
+ SwitchableSetting<bool> cpuopt_unsafe_inaccurate_nan{true, "cpuopt_unsafe_inaccurate_nan"};
+ SwitchableSetting<bool> cpuopt_unsafe_fastmem_check{true, "cpuopt_unsafe_fastmem_check"};
+ SwitchableSetting<bool> cpuopt_unsafe_ignore_global_monitor{
+ true, "cpuopt_unsafe_ignore_global_monitor"};
// Renderer
- RangedSetting<RendererBackend> renderer_backend{
- RendererBackend::OpenGL, RendererBackend::OpenGL, RendererBackend::Vulkan, "backend"};
- BasicSetting<bool> renderer_debug{false, "debug"};
- BasicSetting<bool> renderer_shader_feedback{false, "shader_feedback"};
- BasicSetting<bool> enable_nsight_aftermath{false, "nsight_aftermath"};
- BasicSetting<bool> disable_shader_loop_safety_checks{false,
- "disable_shader_loop_safety_checks"};
- Setting<int> vulkan_device{0, "vulkan_device"};
+ SwitchableSetting<RendererBackend, true> renderer_backend{
+ RendererBackend::Vulkan, RendererBackend::OpenGL, RendererBackend::Vulkan, "backend"};
+ Setting<bool> renderer_debug{false, "debug"};
+ Setting<bool> renderer_shader_feedback{false, "shader_feedback"};
+ Setting<bool> enable_nsight_aftermath{false, "nsight_aftermath"};
+ Setting<bool> disable_shader_loop_safety_checks{false, "disable_shader_loop_safety_checks"};
+ SwitchableSetting<int> vulkan_device{0, "vulkan_device"};
ResolutionScalingInfo resolution_info{};
- Setting<ResolutionSetup> resolution_setup{ResolutionSetup::Res1X, "resolution_setup"};
- Setting<ScalingFilter> scaling_filter{ScalingFilter::Bilinear, "scaling_filter"};
- Setting<AntiAliasing> anti_aliasing{AntiAliasing::None, "anti_aliasing"};
+ SwitchableSetting<ResolutionSetup> resolution_setup{ResolutionSetup::Res1X, "resolution_setup"};
+ SwitchableSetting<ScalingFilter> scaling_filter{ScalingFilter::Bilinear, "scaling_filter"};
+ SwitchableSetting<AntiAliasing> anti_aliasing{AntiAliasing::None, "anti_aliasing"};
// *nix platforms may have issues with the borderless windowed fullscreen mode.
// Default to exclusive fullscreen on these platforms for now.
- RangedSetting<FullscreenMode> fullscreen_mode{
+ SwitchableSetting<FullscreenMode, true> fullscreen_mode{
#ifdef _WIN32
FullscreenMode::Borderless,
#else
FullscreenMode::Exclusive,
#endif
FullscreenMode::Borderless, FullscreenMode::Exclusive, "fullscreen_mode"};
- RangedSetting<int> aspect_ratio{0, 0, 3, "aspect_ratio"};
- RangedSetting<int> max_anisotropy{0, 0, 5, "max_anisotropy"};
- Setting<bool> use_speed_limit{true, "use_speed_limit"};
- RangedSetting<u16> speed_limit{100, 0, 9999, "speed_limit"};
- Setting<bool> use_disk_shader_cache{true, "use_disk_shader_cache"};
- RangedSetting<GPUAccuracy> gpu_accuracy{GPUAccuracy::High, GPUAccuracy::Normal,
- GPUAccuracy::Extreme, "gpu_accuracy"};
- Setting<bool> use_asynchronous_gpu_emulation{true, "use_asynchronous_gpu_emulation"};
- Setting<NvdecEmulation> nvdec_emulation{NvdecEmulation::GPU, "nvdec_emulation"};
- Setting<bool> accelerate_astc{true, "accelerate_astc"};
- Setting<bool> use_vsync{true, "use_vsync"};
- RangedSetting<u16> fps_cap{1000, 1, 1000, "fps_cap"};
- BasicSetting<bool> disable_fps_limit{false, "disable_fps_limit"};
- RangedSetting<ShaderBackend> shader_backend{ShaderBackend::GLASM, ShaderBackend::GLSL,
- ShaderBackend::SPIRV, "shader_backend"};
- Setting<bool> use_asynchronous_shaders{false, "use_asynchronous_shaders"};
- Setting<bool> use_fast_gpu_time{true, "use_fast_gpu_time"};
-
- Setting<u8> bg_red{0, "bg_red"};
- Setting<u8> bg_green{0, "bg_green"};
- Setting<u8> bg_blue{0, "bg_blue"};
+ SwitchableSetting<int, true> aspect_ratio{0, 0, 3, "aspect_ratio"};
+ SwitchableSetting<int, true> max_anisotropy{0, 0, 5, "max_anisotropy"};
+ SwitchableSetting<bool> use_speed_limit{true, "use_speed_limit"};
+ SwitchableSetting<u16, true> speed_limit{100, 0, 9999, "speed_limit"};
+ SwitchableSetting<bool> use_disk_shader_cache{true, "use_disk_shader_cache"};
+ SwitchableSetting<GPUAccuracy, true> gpu_accuracy{GPUAccuracy::High, GPUAccuracy::Normal,
+ GPUAccuracy::Extreme, "gpu_accuracy"};
+ SwitchableSetting<bool> use_asynchronous_gpu_emulation{true, "use_asynchronous_gpu_emulation"};
+ SwitchableSetting<NvdecEmulation> nvdec_emulation{NvdecEmulation::GPU, "nvdec_emulation"};
+ SwitchableSetting<bool> accelerate_astc{true, "accelerate_astc"};
+ SwitchableSetting<bool> use_vsync{true, "use_vsync"};
+ SwitchableSetting<ShaderBackend, true> shader_backend{ShaderBackend::GLASM, ShaderBackend::GLSL,
+ ShaderBackend::SPIRV, "shader_backend"};
+ SwitchableSetting<bool> use_asynchronous_shaders{false, "use_asynchronous_shaders"};
+ SwitchableSetting<bool> use_fast_gpu_time{true, "use_fast_gpu_time"};
+ SwitchableSetting<bool> use_pessimistic_flushes{false, "use_pessimistic_flushes"};
+
+ SwitchableSetting<u8> bg_red{0, "bg_red"};
+ SwitchableSetting<u8> bg_green{0, "bg_green"};
+ SwitchableSetting<u8> bg_blue{0, "bg_blue"};
// System
- Setting<std::optional<u32>> rng_seed{std::optional<u32>(), "rng_seed"};
+ SwitchableSetting<std::optional<u32>> rng_seed{std::optional<u32>(), "rng_seed"};
// Measured in seconds since epoch
std::optional<s64> custom_rtc;
// Set on game boot, reset on stop. Seconds difference between current time and `custom_rtc`
s64 custom_rtc_differential;
- BasicSetting<s32> current_user{0, "current_user"};
- RangedSetting<s32> language_index{1, 0, 17, "language_index"};
- RangedSetting<s32> region_index{1, 0, 6, "region_index"};
- RangedSetting<s32> time_zone_index{0, 0, 45, "time_zone_index"};
- RangedSetting<s32> sound_index{1, 0, 2, "sound_index"};
+ Setting<s32> current_user{0, "current_user"};
+ SwitchableSetting<s32, true> language_index{1, 0, 17, "language_index"};
+ SwitchableSetting<s32, true> region_index{1, 0, 6, "region_index"};
+ SwitchableSetting<s32, true> time_zone_index{0, 0, 45, "time_zone_index"};
+ SwitchableSetting<s32, true> sound_index{1, 0, 2, "sound_index"};
// Controls
InputSetting<std::array<PlayerInput, 10>> players;
- Setting<bool> use_docked_mode{true, "use_docked_mode"};
+ SwitchableSetting<bool> use_docked_mode{true, "use_docked_mode"};
- BasicSetting<bool> enable_raw_input{false, "enable_raw_input"};
- BasicSetting<bool> controller_navigation{true, "controller_navigation"};
+ Setting<bool> enable_raw_input{false, "enable_raw_input"};
+ Setting<bool> controller_navigation{true, "controller_navigation"};
- Setting<bool> vibration_enabled{true, "vibration_enabled"};
- Setting<bool> enable_accurate_vibrations{false, "enable_accurate_vibrations"};
+ SwitchableSetting<bool> vibration_enabled{true, "vibration_enabled"};
+ SwitchableSetting<bool> enable_accurate_vibrations{false, "enable_accurate_vibrations"};
- Setting<bool> motion_enabled{true, "motion_enabled"};
- BasicSetting<std::string> udp_input_servers{"127.0.0.1:26760", "udp_input_servers"};
- BasicSetting<bool> enable_udp_controller{false, "enable_udp_controller"};
+ SwitchableSetting<bool> motion_enabled{true, "motion_enabled"};
+ Setting<std::string> udp_input_servers{"127.0.0.1:26760", "udp_input_servers"};
+ Setting<bool> enable_udp_controller{false, "enable_udp_controller"};
- BasicSetting<bool> pause_tas_on_load{true, "pause_tas_on_load"};
- BasicSetting<bool> tas_enable{false, "tas_enable"};
- BasicSetting<bool> tas_loop{false, "tas_loop"};
+ Setting<bool> pause_tas_on_load{true, "pause_tas_on_load"};
+ Setting<bool> tas_enable{false, "tas_enable"};
+ Setting<bool> tas_loop{false, "tas_loop"};
- BasicSetting<bool> mouse_panning{false, "mouse_panning"};
- BasicRangedSetting<u8> mouse_panning_sensitivity{10, 1, 100, "mouse_panning_sensitivity"};
- BasicSetting<bool> mouse_enabled{false, "mouse_enabled"};
+ Setting<bool> mouse_panning{false, "mouse_panning"};
+ Setting<u8, true> mouse_panning_sensitivity{10, 1, 100, "mouse_panning_sensitivity"};
+ Setting<bool> mouse_enabled{false, "mouse_enabled"};
- BasicSetting<bool> emulate_analog_keyboard{false, "emulate_analog_keyboard"};
- BasicSetting<bool> keyboard_enabled{false, "keyboard_enabled"};
+ Setting<bool> emulate_analog_keyboard{false, "emulate_analog_keyboard"};
+ Setting<bool> keyboard_enabled{false, "keyboard_enabled"};
- BasicSetting<bool> debug_pad_enabled{false, "debug_pad_enabled"};
+ Setting<bool> debug_pad_enabled{false, "debug_pad_enabled"};
ButtonsRaw debug_pad_buttons;
AnalogsRaw debug_pad_analogs;
TouchscreenInput touchscreen;
- BasicSetting<std::string> touch_device{"min_x:100,min_y:50,max_x:1800,max_y:850",
- "touch_device"};
- BasicSetting<int> touch_from_button_map_index{0, "touch_from_button_map"};
+ Setting<std::string> touch_device{"min_x:100,min_y:50,max_x:1800,max_y:850", "touch_device"};
+ Setting<int> touch_from_button_map_index{0, "touch_from_button_map"};
std::vector<TouchFromButtonMap> touch_from_button_maps;
+ Setting<bool> enable_ring_controller{true, "enable_ring_controller"};
+ RingconRaw ringcon_analogs;
+
+ Setting<bool> enable_ir_sensor{false, "enable_ir_sensor"};
+ Setting<std::string> ir_sensor_device{"auto", "ir_sensor_device"};
+
// Data Storage
- BasicSetting<bool> use_virtual_sd{true, "use_virtual_sd"};
- BasicSetting<bool> gamecard_inserted{false, "gamecard_inserted"};
- BasicSetting<bool> gamecard_current_game{false, "gamecard_current_game"};
- BasicSetting<std::string> gamecard_path{std::string(), "gamecard_path"};
+ Setting<bool> use_virtual_sd{true, "use_virtual_sd"};
+ Setting<bool> gamecard_inserted{false, "gamecard_inserted"};
+ Setting<bool> gamecard_current_game{false, "gamecard_current_game"};
+ Setting<std::string> gamecard_path{std::string(), "gamecard_path"};
// Debugging
bool record_frame_times;
- BasicSetting<bool> use_gdbstub{false, "use_gdbstub"};
- BasicSetting<u16> gdbstub_port{0, "gdbstub_port"};
- BasicSetting<std::string> program_args{std::string(), "program_args"};
- BasicSetting<bool> dump_exefs{false, "dump_exefs"};
- BasicSetting<bool> dump_nso{false, "dump_nso"};
- BasicSetting<bool> dump_shaders{false, "dump_shaders"};
- BasicSetting<bool> enable_fs_access_log{false, "enable_fs_access_log"};
- BasicSetting<bool> reporting_services{false, "reporting_services"};
- BasicSetting<bool> quest_flag{false, "quest_flag"};
- BasicSetting<bool> disable_macro_jit{false, "disable_macro_jit"};
- BasicSetting<bool> extended_logging{false, "extended_logging"};
- BasicSetting<bool> use_debug_asserts{false, "use_debug_asserts"};
- BasicSetting<bool> use_auto_stub{false, "use_auto_stub"};
- BasicSetting<bool> enable_all_controllers{false, "enable_all_controllers"};
+ Setting<bool> use_gdbstub{false, "use_gdbstub"};
+ Setting<u16> gdbstub_port{6543, "gdbstub_port"};
+ Setting<std::string> program_args{std::string(), "program_args"};
+ Setting<bool> dump_exefs{false, "dump_exefs"};
+ Setting<bool> dump_nso{false, "dump_nso"};
+ Setting<bool> dump_shaders{false, "dump_shaders"};
+ Setting<bool> dump_macros{false, "dump_macros"};
+ Setting<bool> enable_fs_access_log{false, "enable_fs_access_log"};
+ Setting<bool> reporting_services{false, "reporting_services"};
+ Setting<bool> quest_flag{false, "quest_flag"};
+ Setting<bool> disable_macro_jit{false, "disable_macro_jit"};
+ Setting<bool> extended_logging{false, "extended_logging"};
+ Setting<bool> use_debug_asserts{false, "use_debug_asserts"};
+ Setting<bool> use_auto_stub{false, "use_auto_stub"};
+ Setting<bool> enable_all_controllers{false, "enable_all_controllers"};
+ Setting<bool> create_crash_dumps{false, "create_crash_dumps"};
+ Setting<bool> perform_vulkan_check{true, "perform_vulkan_check"};
// Miscellaneous
- BasicSetting<std::string> log_filter{"*:Info", "log_filter"};
- BasicSetting<bool> use_dev_keys{false, "use_dev_keys"};
+ Setting<std::string> log_filter{"*:Info", "log_filter"};
+ Setting<bool> use_dev_keys{false, "use_dev_keys"};
// Network
- BasicSetting<std::string> network_interface{std::string(), "network_interface"};
+ Setting<std::string> network_interface{std::string(), "network_interface"};
// WebService
- BasicSetting<bool> enable_telemetry{true, "enable_telemetry"};
- BasicSetting<std::string> web_api_url{"https://api.yuzu-emu.org", "web_api_url"};
- BasicSetting<std::string> yuzu_username{std::string(), "yuzu_username"};
- BasicSetting<std::string> yuzu_token{std::string(), "yuzu_token"};
+ Setting<bool> enable_telemetry{true, "enable_telemetry"};
+ Setting<std::string> web_api_url{"https://api.yuzu-emu.org", "web_api_url"};
+ Setting<std::string> yuzu_username{std::string(), "yuzu_username"};
+ Setting<std::string> yuzu_token{std::string(), "yuzu_token"};
// Add-Ons
std::map<u64, std::vector<std::string>> disabled_addons;
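For context, consumers read these members through the shared Settings::values instance, as the settings.cpp hunks above do; a hedged sketch of typical reads:

    // Illustrative only: GetValue() resolves global vs. per-game values for SwitchableSettings.
    const bool docked = Settings::values.use_docked_mode.GetValue();
    const auto cpu_accuracy = Settings::values.cpu_accuracy.GetValue(); // clamped to Auto..Paranoid
    const u8 volume = Settings::values.volume.GetValue();               // 0..200 after this change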
diff --git a/src/common/settings_input.cpp b/src/common/settings_input.cpp
index bea2b837b..0a6eea3cf 100644
--- a/src/common/settings_input.cpp
+++ b/src/common/settings_input.cpp
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/settings_input.h"
diff --git a/src/common/settings_input.h b/src/common/settings_input.h
index 4ff37e186..485e4ad22 100644
--- a/src/common/settings_input.h
+++ b/src/common/settings_input.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -357,6 +356,7 @@ constexpr int NUM_KEYBOARD_MODS_HID = NumKeyboardMods;
using AnalogsRaw = std::array<std::string, NativeAnalog::NumAnalogs>;
using ButtonsRaw = std::array<std::string, NativeButton::NumButtons>;
using MotionsRaw = std::array<std::string, NativeMotion::NumMotions>;
+using RingconRaw = std::string;
constexpr u32 JOYCON_BODY_NEON_RED = 0xFF3C28;
constexpr u32 JOYCON_BUTTONS_NEON_RED = 0x1E0A0A;
diff --git a/src/common/socket_types.h b/src/common/socket_types.h
new file mode 100644
index 000000000..0a801a443
--- /dev/null
+++ b/src/common/socket_types.h
@@ -0,0 +1,51 @@
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Network {
+
+/// Address families
+enum class Domain : u8 {
+ INET, ///< Address family for IPv4
+};
+
+/// Socket types
+enum class Type {
+ STREAM,
+ DGRAM,
+ RAW,
+ SEQPACKET,
+};
+
+/// Protocol values for sockets
+enum class Protocol : u8 {
+ ICMP,
+ TCP,
+ UDP,
+};
+
+/// Shutdown mode
+enum class ShutdownHow {
+ RD,
+ WR,
+ RDWR,
+};
+
+/// Array of IPv4 address
+using IPv4Address = std::array<u8, 4>;
+
+/// Cross-platform sockaddr structure
+struct SockAddrIn {
+ Domain family;
+ IPv4Address ip;
+ u16 portno;
+};
+
+constexpr u32 FLAG_MSG_PEEK = 0x2;
+constexpr u32 FLAG_MSG_DONTWAIT = 0x80;
+constexpr u32 FLAG_O_NONBLOCK = 0x800;
+
+} // namespace Network
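A short sketch of populating the new cross-platform sockaddr type; the helper and the loopback endpoint are illustrative only, not part of the new header:

    #include "common/socket_types.h"

    // Hypothetical helper describing 127.0.0.1:<port> with the new types.
    constexpr Network::SockAddrIn MakeLoopback(u16 port) {
        return Network::SockAddrIn{
            .family = Network::Domain::INET,
            .ip = {127, 0, 0, 1},
            .portno = port,
        };
    }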
diff --git a/src/common/spin_lock.cpp b/src/common/spin_lock.cpp
index c1524220f..b2ef4ea1d 100644
--- a/src/common/spin_lock.cpp
+++ b/src/common/spin_lock.cpp
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/spin_lock.h"
diff --git a/src/common/spin_lock.h b/src/common/spin_lock.h
index 06ac2f5bb..a83274851 100644
--- a/src/common/spin_lock.h
+++ b/src/common/spin_lock.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/stream.cpp b/src/common/stream.cpp
index bf0496c26..80ddd68c8 100644
--- a/src/common/stream.cpp
+++ b/src/common/stream.cpp
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <stdexcept>
#include "common/common_types.h"
diff --git a/src/common/stream.h b/src/common/stream.h
index 0e40692de..5bb26e883 100644
--- a/src/common/stream.h
+++ b/src/common/stream.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/string_util.cpp b/src/common/string_util.cpp
index 662171138..7a495bc79 100644
--- a/src/common/string_util.cpp
+++ b/src/common/string_util.cpp
@@ -1,15 +1,13 @@
-// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2013 Dolphin Emulator Project
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <cctype>
#include <codecvt>
-#include <cstdlib>
#include <locale>
#include <sstream>
-#include "common/logging/log.h"
#include "common/string_util.h"
#ifdef _WIN32
@@ -180,6 +178,10 @@ std::wstring UTF8ToUTF16W(const std::string& input) {
#endif
+std::u16string U16StringFromBuffer(const u16* input, std::size_t length) {
+ return std::u16string(reinterpret_cast<const char16_t*>(input), length);
+}
+
std::string StringFromFixedZeroTerminatedBuffer(std::string_view buffer, std::size_t max_len) {
std::size_t len = 0;
while (len < buffer.length() && len < max_len && buffer[len] != '\0') {
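A usage sketch for the new helper, assuming it lives in the Common namespace alongside the rest of string_util:

    // Wraps guest-provided UTF-16 code units without a reinterpret_cast at the call site.
    const u16 raw_name[4] = {0x0079, 0x0075, 0x007A, 0x0075}; // "yuzu"
    const std::u16string name = Common::U16StringFromBuffer(raw_name, 4);
    // name == u"yuzu"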
diff --git a/src/common/string_util.h b/src/common/string_util.h
index f0dd632ee..ce18a33cf 100644
--- a/src/common/string_util.h
+++ b/src/common/string_util.h
@@ -1,6 +1,6 @@
-// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2013 Dolphin Emulator Project
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -44,6 +44,8 @@ bool SplitPath(const std::string& full_path, std::string* _pPath, std::string* _
#endif
+[[nodiscard]] std::u16string U16StringFromBuffer(const u16* input, std::size_t length);
+
/**
* Compares the string defined by the range [`begin`, `end`) to the null-terminated C-string
* `other` for equality.
diff --git a/src/common/swap.h b/src/common/swap.h
index a80e191dc..037b82781 100644
--- a/src/common/swap.h
+++ b/src/common/swap.h
@@ -1,16 +1,6 @@
-// Copyright (c) 2012- PPSSPP Project / Dolphin Project.
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, version 2.0 or later versions.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License 2.0 for more details.
-
-// A copy of the GPL 2.0 should have been included with the program.
-// If not, see http://www.gnu.org/licenses/
+// SPDX-FileCopyrightText: 2012 PPSSPP Project
+// SPDX-FileCopyrightText: 2012 Dolphin Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
diff --git a/src/common/telemetry.cpp b/src/common/telemetry.cpp
index 6241d08b3..d26394359 100644
--- a/src/common/telemetry.cpp
+++ b/src/common/telemetry.cpp
@@ -1,10 +1,8 @@
-// Copyright 2017 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2017 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <cstring>
-#include "common/assert.h"
#include "common/scm_rev.h"
#include "common/telemetry.h"
@@ -55,22 +53,50 @@ void AppendBuildInfo(FieldCollection& fc) {
void AppendCPUInfo(FieldCollection& fc) {
#ifdef ARCHITECTURE_x86_64
- fc.AddField(FieldType::UserSystem, "CPU_Model", Common::GetCPUCaps().cpu_string);
- fc.AddField(FieldType::UserSystem, "CPU_BrandString", Common::GetCPUCaps().brand_string);
- fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_AES", Common::GetCPUCaps().aes);
- fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_AVX", Common::GetCPUCaps().avx);
- fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_AVX2", Common::GetCPUCaps().avx2);
- fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_AVX512", Common::GetCPUCaps().avx512);
- fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_BMI1", Common::GetCPUCaps().bmi1);
- fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_BMI2", Common::GetCPUCaps().bmi2);
- fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_FMA", Common::GetCPUCaps().fma);
- fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_FMA4", Common::GetCPUCaps().fma4);
- fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE", Common::GetCPUCaps().sse);
- fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE2", Common::GetCPUCaps().sse2);
- fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE3", Common::GetCPUCaps().sse3);
- fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSSE3", Common::GetCPUCaps().ssse3);
- fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE41", Common::GetCPUCaps().sse4_1);
- fc.AddField(FieldType::UserSystem, "CPU_Extension_x64_SSE42", Common::GetCPUCaps().sse4_2);
+
+ const auto& caps = Common::GetCPUCaps();
+ const auto add_field = [&fc](std::string_view field_name, const auto& field_value) {
+ fc.AddField(FieldType::UserSystem, field_name, field_value);
+ };
+ add_field("CPU_Model", caps.cpu_string);
+ add_field("CPU_BrandString", caps.brand_string);
+
+ add_field("CPU_Extension_x64_SSE", caps.sse);
+ add_field("CPU_Extension_x64_SSE2", caps.sse2);
+ add_field("CPU_Extension_x64_SSE3", caps.sse3);
+ add_field("CPU_Extension_x64_SSSE3", caps.ssse3);
+ add_field("CPU_Extension_x64_SSE41", caps.sse4_1);
+ add_field("CPU_Extension_x64_SSE42", caps.sse4_2);
+
+ add_field("CPU_Extension_x64_AVX", caps.avx);
+ add_field("CPU_Extension_x64_AVX_VNNI", caps.avx_vnni);
+ add_field("CPU_Extension_x64_AVX2", caps.avx2);
+
+ // Skylake-X/SP level AVX512, for compatibility with the previous telemetry field
+ add_field("CPU_Extension_x64_AVX512",
+ caps.avx512f && caps.avx512cd && caps.avx512vl && caps.avx512dq && caps.avx512bw);
+
+ add_field("CPU_Extension_x64_AVX512F", caps.avx512f);
+ add_field("CPU_Extension_x64_AVX512CD", caps.avx512cd);
+ add_field("CPU_Extension_x64_AVX512VL", caps.avx512vl);
+ add_field("CPU_Extension_x64_AVX512DQ", caps.avx512dq);
+ add_field("CPU_Extension_x64_AVX512BW", caps.avx512bw);
+ add_field("CPU_Extension_x64_AVX512BITALG", caps.avx512bitalg);
+ add_field("CPU_Extension_x64_AVX512VBMI", caps.avx512vbmi);
+
+ add_field("CPU_Extension_x64_AES", caps.aes);
+ add_field("CPU_Extension_x64_BMI1", caps.bmi1);
+ add_field("CPU_Extension_x64_BMI2", caps.bmi2);
+ add_field("CPU_Extension_x64_F16C", caps.f16c);
+ add_field("CPU_Extension_x64_FMA", caps.fma);
+ add_field("CPU_Extension_x64_FMA4", caps.fma4);
+ add_field("CPU_Extension_x64_GFNI", caps.gfni);
+ add_field("CPU_Extension_x64_INVARIANT_TSC", caps.invariant_tsc);
+ add_field("CPU_Extension_x64_LZCNT", caps.lzcnt);
+ add_field("CPU_Extension_x64_MOVBE", caps.movbe);
+ add_field("CPU_Extension_x64_PCLMULQDQ", caps.pclmulqdq);
+ add_field("CPU_Extension_x64_POPCNT", caps.popcnt);
+ add_field("CPU_Extension_x64_SHA", caps.sha);
#else
fc.AddField(FieldType::UserSystem, "CPU_Model", "Other");
#endif
diff --git a/src/common/telemetry.h b/src/common/telemetry.h
index d38aeac99..ba633d5a5 100644
--- a/src/common/telemetry.h
+++ b/src/common/telemetry.h
@@ -1,6 +1,5 @@
-// Copyright 2017 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2017 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -55,8 +54,8 @@ class Field : public FieldInterface {
public:
YUZU_NON_COPYABLE(Field);
- Field(FieldType type_, std::string name_, T value_)
- : name(std::move(name_)), type(type_), value(std::move(value_)) {}
+ Field(FieldType type_, std::string_view name_, T value_)
+ : name(name_), type(type_), value(std::move(value_)) {}
~Field() override = default;
@@ -123,7 +122,7 @@ public:
* @param value Value for the field to add.
*/
template <typename T>
- void AddField(FieldType type, const char* name, T value) {
+ void AddField(FieldType type, std::string_view name, T value) {
return AddField(std::make_unique<Field<T>>(type, name, std::move(value)));
}
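A hedged caller-side sketch of the widened AddField overload, assuming the Common::Telemetry namespace used for this header:

    // Field names may now be passed as any string-view-compatible literal or view.
    Common::Telemetry::FieldCollection fc;
    fc.AddField(Common::Telemetry::FieldType::UserSystem, "CPU_Model", std::string{"example"});
    fc.AddField(Common::Telemetry::FieldType::UserSystem, "CPU_Extension_x64_AVX", true);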
@@ -171,6 +170,9 @@ struct VisitorInterface {
struct NullVisitor final : public VisitorInterface {
YUZU_NON_COPYABLE(NullVisitor);
+ NullVisitor() = default;
+ ~NullVisitor() override = default;
+
void Visit(const Field<bool>& /*field*/) override {}
void Visit(const Field<double>& /*field*/) override {}
void Visit(const Field<float>& /*field*/) override {}
diff --git a/src/common/thread.cpp b/src/common/thread.cpp
index 946a1114d..919e33af9 100644
--- a/src/common/thread.cpp
+++ b/src/common/thread.cpp
@@ -1,6 +1,6 @@
-// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2013 Dolphin Emulator Project
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <string>
@@ -47,6 +47,9 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
case ThreadPriority::VeryHigh:
windows_priority = THREAD_PRIORITY_HIGHEST;
break;
+ case ThreadPriority::Critical:
+ windows_priority = THREAD_PRIORITY_TIME_CRITICAL;
+ break;
default:
windows_priority = THREAD_PRIORITY_NORMAL;
break;
@@ -59,9 +62,10 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
void SetCurrentThreadPriority(ThreadPriority new_priority) {
pthread_t this_thread = pthread_self();
- s32 max_prio = sched_get_priority_max(SCHED_OTHER);
- s32 min_prio = sched_get_priority_min(SCHED_OTHER);
- u32 level = static_cast<u32>(new_priority) + 1;
+ const auto scheduling_type = SCHED_OTHER;
+ s32 max_prio = sched_get_priority_max(scheduling_type);
+ s32 min_prio = sched_get_priority_min(scheduling_type);
+ u32 level = std::max(static_cast<u32>(new_priority) + 1, 4U);
struct sched_param params;
if (max_prio > min_prio) {
@@ -70,7 +74,7 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
params.sched_priority = min_prio - ((min_prio - max_prio) * level) / 4;
}
- pthread_setschedparam(this_thread, SCHED_OTHER, &params);
+ pthread_setschedparam(this_thread, scheduling_type, &params);
}
#endif
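For the POSIX path, the level feeding the spread calculation is floored at 4 by the std::max above; a worked note on what that yields with the current enum values:

    // Per std::max(static_cast<u32>(new_priority) + 1, 4U):
    //   Low(0), Normal(1), High(2), VeryHigh(3) -> level 4
    //   Critical(4)                             -> level 5
    // so only Critical requests a priority beyond the level-4 spread.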
diff --git a/src/common/thread.h b/src/common/thread.h
index a8c17c71a..e17a7850f 100644
--- a/src/common/thread.h
+++ b/src/common/thread.h
@@ -1,6 +1,6 @@
-// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2013 Dolphin Emulator Project
+// SPDX-FileCopyrightText: 2014 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -17,7 +17,7 @@ namespace Common {
class Event {
public:
void Set() {
- std::lock_guard lk{mutex};
+ std::scoped_lock lk{mutex};
if (!is_set) {
is_set = true;
condvar.notify_one();
@@ -54,6 +54,10 @@ public:
is_set = false;
}
+ [[nodiscard]] bool IsSet() {
+ return is_set;
+ }
+
private:
std::condition_variable condvar;
std::mutex mutex;
@@ -92,6 +96,7 @@ enum class ThreadPriority : u32 {
Normal = 1,
High = 2,
VeryHigh = 3,
+ Critical = 4,
};
void SetCurrentThreadPriority(ThreadPriority new_priority);
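A small sketch of the Event addition, assuming the class's existing Wait() member; the event name is illustrative:

    Common::Event ready_event;

    // Producer side:
    ready_event.Set();          // wakes a waiter, or leaves the event marked as set

    // Consumer side:
    if (!ready_event.IsSet()) { // new non-blocking query
        ready_event.Wait();     // otherwise block until Set() is called
    }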
diff --git a/src/common/thread_queue_list.h b/src/common/thread_queue_list.h
index def9e5d8d..ce48cec92 100644
--- a/src/common/thread_queue_list.h
+++ b/src/common/thread_queue_list.h
@@ -1,6 +1,6 @@
-// Copyright 2014 Citra Emulator Project / PPSSPP Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2012 PPSSPP Project
+// SPDX-FileCopyrightText: 2014 Dolphin Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/thread_worker.h b/src/common/thread_worker.h
index cd0017726..62c60f724 100644
--- a/src/common/thread_worker.h
+++ b/src/common/thread_worker.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/threadsafe_queue.h b/src/common/threadsafe_queue.h
index 2c8c2b90e..053798e79 100644
--- a/src/common/threadsafe_queue.h
+++ b/src/common/threadsafe_queue.h
@@ -1,6 +1,5 @@
-// Copyright 2010 Dolphin Emulator Project
-// Licensed under GPLv2+
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2010 Dolphin Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
@@ -40,7 +39,7 @@ public:
template <typename Arg>
void Push(Arg&& t) {
// create the element, add it to the queue
- write_ptr->current = std::forward<Arg>(t);
+ write_ptr->current = std::move(t);
// set the next pointer to a new element ptr
// then advance the write pointer
ElementPtr* new_ptr = new ElementPtr();
@@ -52,7 +51,7 @@ public:
// line before cv.wait
// TODO(bunnei): This can be replaced with C++20 waitable atomics when properly supported.
// See discussion on https://github.com/yuzu-emu/yuzu/pull/3173 for details.
- std::lock_guard lock{cv_mutex};
+ std::scoped_lock lock{cv_mutex};
cv.notify_one();
}
@@ -159,7 +158,7 @@ public:
template <typename Arg>
void Push(Arg&& t) {
- std::lock_guard lock{write_lock};
+ std::scoped_lock lock{write_lock};
spsc_queue.Push(t);
}
diff --git a/src/common/time_zone.cpp b/src/common/time_zone.cpp
index ce239eb63..126836b01 100644
--- a/src/common/time_zone.cpp
+++ b/src/common/time_zone.cpp
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <chrono>
#include <iomanip>
diff --git a/src/common/time_zone.h b/src/common/time_zone.h
index 9f5939ca5..99cae6ef2 100644
--- a/src/common/time_zone.h
+++ b/src/common/time_zone.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/tiny_mt.h b/src/common/tiny_mt.h
index 19ae5b7d6..5d5ebf158 100644
--- a/src/common/tiny_mt.h
+++ b/src/common/tiny_mt.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/tree.h b/src/common/tree.h
index 18faa4a48..f77859209 100644
--- a/src/common/tree.h
+++ b/src/common/tree.h
@@ -1,32 +1,10 @@
+// SPDX-FileCopyrightText: 2002 Niels Provos <provos@citi.umich.edu>
+// SPDX-License-Identifier: BSD-2-Clause
+
/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
/* $FreeBSD$ */
-/*-
- * Copyright 2002 Niels Provos <provos@citi.umich.edu>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
#pragma once
/*
@@ -43,294 +21,265 @@
* The maximum height of a red-black tree is 2lg (n+1).
*/
-#include "common/assert.h"
+namespace Common::freebsd {
+
+enum class RBColor {
+ RB_BLACK = 0,
+ RB_RED = 1,
+};
-namespace Common {
+#pragma pack(push, 4)
template <typename T>
-class RBHead {
+class RBEntry {
public:
- [[nodiscard]] T* Root() {
- return rbh_root;
- }
+ constexpr RBEntry() = default;
- [[nodiscard]] const T* Root() const {
- return rbh_root;
+ [[nodiscard]] constexpr T* Left() {
+ return m_rbe_left;
}
-
- void SetRoot(T* root) {
- rbh_root = root;
+ [[nodiscard]] constexpr const T* Left() const {
+ return m_rbe_left;
}
- [[nodiscard]] bool IsEmpty() const {
- return Root() == nullptr;
+ constexpr void SetLeft(T* e) {
+ m_rbe_left = e;
}
-private:
- T* rbh_root = nullptr;
-};
-
-enum class EntryColor {
- Black,
- Red,
-};
-
-template <typename T>
-class RBEntry {
-public:
- [[nodiscard]] T* Left() {
- return rbe_left;
+ [[nodiscard]] constexpr T* Right() {
+ return m_rbe_right;
}
-
- [[nodiscard]] const T* Left() const {
- return rbe_left;
+ [[nodiscard]] constexpr const T* Right() const {
+ return m_rbe_right;
}
- void SetLeft(T* left) {
- rbe_left = left;
+ constexpr void SetRight(T* e) {
+ m_rbe_right = e;
}
- [[nodiscard]] T* Right() {
- return rbe_right;
+ [[nodiscard]] constexpr T* Parent() {
+ return m_rbe_parent;
}
-
- [[nodiscard]] const T* Right() const {
- return rbe_right;
+ [[nodiscard]] constexpr const T* Parent() const {
+ return m_rbe_parent;
}
- void SetRight(T* right) {
- rbe_right = right;
+ constexpr void SetParent(T* e) {
+ m_rbe_parent = e;
}
- [[nodiscard]] T* Parent() {
- return rbe_parent;
+ [[nodiscard]] constexpr bool IsBlack() const {
+ return m_rbe_color == RBColor::RB_BLACK;
}
-
- [[nodiscard]] const T* Parent() const {
- return rbe_parent;
+ [[nodiscard]] constexpr bool IsRed() const {
+ return m_rbe_color == RBColor::RB_RED;
}
-
- void SetParent(T* parent) {
- rbe_parent = parent;
+ [[nodiscard]] constexpr RBColor Color() const {
+ return m_rbe_color;
}
- [[nodiscard]] bool IsBlack() const {
- return rbe_color == EntryColor::Black;
+ constexpr void SetColor(RBColor c) {
+ m_rbe_color = c;
}
- [[nodiscard]] bool IsRed() const {
- return rbe_color == EntryColor::Red;
- }
+private:
+ T* m_rbe_left{};
+ T* m_rbe_right{};
+ T* m_rbe_parent{};
+ RBColor m_rbe_color{RBColor::RB_BLACK};
+};
+#pragma pack(pop)
- [[nodiscard]] EntryColor Color() const {
- return rbe_color;
- }
+template <typename T>
+struct CheckRBEntry {
+ static constexpr bool value = false;
+};
+template <typename T>
+struct CheckRBEntry<RBEntry<T>> {
+ static constexpr bool value = true;
+};
- void SetColor(EntryColor color) {
- rbe_color = color;
- }
+template <typename T>
+concept IsRBEntry = CheckRBEntry<T>::value;
+
+template <typename T>
+concept HasRBEntry = requires(T& t, const T& ct) {
+ { t.GetRBEntry() } -> std::same_as<RBEntry<T>&>;
+ { ct.GetRBEntry() } -> std::same_as<const RBEntry<T>&>;
+};
+template <typename T>
+requires HasRBEntry<T>
+class RBHead {
private:
- T* rbe_left = nullptr;
- T* rbe_right = nullptr;
- T* rbe_parent = nullptr;
- EntryColor rbe_color{};
+ T* m_rbh_root = nullptr;
+
+public:
+ [[nodiscard]] constexpr T* Root() {
+ return m_rbh_root;
+ }
+ [[nodiscard]] constexpr const T* Root() const {
+ return m_rbh_root;
+ }
+ constexpr void SetRoot(T* root) {
+ m_rbh_root = root;
+ }
+
+ [[nodiscard]] constexpr bool IsEmpty() const {
+ return this->Root() == nullptr;
+ }
};
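A sketch of a node type satisfying HasRBEntry, which the free functions below operate on; ExampleNode is hypothetical and not part of tree.h:

    // The node embeds an RBEntry and exposes it via GetRBEntry(), as the concept requires.
    class ExampleNode {
    public:
        constexpr Common::freebsd::RBEntry<ExampleNode>& GetRBEntry() {
            return m_entry;
        }
        constexpr const Common::freebsd::RBEntry<ExampleNode>& GetRBEntry() const {
            return m_entry;
        }

        int key{};

    private:
        Common::freebsd::RBEntry<ExampleNode> m_entry{};
    };
    // Common::freebsd::RBHead<ExampleNode> can then track the root of a tree of such nodes.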
-template <typename Node>
-[[nodiscard]] RBEntry<Node>& RB_ENTRY(Node* node) {
- return node->GetEntry();
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr RBEntry<T>& RB_ENTRY(T* t) {
+ return t->GetRBEntry();
}
-
-template <typename Node>
-[[nodiscard]] const RBEntry<Node>& RB_ENTRY(const Node* node) {
- return node->GetEntry();
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr const RBEntry<T>& RB_ENTRY(const T* t) {
+ return t->GetRBEntry();
}
-template <typename Node>
-[[nodiscard]] Node* RB_PARENT(Node* node) {
- return RB_ENTRY(node).Parent();
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr T* RB_LEFT(T* t) {
+ return RB_ENTRY(t).Left();
}
-
-template <typename Node>
-[[nodiscard]] const Node* RB_PARENT(const Node* node) {
- return RB_ENTRY(node).Parent();
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr const T* RB_LEFT(const T* t) {
+ return RB_ENTRY(t).Left();
}
-template <typename Node>
-void RB_SET_PARENT(Node* node, Node* parent) {
- return RB_ENTRY(node).SetParent(parent);
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr T* RB_RIGHT(T* t) {
+ return RB_ENTRY(t).Right();
}
-
-template <typename Node>
-[[nodiscard]] Node* RB_LEFT(Node* node) {
- return RB_ENTRY(node).Left();
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr const T* RB_RIGHT(const T* t) {
+ return RB_ENTRY(t).Right();
}
-template <typename Node>
-[[nodiscard]] const Node* RB_LEFT(const Node* node) {
- return RB_ENTRY(node).Left();
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr T* RB_PARENT(T* t) {
+ return RB_ENTRY(t).Parent();
}
-
-template <typename Node>
-void RB_SET_LEFT(Node* node, Node* left) {
- return RB_ENTRY(node).SetLeft(left);
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr const T* RB_PARENT(const T* t) {
+ return RB_ENTRY(t).Parent();
}
-template <typename Node>
-[[nodiscard]] Node* RB_RIGHT(Node* node) {
- return RB_ENTRY(node).Right();
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_SET_LEFT(T* t, T* e) {
+ RB_ENTRY(t).SetLeft(e);
}
-
-template <typename Node>
-[[nodiscard]] const Node* RB_RIGHT(const Node* node) {
- return RB_ENTRY(node).Right();
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_SET_RIGHT(T* t, T* e) {
+ RB_ENTRY(t).SetRight(e);
}
-
-template <typename Node>
-void RB_SET_RIGHT(Node* node, Node* right) {
- return RB_ENTRY(node).SetRight(right);
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_SET_PARENT(T* t, T* e) {
+ RB_ENTRY(t).SetParent(e);
}
-template <typename Node>
-[[nodiscard]] bool RB_IS_BLACK(const Node* node) {
- return RB_ENTRY(node).IsBlack();
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr bool RB_IS_BLACK(const T* t) {
+ return RB_ENTRY(t).IsBlack();
}
-
-template <typename Node>
-[[nodiscard]] bool RB_IS_RED(const Node* node) {
- return RB_ENTRY(node).IsRed();
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr bool RB_IS_RED(const T* t) {
+ return RB_ENTRY(t).IsRed();
}
-template <typename Node>
-[[nodiscard]] EntryColor RB_COLOR(const Node* node) {
- return RB_ENTRY(node).Color();
+template <typename T>
+requires HasRBEntry<T>
+[[nodiscard]] constexpr RBColor RB_COLOR(const T* t) {
+ return RB_ENTRY(t).Color();
}
-template <typename Node>
-void RB_SET_COLOR(Node* node, EntryColor color) {
- return RB_ENTRY(node).SetColor(color);
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_SET_COLOR(T* t, RBColor c) {
+ RB_ENTRY(t).SetColor(c);
}
-template <typename Node>
-void RB_SET(Node* node, Node* parent) {
- auto& entry = RB_ENTRY(node);
- entry.SetParent(parent);
- entry.SetLeft(nullptr);
- entry.SetRight(nullptr);
- entry.SetColor(EntryColor::Red);
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_SET(T* elm, T* parent) {
+ auto& rb_entry = RB_ENTRY(elm);
+ rb_entry.SetParent(parent);
+ rb_entry.SetLeft(nullptr);
+ rb_entry.SetRight(nullptr);
+ rb_entry.SetColor(RBColor::RB_RED);
}
-template <typename Node>
-void RB_SET_BLACKRED(Node* black, Node* red) {
- RB_SET_COLOR(black, EntryColor::Black);
- RB_SET_COLOR(red, EntryColor::Red);
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_SET_BLACKRED(T* black, T* red) {
+ RB_SET_COLOR(black, RBColor::RB_BLACK);
+ RB_SET_COLOR(red, RBColor::RB_RED);
}
-template <typename Node>
-void RB_ROTATE_LEFT(RBHead<Node>* head, Node* elm, Node*& tmp) {
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_ROTATE_LEFT(RBHead<T>& head, T* elm, T*& tmp) {
tmp = RB_RIGHT(elm);
- RB_SET_RIGHT(elm, RB_LEFT(tmp));
- if (RB_RIGHT(elm) != nullptr) {
+ if (RB_SET_RIGHT(elm, RB_LEFT(tmp)); RB_RIGHT(elm) != nullptr) {
RB_SET_PARENT(RB_LEFT(tmp), elm);
}
- RB_SET_PARENT(tmp, RB_PARENT(elm));
- if (RB_PARENT(tmp) != nullptr) {
+ if (RB_SET_PARENT(tmp, RB_PARENT(elm)); RB_PARENT(tmp) != nullptr) {
if (elm == RB_LEFT(RB_PARENT(elm))) {
RB_SET_LEFT(RB_PARENT(elm), tmp);
} else {
RB_SET_RIGHT(RB_PARENT(elm), tmp);
}
} else {
- head->SetRoot(tmp);
+ head.SetRoot(tmp);
}
RB_SET_LEFT(tmp, elm);
RB_SET_PARENT(elm, tmp);
}
-template <typename Node>
-void RB_ROTATE_RIGHT(RBHead<Node>* head, Node* elm, Node*& tmp) {
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_ROTATE_RIGHT(RBHead<T>& head, T* elm, T*& tmp) {
tmp = RB_LEFT(elm);
- RB_SET_LEFT(elm, RB_RIGHT(tmp));
- if (RB_LEFT(elm) != nullptr) {
+ if (RB_SET_LEFT(elm, RB_RIGHT(tmp)); RB_LEFT(elm) != nullptr) {
RB_SET_PARENT(RB_RIGHT(tmp), elm);
}
- RB_SET_PARENT(tmp, RB_PARENT(elm));
- if (RB_PARENT(tmp) != nullptr) {
+ if (RB_SET_PARENT(tmp, RB_PARENT(elm)); RB_PARENT(tmp) != nullptr) {
if (elm == RB_LEFT(RB_PARENT(elm))) {
RB_SET_LEFT(RB_PARENT(elm), tmp);
} else {
RB_SET_RIGHT(RB_PARENT(elm), tmp);
}
} else {
- head->SetRoot(tmp);
+ head.SetRoot(tmp);
}
RB_SET_RIGHT(tmp, elm);
RB_SET_PARENT(elm, tmp);
}
-template <typename Node>
-void RB_INSERT_COLOR(RBHead<Node>* head, Node* elm) {
- Node* parent = nullptr;
- Node* tmp = nullptr;
-
- while ((parent = RB_PARENT(elm)) != nullptr && RB_IS_RED(parent)) {
- Node* gparent = RB_PARENT(parent);
- if (parent == RB_LEFT(gparent)) {
- tmp = RB_RIGHT(gparent);
- if (tmp && RB_IS_RED(tmp)) {
- RB_SET_COLOR(tmp, EntryColor::Black);
- RB_SET_BLACKRED(parent, gparent);
- elm = gparent;
- continue;
- }
-
- if (RB_RIGHT(parent) == elm) {
- RB_ROTATE_LEFT(head, parent, tmp);
- tmp = parent;
- parent = elm;
- elm = tmp;
- }
-
- RB_SET_BLACKRED(parent, gparent);
- RB_ROTATE_RIGHT(head, gparent, tmp);
- } else {
- tmp = RB_LEFT(gparent);
- if (tmp && RB_IS_RED(tmp)) {
- RB_SET_COLOR(tmp, EntryColor::Black);
- RB_SET_BLACKRED(parent, gparent);
- elm = gparent;
- continue;
- }
-
- if (RB_LEFT(parent) == elm) {
- RB_ROTATE_RIGHT(head, parent, tmp);
- tmp = parent;
- parent = elm;
- elm = tmp;
- }
-
- RB_SET_BLACKRED(parent, gparent);
- RB_ROTATE_LEFT(head, gparent, tmp);
- }
- }
-
- RB_SET_COLOR(head->Root(), EntryColor::Black);
-}
-
-template <typename Node>
-void RB_REMOVE_COLOR(RBHead<Node>* head, Node* parent, Node* elm) {
- Node* tmp;
- while ((elm == nullptr || RB_IS_BLACK(elm)) && elm != head->Root() && parent != nullptr) {
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_REMOVE_COLOR(RBHead<T>& head, T* parent, T* elm) {
+ T* tmp;
+ while ((elm == nullptr || RB_IS_BLACK(elm)) && elm != head.Root()) {
if (RB_LEFT(parent) == elm) {
tmp = RB_RIGHT(parent);
- if (!tmp) {
- ASSERT_MSG(false, "tmp is invalid!");
- break;
- }
if (RB_IS_RED(tmp)) {
RB_SET_BLACKRED(tmp, parent);
RB_ROTATE_LEFT(head, parent, tmp);
@@ -339,29 +288,29 @@ void RB_REMOVE_COLOR(RBHead<Node>* head, Node* parent, Node* elm) {
if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
(RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
- RB_SET_COLOR(tmp, EntryColor::Red);
+ RB_SET_COLOR(tmp, RBColor::RB_RED);
elm = parent;
parent = RB_PARENT(elm);
} else {
if (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp))) {
- Node* oleft;
+ T* oleft;
if ((oleft = RB_LEFT(tmp)) != nullptr) {
- RB_SET_COLOR(oleft, EntryColor::Black);
+ RB_SET_COLOR(oleft, RBColor::RB_BLACK);
}
- RB_SET_COLOR(tmp, EntryColor::Red);
+ RB_SET_COLOR(tmp, RBColor::RB_RED);
RB_ROTATE_RIGHT(head, tmp, oleft);
tmp = RB_RIGHT(parent);
}
RB_SET_COLOR(tmp, RB_COLOR(parent));
- RB_SET_COLOR(parent, EntryColor::Black);
+ RB_SET_COLOR(parent, RBColor::RB_BLACK);
if (RB_RIGHT(tmp)) {
- RB_SET_COLOR(RB_RIGHT(tmp), EntryColor::Black);
+ RB_SET_COLOR(RB_RIGHT(tmp), RBColor::RB_BLACK);
}
RB_ROTATE_LEFT(head, parent, tmp);
- elm = head->Root();
+ elm = head.Root();
break;
}
} else {
@@ -372,68 +321,56 @@ void RB_REMOVE_COLOR(RBHead<Node>* head, Node* parent, Node* elm) {
tmp = RB_LEFT(parent);
}
- if (!tmp) {
- ASSERT_MSG(false, "tmp is invalid!");
- break;
- }
-
if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
(RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
- RB_SET_COLOR(tmp, EntryColor::Red);
+ RB_SET_COLOR(tmp, RBColor::RB_RED);
elm = parent;
parent = RB_PARENT(elm);
} else {
if (RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) {
- Node* oright;
+ T* oright;
if ((oright = RB_RIGHT(tmp)) != nullptr) {
- RB_SET_COLOR(oright, EntryColor::Black);
+ RB_SET_COLOR(oright, RBColor::RB_BLACK);
}
- RB_SET_COLOR(tmp, EntryColor::Red);
+ RB_SET_COLOR(tmp, RBColor::RB_RED);
RB_ROTATE_LEFT(head, tmp, oright);
tmp = RB_LEFT(parent);
}
RB_SET_COLOR(tmp, RB_COLOR(parent));
- RB_SET_COLOR(parent, EntryColor::Black);
+ RB_SET_COLOR(parent, RBColor::RB_BLACK);
if (RB_LEFT(tmp)) {
- RB_SET_COLOR(RB_LEFT(tmp), EntryColor::Black);
+ RB_SET_COLOR(RB_LEFT(tmp), RBColor::RB_BLACK);
}
RB_ROTATE_RIGHT(head, parent, tmp);
- elm = head->Root();
+ elm = head.Root();
break;
}
}
}
if (elm) {
- RB_SET_COLOR(elm, EntryColor::Black);
+ RB_SET_COLOR(elm, RBColor::RB_BLACK);
}
}
-template <typename Node>
-Node* RB_REMOVE(RBHead<Node>* head, Node* elm) {
- Node* child = nullptr;
- Node* parent = nullptr;
- Node* old = elm;
- EntryColor color{};
-
- const auto finalize = [&] {
- if (color == EntryColor::Black) {
- RB_REMOVE_COLOR(head, parent, child);
- }
-
- return old;
- };
+template <typename T>
+requires HasRBEntry<T>
+constexpr T* RB_REMOVE(RBHead<T>& head, T* elm) {
+ T* child = nullptr;
+ T* parent = nullptr;
+ T* old = elm;
+ RBColor color = RBColor::RB_BLACK;
if (RB_LEFT(elm) == nullptr) {
child = RB_RIGHT(elm);
} else if (RB_RIGHT(elm) == nullptr) {
child = RB_LEFT(elm);
} else {
- Node* left;
+ T* left;
elm = RB_RIGHT(elm);
while ((left = RB_LEFT(elm)) != nullptr) {
elm = left;
@@ -446,6 +383,7 @@ Node* RB_REMOVE(RBHead<Node>* head, Node* elm) {
if (child) {
RB_SET_PARENT(child, parent);
}
+
if (parent) {
if (RB_LEFT(parent) == elm) {
RB_SET_LEFT(parent, child);
@@ -453,14 +391,14 @@ Node* RB_REMOVE(RBHead<Node>* head, Node* elm) {
RB_SET_RIGHT(parent, child);
}
} else {
- head->SetRoot(child);
+ head.SetRoot(child);
}
if (RB_PARENT(elm) == old) {
parent = elm;
}
- elm->SetEntry(old->GetEntry());
+ elm->SetRBEntry(old->GetRBEntry());
if (RB_PARENT(old)) {
if (RB_LEFT(RB_PARENT(old)) == old) {
@@ -469,17 +407,24 @@ Node* RB_REMOVE(RBHead<Node>* head, Node* elm) {
RB_SET_RIGHT(RB_PARENT(old), elm);
}
} else {
- head->SetRoot(elm);
+ head.SetRoot(elm);
}
+
RB_SET_PARENT(RB_LEFT(old), elm);
+
if (RB_RIGHT(old)) {
RB_SET_PARENT(RB_RIGHT(old), elm);
}
+
if (parent) {
left = parent;
}
- return finalize();
+ if (color == RBColor::RB_BLACK) {
+ RB_REMOVE_COLOR(head, parent, child);
+ }
+
+ return old;
}
parent = RB_PARENT(elm);
@@ -495,17 +440,69 @@ Node* RB_REMOVE(RBHead<Node>* head, Node* elm) {
RB_SET_RIGHT(parent, child);
}
} else {
- head->SetRoot(child);
+ head.SetRoot(child);
+ }
+
+ if (color == RBColor::RB_BLACK) {
+ RB_REMOVE_COLOR(head, parent, child);
+ }
+
+ return old;
+}
+
+template <typename T>
+requires HasRBEntry<T>
+constexpr void RB_INSERT_COLOR(RBHead<T>& head, T* elm) {
+ T *parent = nullptr, *tmp = nullptr;
+ while ((parent = RB_PARENT(elm)) != nullptr && RB_IS_RED(parent)) {
+ T* gparent = RB_PARENT(parent);
+ if (parent == RB_LEFT(gparent)) {
+ tmp = RB_RIGHT(gparent);
+ if (tmp && RB_IS_RED(tmp)) {
+ RB_SET_COLOR(tmp, RBColor::RB_BLACK);
+ RB_SET_BLACKRED(parent, gparent);
+ elm = gparent;
+ continue;
+ }
+
+ if (RB_RIGHT(parent) == elm) {
+ RB_ROTATE_LEFT(head, parent, tmp);
+ tmp = parent;
+ parent = elm;
+ elm = tmp;
+ }
+
+ RB_SET_BLACKRED(parent, gparent);
+ RB_ROTATE_RIGHT(head, gparent, tmp);
+ } else {
+ tmp = RB_LEFT(gparent);
+ if (tmp && RB_IS_RED(tmp)) {
+ RB_SET_COLOR(tmp, RBColor::RB_BLACK);
+ RB_SET_BLACKRED(parent, gparent);
+ elm = gparent;
+ continue;
+ }
+
+ if (RB_LEFT(parent) == elm) {
+ RB_ROTATE_RIGHT(head, parent, tmp);
+ tmp = parent;
+ parent = elm;
+ elm = tmp;
+ }
+
+ RB_SET_BLACKRED(parent, gparent);
+ RB_ROTATE_LEFT(head, gparent, tmp);
+ }
}
- return finalize();
+ RB_SET_COLOR(head.Root(), RBColor::RB_BLACK);
}
-// Inserts a node into the RB tree
-template <typename Node, typename CompareFunction>
-Node* RB_INSERT(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
- Node* parent = nullptr;
- Node* tmp = head->Root();
+template <typename T, typename Compare>
+requires HasRBEntry<T>
+constexpr T* RB_INSERT(RBHead<T>& head, T* elm, Compare cmp) {
+ T* parent = nullptr;
+ T* tmp = head.Root();
int comp = 0;
while (tmp) {
@@ -529,17 +526,17 @@ Node* RB_INSERT(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
RB_SET_RIGHT(parent, elm);
}
} else {
- head->SetRoot(elm);
+ head.SetRoot(elm);
}
RB_INSERT_COLOR(head, elm);
return nullptr;
}
-// Finds the node with the same key as elm
-template <typename Node, typename CompareFunction>
-Node* RB_FIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
- Node* tmp = head->Root();
+template <typename T, typename Compare>
+requires HasRBEntry<T>
+constexpr T* RB_FIND(RBHead<T>& head, T* elm, Compare cmp) {
+ T* tmp = head.Root();
while (tmp) {
const int comp = cmp(elm, tmp);
@@ -555,11 +552,11 @@ Node* RB_FIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
return nullptr;
}
-// Finds the first node greater than or equal to the search key
-template <typename Node, typename CompareFunction>
-Node* RB_NFIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
- Node* tmp = head->Root();
- Node* res = nullptr;
+template <typename T, typename Compare>
+requires HasRBEntry<T>
+constexpr T* RB_NFIND(RBHead<T>& head, T* elm, Compare cmp) {
+ T* tmp = head.Root();
+ T* res = nullptr;
while (tmp) {
const int comp = cmp(elm, tmp);
@@ -576,13 +573,13 @@ Node* RB_NFIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
return res;
}
-// Finds the node with the same key as lelm
-template <typename Node, typename CompareFunction>
-Node* RB_FIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) {
- Node* tmp = head->Root();
+template <typename T, typename U, typename Compare>
+requires HasRBEntry<T>
+constexpr T* RB_FIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
+ T* tmp = head.Root();
while (tmp) {
- const int comp = lcmp(lelm, tmp);
+ const int comp = cmp(key, tmp);
if (comp < 0) {
tmp = RB_LEFT(tmp);
} else if (comp > 0) {
@@ -595,14 +592,14 @@ Node* RB_FIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp)
return nullptr;
}
-// Finds the first node greater than or equal to the search key
-template <typename Node, typename CompareFunction>
-Node* RB_NFIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) {
- Node* tmp = head->Root();
- Node* res = nullptr;
+template <typename T, typename U, typename Compare>
+requires HasRBEntry<T>
+constexpr T* RB_NFIND_KEY(RBHead<T>& head, const U& key, Compare cmp) {
+ T* tmp = head.Root();
+ T* res = nullptr;
while (tmp) {
- const int comp = lcmp(lelm, tmp);
+ const int comp = cmp(key, tmp);
if (comp < 0) {
res = tmp;
tmp = RB_LEFT(tmp);
@@ -616,8 +613,43 @@ Node* RB_NFIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp)
return res;
}
-template <typename Node>
-Node* RB_NEXT(Node* elm) {
+template <typename T, typename Compare>
+requires HasRBEntry<T>
+constexpr T* RB_FIND_EXISTING(RBHead<T>& head, T* elm, Compare cmp) {
+ T* tmp = head.Root();
+
+ while (true) {
+ const int comp = cmp(elm, tmp);
+ if (comp < 0) {
+ tmp = RB_LEFT(tmp);
+ } else if (comp > 0) {
+ tmp = RB_RIGHT(tmp);
+ } else {
+ return tmp;
+ }
+ }
+}
+
+template <typename T, typename U, typename Compare>
+requires HasRBEntry<T>
+constexpr T* RB_FIND_EXISTING_KEY(RBHead<T>& head, const U& key, Compare cmp) {
+ T* tmp = head.Root();
+
+ while (true) {
+ const int comp = cmp(key, tmp);
+ if (comp < 0) {
+ tmp = RB_LEFT(tmp);
+ } else if (comp > 0) {
+ tmp = RB_RIGHT(tmp);
+ } else {
+ return tmp;
+ }
+ }
+}
+
+template <typename T>
+requires HasRBEntry<T>
+constexpr T* RB_NEXT(T* elm) {
if (RB_RIGHT(elm)) {
elm = RB_RIGHT(elm);
while (RB_LEFT(elm)) {
@@ -636,8 +668,9 @@ Node* RB_NEXT(Node* elm) {
return elm;
}
-template <typename Node>
-Node* RB_PREV(Node* elm) {
+template <typename T>
+requires HasRBEntry<T>
+constexpr T* RB_PREV(T* elm) {
if (RB_LEFT(elm)) {
elm = RB_LEFT(elm);
while (RB_RIGHT(elm)) {
@@ -656,30 +689,32 @@ Node* RB_PREV(Node* elm) {
return elm;
}
-template <typename Node>
-Node* RB_MINMAX(RBHead<Node>* head, bool is_min) {
- Node* tmp = head->Root();
- Node* parent = nullptr;
+template <typename T>
+requires HasRBEntry<T>
+constexpr T* RB_MIN(RBHead<T>& head) {
+ T* tmp = head.Root();
+ T* parent = nullptr;
while (tmp) {
parent = tmp;
- if (is_min) {
- tmp = RB_LEFT(tmp);
- } else {
- tmp = RB_RIGHT(tmp);
- }
+ tmp = RB_LEFT(tmp);
}
return parent;
}
-template <typename Node>
-Node* RB_MIN(RBHead<Node>* head) {
- return RB_MINMAX(head, true);
-}
+template <typename T>
+requires HasRBEntry<T>
+constexpr T* RB_MAX(RBHead<T>& head) {
+ T* tmp = head.Root();
+ T* parent = nullptr;
-template <typename Node>
-Node* RB_MAX(RBHead<Node>* head) {
- return RB_MINMAX(head, false);
+ while (tmp) {
+ parent = tmp;
+ tmp = RB_RIGHT(tmp);
+ }
+
+ return parent;
}
-} // namespace Common
+
+} // namespace Common::freebsd
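
For orientation, the rewritten helpers above are generic over any type satisfying HasRBEntry rather than a fixed Node class. A minimal usage sketch under stated assumptions (the RBEntry<T> entry type and the GetRBEntry()/SetRBEntry() accessors the concept requires are taken to be the ones declared earlier in this header; MyNode, CompareNodes and Example are purely illustrative, not part of the change):

    // Illustrative node type intended to satisfy HasRBEntry.
    struct MyNode {
        Common::freebsd::RBEntry<MyNode> entry;
        int key{};

        constexpr Common::freebsd::RBEntry<MyNode>& GetRBEntry() { return entry; }
        constexpr const Common::freebsd::RBEntry<MyNode>& GetRBEntry() const { return entry; }
        constexpr void SetRBEntry(const Common::freebsd::RBEntry<MyNode>& e) { entry = e; }
    };

    // Three-way comparison as expected by RB_INSERT/RB_FIND: negative, zero, positive.
    constexpr int CompareNodes(const MyNode* lhs, const MyNode* rhs) {
        return (lhs->key < rhs->key) ? -1 : (lhs->key > rhs->key) ? 1 : 0;
    }

    void Example() {
        Common::freebsd::RBHead<MyNode> head{};
        MyNode a{}, b{};
        a.key = 1;
        b.key = 2;
        Common::freebsd::RB_INSERT(head, &a, CompareNodes); // nullptr per RB_INSERT above
        Common::freebsd::RB_INSERT(head, &b, CompareNodes);
        [[maybe_unused]] MyNode* smallest = Common::freebsd::RB_MIN(head); // &a
        [[maybe_unused]] MyNode* two = Common::freebsd::RB_FIND_KEY(
            head, 2, [](const int& k, const MyNode* n) {
                return (k < n->key) ? -1 : (k > n->key) ? 1 : 0;
            });
    }
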
diff --git a/src/common/uint128.h b/src/common/uint128.h
index 4780b2f9d..f450a6db9 100644
--- a/src/common/uint128.h
+++ b/src/common/uint128.h
@@ -1,10 +1,8 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
-#include <cstring>
#include <utility>
#ifdef _MSC_VER
@@ -13,7 +11,7 @@
#pragma intrinsic(_umul128)
#pragma intrinsic(_udiv128)
#else
-#include <x86intrin.h>
+#include <cstring>
#endif
#include "common/common_types.h"
diff --git a/src/common/unique_function.h b/src/common/unique_function.h
index ca0559071..c15d88349 100644
--- a/src/common/unique_function.h
+++ b/src/common/unique_function.h
@@ -1,6 +1,5 @@
-// Copyright 2021 yuzu emulator team
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/uuid.cpp b/src/common/uuid.cpp
index 2b6a530e3..89e1ed225 100644
--- a/src/common/uuid.cpp
+++ b/src/common/uuid.cpp
@@ -1,6 +1,5 @@
-// Copyright 2022 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <bit>
#include <optional>
diff --git a/src/common/uuid.h b/src/common/uuid.h
index fe31e64e6..7172ca165 100644
--- a/src/common/uuid.h
+++ b/src/common/uuid.h
@@ -1,13 +1,11 @@
-// Copyright 2022 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <functional>
#include <string>
-#include <string_view>
#include "common/common_types.h"
diff --git a/src/common/vector_math.h b/src/common/vector_math.h
index ba7c363c1..e62eeea2e 100644
--- a/src/common/vector_math.h
+++ b/src/common/vector_math.h
@@ -1,32 +1,6 @@
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-// Copyright 2014 Tony Wasserka
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the owner nor the names of its contributors may
-// be used to endorse or promote products derived from this software
-// without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-FileCopyrightText: 2014 Tony Wasserka
+// SPDX-FileCopyrightText: 2014 Dolphin Emulator Project
+// SPDX-License-Identifier: BSD-3-Clause AND GPL-2.0-or-later
#pragma once
diff --git a/src/common/virtual_buffer.cpp b/src/common/virtual_buffer.cpp
index e3ca29258..dea6de99f 100644
--- a/src/common/virtual_buffer.cpp
+++ b/src/common/virtual_buffer.cpp
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#ifdef _WIN32
#include <windows.h>
diff --git a/src/common/virtual_buffer.h b/src/common/virtual_buffer.h
index fb1a6f81f..4f6e3e6e5 100644
--- a/src/common/virtual_buffer.h
+++ b/src/common/virtual_buffer.h
@@ -1,10 +1,8 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
-#include <type_traits>
#include <utility>
namespace Common {
diff --git a/src/common/wall_clock.cpp b/src/common/wall_clock.cpp
index 9acf7551e..ae07f2811 100644
--- a/src/common/wall_clock.cpp
+++ b/src/common/wall_clock.cpp
@@ -1,8 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#include <cstdint>
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/uint128.h"
#include "common/wall_clock.h"
@@ -70,7 +67,7 @@ std::unique_ptr<WallClock> CreateBestMatchingClock(u64 emulated_cpu_frequency,
const auto& caps = GetCPUCaps();
u64 rtsc_frequency = 0;
if (caps.invariant_tsc) {
- rtsc_frequency = EstimateRDTSCFrequency();
+ rtsc_frequency = caps.tsc_frequency ? caps.tsc_frequency : EstimateRDTSCFrequency();
}
// Fallback to StandardWallClock if the hardware TSC does not have the precision greater than:
diff --git a/src/common/wall_clock.h b/src/common/wall_clock.h
index 874448c27..828a523a8 100644
--- a/src/common/wall_clock.h
+++ b/src/common/wall_clock.h
@@ -1,6 +1,5 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/x64/cpu_detect.cpp b/src/common/x64/cpu_detect.cpp
index fbeacc7e2..1a27532d4 100644
--- a/src/common/x64/cpu_detect.cpp
+++ b/src/common/x64/cpu_detect.cpp
@@ -1,8 +1,12 @@
-// Copyright 2013 Dolphin Emulator Project / 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2013 Dolphin Emulator Project / 2015 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <array>
#include <cstring>
+#include <iterator>
+#include <string_view>
+#include "common/bit_util.h"
#include "common/common_types.h"
#include "common/x64/cpu_detect.h"
@@ -17,7 +21,7 @@
// clang-format on
#endif
-static inline void __cpuidex(int info[4], int function_id, int subfunction_id) {
+static inline void __cpuidex(int info[4], u32 function_id, u32 subfunction_id) {
#if defined(__DragonFly__) || defined(__FreeBSD__)
// Despite the name, this is just do_cpuid() with ECX as second input.
cpuid_count((u_int)function_id, (u_int)subfunction_id, (u_int*)info);
@@ -30,7 +34,7 @@ static inline void __cpuidex(int info[4], int function_id, int subfunction_id) {
#endif
}
-static inline void __cpuid(int info[4], int function_id) {
+static inline void __cpuid(int info[4], u32 function_id) {
return __cpuidex(info, function_id, 0);
}
@@ -45,6 +49,17 @@ static inline u64 _xgetbv(u32 index) {
namespace Common {
+CPUCaps::Manufacturer CPUCaps::ParseManufacturer(std::string_view brand_string) {
+ if (brand_string == "GenuineIntel") {
+ return Manufacturer::Intel;
+ } else if (brand_string == "AuthenticAMD") {
+ return Manufacturer::AMD;
+ } else if (brand_string == "HygonGenuine") {
+ return Manufacturer::Hygon;
+ }
+ return Manufacturer::Unknown;
+}
+
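
The helper matches the 12-character vendor identification string that CPUID leaf 0 returns in EBX, EDX, ECX; Detect() below copies those registers into caps.brand_string before calling it. Illustrative use only:

    const auto m = Common::CPUCaps::ParseManufacturer("AuthenticAMD"); // Manufacturer::AMD
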
// Detects the various CPU features
static CPUCaps Detect() {
CPUCaps caps = {};
@@ -53,75 +68,74 @@ static CPUCaps Detect() {
// yuzu at all anyway
int cpu_id[4];
- memset(caps.brand_string, 0, sizeof(caps.brand_string));
- // Detect CPU's CPUID capabilities and grab CPU string
+ // Detect CPU's CPUID capabilities and grab manufacturer string
__cpuid(cpu_id, 0x00000000);
- u32 max_std_fn = cpu_id[0]; // EAX
-
- std::memcpy(&caps.brand_string[0], &cpu_id[1], sizeof(int));
- std::memcpy(&caps.brand_string[4], &cpu_id[3], sizeof(int));
- std::memcpy(&caps.brand_string[8], &cpu_id[2], sizeof(int));
- if (cpu_id[1] == 0x756e6547 && cpu_id[2] == 0x6c65746e && cpu_id[3] == 0x49656e69)
- caps.manufacturer = Manufacturer::Intel;
- else if (cpu_id[1] == 0x68747541 && cpu_id[2] == 0x444d4163 && cpu_id[3] == 0x69746e65)
- caps.manufacturer = Manufacturer::AMD;
- else if (cpu_id[1] == 0x6f677948 && cpu_id[2] == 0x656e6975 && cpu_id[3] == 0x6e65476e)
- caps.manufacturer = Manufacturer::Hygon;
- else
- caps.manufacturer = Manufacturer::Unknown;
+ const u32 max_std_fn = cpu_id[0]; // EAX
- __cpuid(cpu_id, 0x80000000);
+ std::memset(caps.brand_string, 0, std::size(caps.brand_string));
+ std::memcpy(&caps.brand_string[0], &cpu_id[1], sizeof(u32));
+ std::memcpy(&caps.brand_string[4], &cpu_id[3], sizeof(u32));
+ std::memcpy(&caps.brand_string[8], &cpu_id[2], sizeof(u32));
+
+ caps.manufacturer = CPUCaps::ParseManufacturer(caps.brand_string);
+
+ // Set reasonable default cpu string even if brand string not available
+ std::strncpy(caps.cpu_string, caps.brand_string, std::size(caps.brand_string));
- u32 max_ex_fn = cpu_id[0];
+ __cpuid(cpu_id, 0x80000000);
- // Set reasonable default brand string even if brand string not available
- strcpy(caps.cpu_string, caps.brand_string);
+ const u32 max_ex_fn = cpu_id[0];
// Detect family and other miscellaneous features
if (max_std_fn >= 1) {
__cpuid(cpu_id, 0x00000001);
- if ((cpu_id[3] >> 25) & 1)
- caps.sse = true;
- if ((cpu_id[3] >> 26) & 1)
- caps.sse2 = true;
- if ((cpu_id[2]) & 1)
- caps.sse3 = true;
- if ((cpu_id[2] >> 9) & 1)
- caps.ssse3 = true;
- if ((cpu_id[2] >> 19) & 1)
- caps.sse4_1 = true;
- if ((cpu_id[2] >> 20) & 1)
- caps.sse4_2 = true;
- if ((cpu_id[2] >> 25) & 1)
- caps.aes = true;
+ caps.sse = Common::Bit<25>(cpu_id[3]);
+ caps.sse2 = Common::Bit<26>(cpu_id[3]);
+ caps.sse3 = Common::Bit<0>(cpu_id[2]);
+ caps.pclmulqdq = Common::Bit<1>(cpu_id[2]);
+ caps.ssse3 = Common::Bit<9>(cpu_id[2]);
+ caps.sse4_1 = Common::Bit<19>(cpu_id[2]);
+ caps.sse4_2 = Common::Bit<20>(cpu_id[2]);
+ caps.movbe = Common::Bit<22>(cpu_id[2]);
+ caps.popcnt = Common::Bit<23>(cpu_id[2]);
+ caps.aes = Common::Bit<25>(cpu_id[2]);
+ caps.f16c = Common::Bit<29>(cpu_id[2]);
// AVX support requires 3 separate checks:
// - Is the AVX bit set in CPUID?
// - Is the XSAVE bit set in CPUID?
// - XGETBV result has the XCR bit set.
- if (((cpu_id[2] >> 28) & 1) && ((cpu_id[2] >> 27) & 1)) {
+ if (Common::Bit<28>(cpu_id[2]) && Common::Bit<27>(cpu_id[2])) {
if ((_xgetbv(_XCR_XFEATURE_ENABLED_MASK) & 0x6) == 0x6) {
caps.avx = true;
- if ((cpu_id[2] >> 12) & 1)
+ if (Common::Bit<12>(cpu_id[2]))
caps.fma = true;
}
}
if (max_std_fn >= 7) {
__cpuidex(cpu_id, 0x00000007, 0x00000000);
- // Can't enable AVX2 unless the XSAVE/XGETBV checks above passed
- if ((cpu_id[1] >> 5) & 1)
- caps.avx2 = caps.avx;
- if ((cpu_id[1] >> 3) & 1)
- caps.bmi1 = true;
- if ((cpu_id[1] >> 8) & 1)
- caps.bmi2 = true;
- // Checks for AVX512F, AVX512CD, AVX512VL, AVX512DQ, AVX512BW (Intel Skylake-X/SP)
- if ((cpu_id[1] >> 16) & 1 && (cpu_id[1] >> 28) & 1 && (cpu_id[1] >> 31) & 1 &&
- (cpu_id[1] >> 17) & 1 && (cpu_id[1] >> 30) & 1) {
- caps.avx512 = caps.avx2;
+ // Can't enable AVX{2,512} unless the XSAVE/XGETBV checks above passed
+ if (caps.avx) {
+ caps.avx2 = Common::Bit<5>(cpu_id[1]);
+ caps.avx512f = Common::Bit<16>(cpu_id[1]);
+ caps.avx512dq = Common::Bit<17>(cpu_id[1]);
+ caps.avx512cd = Common::Bit<28>(cpu_id[1]);
+ caps.avx512bw = Common::Bit<30>(cpu_id[1]);
+ caps.avx512vl = Common::Bit<31>(cpu_id[1]);
+ caps.avx512vbmi = Common::Bit<1>(cpu_id[2]);
+ caps.avx512bitalg = Common::Bit<12>(cpu_id[2]);
}
+
+ caps.bmi1 = Common::Bit<3>(cpu_id[1]);
+ caps.bmi2 = Common::Bit<8>(cpu_id[1]);
+ caps.sha = Common::Bit<29>(cpu_id[1]);
+
+ caps.gfni = Common::Bit<8>(cpu_id[2]);
+
+ __cpuidex(cpu_id, 0x00000007, 0x00000001);
+ caps.avx_vnni = caps.avx && Common::Bit<4>(cpu_id[0]);
}
}
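
The hand-rolled shift-and-mask tests are replaced with Common::Bit<N>() from common/bit_util.h. Its definition is not part of this hunk; a rough equivalent for reading the code above (a sketch, not the project's implementation):

    #include <cstddef>
    #include <type_traits>

    // Sketch: true if bit N of `value` is set, e.g. Bit<25>(cpu_id[3]) for SSE.
    template <std::size_t N, typename T>
    [[nodiscard]] constexpr bool Bit(T value) {
        static_assert(N < sizeof(T) * 8, "bit index out of range");
        using U = std::make_unsigned_t<T>;
        return ((static_cast<U>(value) >> N) & U{1}) != 0;
    }
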
@@ -138,14 +152,28 @@ static CPUCaps Detect() {
if (max_ex_fn >= 0x80000001) {
// Check for more features
__cpuid(cpu_id, 0x80000001);
- if ((cpu_id[2] >> 16) & 1)
- caps.fma4 = true;
+ caps.lzcnt = Common::Bit<5>(cpu_id[2]);
+ caps.fma4 = Common::Bit<16>(cpu_id[2]);
}
if (max_ex_fn >= 0x80000007) {
__cpuid(cpu_id, 0x80000007);
- if (cpu_id[3] & (1 << 8)) {
- caps.invariant_tsc = true;
+ caps.invariant_tsc = Common::Bit<8>(cpu_id[3]);
+ }
+
+ if (max_std_fn >= 0x15) {
+ __cpuid(cpu_id, 0x15);
+ caps.tsc_crystal_ratio_denominator = cpu_id[0];
+ caps.tsc_crystal_ratio_numerator = cpu_id[1];
+ caps.crystal_frequency = cpu_id[2];
+ // Some CPU models might not return a crystal frequency.
+ // The CPU model can be detected to use the values from turbostat
+ // https://github.com/torvalds/linux/blob/master/tools/power/x86/turbostat/turbostat.c#L5569
+ // but it's easier to just estimate the TSC tick rate for these cases.
+ if (caps.tsc_crystal_ratio_denominator) {
+ caps.tsc_frequency = static_cast<u64>(caps.crystal_frequency) *
+ caps.tsc_crystal_ratio_numerator /
+ caps.tsc_crystal_ratio_denominator;
}
}
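
The derived value is simply tsc_frequency = crystal_frequency * tsc_crystal_ratio_numerator / tsc_crystal_ratio_denominator. With illustrative numbers only: a part reporting a 24,000,000 Hz crystal and a 180/2 ratio in CPUID.15H yields 24,000,000 * 180 / 2 = 2,160,000,000 Hz, i.e. a 2.16 GHz invariant TSC. If the denominator or the crystal frequency comes back as zero, tsc_frequency stays zero and the caller in wall_clock.cpp falls back to EstimateRDTSCFrequency().
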
diff --git a/src/common/x64/cpu_detect.h b/src/common/x64/cpu_detect.h
index e3b63302e..6830f3795 100644
--- a/src/common/x64/cpu_detect.h
+++ b/src/common/x64/cpu_detect.h
@@ -1,42 +1,71 @@
-// Copyright 2013 Dolphin Emulator Project / 2015 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
+// SPDX-FileCopyrightText: Copyright 2013 Dolphin Emulator Project / 2015 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
-namespace Common {
+#include <string_view>
+#include "common/common_types.h"
-enum class Manufacturer : u32 {
- Intel = 0,
- AMD = 1,
- Hygon = 2,
- Unknown = 3,
-};
+namespace Common {
/// x86/x64 CPU capabilities that may be detected by this module
struct CPUCaps {
+
+ enum class Manufacturer : u8 {
+ Unknown = 0,
+ Intel = 1,
+ AMD = 2,
+ Hygon = 3,
+ };
+
+ static Manufacturer ParseManufacturer(std::string_view brand_string);
+
Manufacturer manufacturer;
- char cpu_string[0x21];
- char brand_string[0x41];
- bool sse;
- bool sse2;
- bool sse3;
- bool ssse3;
- bool sse4_1;
- bool sse4_2;
- bool lzcnt;
- bool avx;
- bool avx2;
- bool avx512;
- bool bmi1;
- bool bmi2;
- bool fma;
- bool fma4;
- bool aes;
- bool invariant_tsc;
+ char brand_string[13];
+
+ char cpu_string[48];
+
u32 base_frequency;
u32 max_frequency;
u32 bus_frequency;
+
+ u32 tsc_crystal_ratio_denominator;
+ u32 tsc_crystal_ratio_numerator;
+ u32 crystal_frequency;
+ u64 tsc_frequency; // Derived from the above three values
+
+ bool sse : 1;
+ bool sse2 : 1;
+ bool sse3 : 1;
+ bool ssse3 : 1;
+ bool sse4_1 : 1;
+ bool sse4_2 : 1;
+
+ bool avx : 1;
+ bool avx_vnni : 1;
+ bool avx2 : 1;
+ bool avx512f : 1;
+ bool avx512dq : 1;
+ bool avx512cd : 1;
+ bool avx512bw : 1;
+ bool avx512vl : 1;
+ bool avx512vbmi : 1;
+ bool avx512bitalg : 1;
+
+ bool aes : 1;
+ bool bmi1 : 1;
+ bool bmi2 : 1;
+ bool f16c : 1;
+ bool fma : 1;
+ bool fma4 : 1;
+ bool gfni : 1;
+ bool invariant_tsc : 1;
+ bool lzcnt : 1;
+ bool movbe : 1;
+ bool pclmulqdq : 1;
+ bool popcnt : 1;
+ bool sha : 1;
};
/**
diff --git a/src/common/x64/native_clock.cpp b/src/common/x64/native_clock.cpp
index 91b842829..8b08332ab 100644
--- a/src/common/x64/native_clock.cpp
+++ b/src/common/x64/native_clock.cpp
@@ -1,36 +1,57 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <array>
#include <chrono>
-#include <limits>
-#include <mutex>
#include <thread>
#include "common/atomic_ops.h"
#include "common/uint128.h"
#include "common/x64/native_clock.h"
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
namespace Common {
+#ifdef _MSC_VER
+__forceinline static u64 FencedRDTSC() {
+ _mm_lfence();
+ _ReadWriteBarrier();
+ const u64 result = __rdtsc();
+ _mm_lfence();
+ _ReadWriteBarrier();
+ return result;
+}
+#else
+static u64 FencedRDTSC() {
+ u64 result;
+ asm volatile("lfence\n\t"
+ "rdtsc\n\t"
+ "shl $32, %%rdx\n\t"
+ "or %%rdx, %0\n\t"
+ "lfence"
+ : "=a"(result)
+ :
+ : "rdx", "memory", "cc");
+ return result;
+}
+#endif
+
u64 EstimateRDTSCFrequency() {
// Discard the first result measuring the rdtsc.
- _mm_mfence();
- __rdtsc();
+ FencedRDTSC();
std::this_thread::sleep_for(std::chrono::milliseconds{1});
- _mm_mfence();
- __rdtsc();
+ FencedRDTSC();
// Get the current time.
const auto start_time = std::chrono::steady_clock::now();
- _mm_mfence();
- const u64 tsc_start = __rdtsc();
+ const u64 tsc_start = FencedRDTSC();
// Wait for 200 milliseconds.
std::this_thread::sleep_for(std::chrono::milliseconds{200});
const auto end_time = std::chrono::steady_clock::now();
- _mm_mfence();
- const u64 tsc_end = __rdtsc();
+ const u64 tsc_end = FencedRDTSC();
// Calculate differences.
const u64 timer_diff = static_cast<u64>(
std::chrono::duration_cast<std::chrono::nanoseconds>(end_time - start_time).count());
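
The measurement is unchanged in spirit: the rest of the function (below the shown context) presumably scales the tick delta by the elapsed wall-clock time. With illustrative numbers only: if the two FencedRDTSC() samples differ by 600,000,000 ticks across the 200 ms (200,000,000 ns) sleep, the estimate works out to 600,000,000 * 1,000,000,000 / 200,000,000 = 3,000,000,000 Hz, i.e. a 3 GHz TSC.
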
@@ -44,8 +65,7 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequen
u64 rtsc_frequency_)
: WallClock(emulated_cpu_frequency_, emulated_clock_frequency_, true), rtsc_frequency{
rtsc_frequency_} {
- _mm_mfence();
- time_point.inner.last_measure = __rdtsc();
+ time_point.inner.last_measure = FencedRDTSC();
time_point.inner.accumulated_ticks = 0U;
ns_rtsc_factor = GetFixedPoint64Factor(NS_RATIO, rtsc_frequency);
us_rtsc_factor = GetFixedPoint64Factor(US_RATIO, rtsc_frequency);
@@ -57,10 +77,10 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequen
u64 NativeClock::GetRTSC() {
TimePoint new_time_point{};
TimePoint current_time_point{};
+
+ current_time_point.pack = Common::AtomicLoad128(time_point.pack.data());
do {
- current_time_point.pack = time_point.pack;
- _mm_mfence();
- const u64 current_measure = __rdtsc();
+ const u64 current_measure = FencedRDTSC();
u64 diff = current_measure - current_time_point.inner.last_measure;
diff = diff & ~static_cast<u64>(static_cast<s64>(diff) >> 63); // max(diff, 0)
new_time_point.inner.last_measure = current_measure > current_time_point.inner.last_measure
@@ -68,22 +88,21 @@ u64 NativeClock::GetRTSC() {
: current_time_point.inner.last_measure;
new_time_point.inner.accumulated_ticks = current_time_point.inner.accumulated_ticks + diff;
} while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack,
- current_time_point.pack));
- /// The clock cannot be more precise than the guest timer, remove the lower bits
- return new_time_point.inner.accumulated_ticks & inaccuracy_mask;
+ current_time_point.pack, current_time_point.pack));
+ return new_time_point.inner.accumulated_ticks;
}
void NativeClock::Pause(bool is_paused) {
if (!is_paused) {
TimePoint current_time_point{};
TimePoint new_time_point{};
+
+ current_time_point.pack = Common::AtomicLoad128(time_point.pack.data());
do {
- current_time_point.pack = time_point.pack;
new_time_point.pack = current_time_point.pack;
- _mm_mfence();
- new_time_point.inner.last_measure = __rdtsc();
+ new_time_point.inner.last_measure = FencedRDTSC();
} while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack,
- current_time_point.pack));
+ current_time_point.pack, current_time_point.pack));
}
}
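
Both loops now take their initial snapshot with AtomicLoad128 once before entering, and the extra argument to AtomicCompareAndSwap evidently receives the observed value on failure (which is why the per-iteration non-atomic re-read of time_point.pack was removed). The shape is the standard compare-exchange retry loop; a portable single-word analogue in plain C++, as a sketch only and not the project's 128-bit helper:

    #include <atomic>
    #include <cstdint>

    std::atomic<std::uint64_t> accumulated{0};

    // Add `diff` with a compare-exchange retry loop; on failure, `expected`
    // is refreshed with the value another thread just wrote, so the next
    // iteration retries against fresh data.
    void Accumulate(std::uint64_t diff) {
        std::uint64_t expected = accumulated.load(std::memory_order_relaxed);
        std::uint64_t desired;
        do {
            desired = expected + diff;
        } while (!accumulated.compare_exchange_weak(expected, desired,
                                                    std::memory_order_relaxed));
    }
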
diff --git a/src/common/x64/native_clock.h b/src/common/x64/native_clock.h
index 7cbd400d2..38ae7a462 100644
--- a/src/common/x64/native_clock.h
+++ b/src/common/x64/native_clock.h
@@ -1,11 +1,8 @@
-// Copyright 2020 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
-#include <optional>
-
#include "common/wall_clock.h"
namespace Common {
@@ -40,12 +37,8 @@ private:
} inner;
};
- /// value used to reduce the native clocks accuracy as some apss rely on
- /// undefined behavior where the level of accuracy in the clock shouldn't
- /// be higher.
- static constexpr u64 inaccuracy_mask = ~(UINT64_C(0x400) - 1);
-
TimePoint time_point;
+
// factors
u64 clock_rtsc_factor{};
u64 cpu_rtsc_factor{};
diff --git a/src/common/x64/xbyak_abi.h b/src/common/x64/xbyak_abi.h
index 87b3d63a4..67e6e63c8 100644
--- a/src/common/x64/xbyak_abi.h
+++ b/src/common/x64/xbyak_abi.h
@@ -1,6 +1,5 @@
-// Copyright 2016 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2016 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/x64/xbyak_util.h b/src/common/x64/xbyak_util.h
index 44d2558f1..250e5cddb 100644
--- a/src/common/x64/xbyak_util.h
+++ b/src/common/x64/xbyak_util.h
@@ -1,6 +1,5 @@
-// Copyright 2016 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: 2016 Citra Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
diff --git a/src/common/zstd_compression.cpp b/src/common/zstd_compression.cpp
index 695b96a43..b71a41b78 100644
--- a/src/common/zstd_compression.cpp
+++ b/src/common/zstd_compression.cpp
@@ -1,6 +1,5 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <zstd.h>
diff --git a/src/common/zstd_compression.h b/src/common/zstd_compression.h
index bbce14f4e..a5ab2d05b 100644
--- a/src/common/zstd_compression.h
+++ b/src/common/zstd_compression.h
@@ -1,6 +1,5 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
+// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once