| author | Liam &lt;byteslice@airmail.cc&gt; | 2023-08-11 03:34:43 +0200 |
|---|---|---|
| committer | Liam &lt;byteslice@airmail.cc&gt; | 2023-08-15 23:47:25 +0200 |
| commit | 86f6b6b7b2d930e8203114332b04a5c49a780b06 (patch) | |
| tree | bf7ff58b0a36051d3c3489a40999d80357c570d0 /src/core/file_sys/fssystem | |
| parent | Merge pull request #11287 from liamwhite/replaced-bytes (diff) | |
Diffstat (limited to 'src/core/file_sys/fssystem')
44 files changed, 7720 insertions, 0 deletions
diff --git a/src/core/file_sys/fssystem/fs_i_storage.h b/src/core/file_sys/fssystem/fs_i_storage.h
new file mode 100644
index 000000000..416dd57b8
--- /dev/null
+++ b/src/core/file_sys/fssystem/fs_i_storage.h
@@ -0,0 +1,58 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/overflow.h"
+#include "core/file_sys/errors.h"
+#include "core/file_sys/vfs.h"
+
+namespace FileSys {
+
+class IStorage : public VfsFile {
+public:
+    virtual std::string GetName() const override {
+        return {};
+    }
+
+    virtual VirtualDir GetContainingDirectory() const override {
+        return {};
+    }
+
+    virtual bool IsWritable() const override {
+        return true;
+    }
+
+    virtual bool IsReadable() const override {
+        return true;
+    }
+
+    virtual bool Resize(size_t size) override {
+        return false;
+    }
+
+    virtual bool Rename(std::string_view name) override {
+        return false;
+    }
+
+    static inline Result CheckAccessRange(s64 offset, s64 size, s64 total_size) {
+        R_UNLESS(offset >= 0, ResultInvalidOffset);
+        R_UNLESS(size >= 0, ResultInvalidSize);
+        R_UNLESS(Common::WrappingAdd(offset, size) >= offset, ResultOutOfRange);
+        R_UNLESS(offset + size <= total_size, ResultOutOfRange);
+        R_SUCCEED();
+    }
+};
+
+class IReadOnlyStorage : public IStorage {
+public:
+    virtual bool IsWritable() const override {
+        return false;
+    }
+
+    virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
+        return 0;
+    }
+};
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fs_types.h b/src/core/file_sys/fssystem/fs_types.h
new file mode 100644
index 000000000..43aeaf447
--- /dev/null
+++ b/src/core/file_sys/fssystem/fs_types.h
@@ -0,0 +1,46 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_funcs.h"
+
+namespace FileSys {
+
+struct Int64 {
+    u32 low;
+    u32 high;
+
+    constexpr void Set(s64 v) {
+        this->low = static_cast<u32>((v & static_cast<u64>(0x00000000FFFFFFFFULL)) >> 0);
+        this->high = static_cast<u32>((v & static_cast<u64>(0xFFFFFFFF00000000ULL)) >> 32);
+    }
+
+    constexpr s64 Get() const {
+        return (static_cast<s64>(this->high) << 32) | (static_cast<s64>(this->low));
+    }
+
+    constexpr Int64& operator=(s64 v) {
+        this->Set(v);
+        return *this;
+    }
+
+    constexpr operator s64() const {
+        return this->Get();
+    }
+};
+
+struct HashSalt {
+    static constexpr size_t Size = 32;
+
+    std::array<u8, Size> value;
+};
+static_assert(std::is_trivial_v<HashSalt>);
+static_assert(sizeof(HashSalt) == HashSalt::Size);
+
+constexpr inline size_t IntegrityMinLayerCount = 2;
+constexpr inline size_t IntegrityMaxLayerCount = 7;
+constexpr inline size_t IntegrityLayerCountSave = 5;
+constexpr inline size_t IntegrityLayerCountSaveDataMeta = 4;
+
+} // namespace FileSys
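Two details above are worth a gloss: `CheckAccessRange` rejects `offset + size` overflow before comparing against the total size, and `Int64` lets 64-bit offsets live in structures with only 4-byte alignment. A standalone sketch of the overflow guard (plain C++; names are hypothetical and not part of the commit):

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the CheckAccessRange logic: reject negative ranges, then
// detect overflow of offset + size before the bounds comparison.
bool IsAccessRangeValid(int64_t offset, int64_t size, int64_t total_size) {
    if (offset < 0 || size < 0) {
        return false;
    }
    // Wrapping add, as Common::WrappingAdd presumably does: compute the sum in
    // unsigned arithmetic; if it wraps below offset, the range cannot be valid.
    const int64_t end =
        static_cast<int64_t>(static_cast<uint64_t>(offset) + static_cast<uint64_t>(size));
    return end >= offset && end <= total_size;
}

int main() {
    assert(IsAccessRangeValid(0x100, 0x200, 0x1000));
    assert(!IsAccessRangeValid(0x800, 0x900, 0x1000)); // past the end
    assert(!IsAccessRangeValid(1, INT64_MAX, INT64_MAX)); // wraps negative
    return 0;
}
```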
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp
new file mode 100644
index 000000000..bf189c606
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp
@@ -0,0 +1,252 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h"
+#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"
+#include "core/file_sys/fssystem/fssystem_nca_header.h"
+#include "core/file_sys/vfs_offset.h"
+
+namespace FileSys {
+
+namespace {
+
+class SoftwareDecryptor final : public AesCtrCounterExtendedStorage::IDecryptor {
+public:
+    virtual void Decrypt(
+        u8* buf, size_t buf_size, const std::array<u8, AesCtrCounterExtendedStorage::KeySize>& key,
+        const std::array<u8, AesCtrCounterExtendedStorage::IvSize>& iv) override final;
+};
+
+} // namespace
+
+Result AesCtrCounterExtendedStorage::CreateSoftwareDecryptor(std::unique_ptr<IDecryptor>* out) {
+    std::unique_ptr<IDecryptor> decryptor = std::make_unique<SoftwareDecryptor>();
+    R_UNLESS(decryptor != nullptr, ResultAllocationMemoryFailedInAesCtrCounterExtendedStorageA);
+    *out = std::move(decryptor);
+    R_SUCCEED();
+}
+
+Result AesCtrCounterExtendedStorage::Initialize(const void* key, size_t key_size, u32 secure_value,
+                                                VirtualFile data_storage,
+                                                VirtualFile table_storage) {
+    // Read and verify the bucket tree header.
+    BucketTree::Header header;
+    table_storage->ReadObject(std::addressof(header), 0);
+    R_TRY(header.Verify());
+
+    // Determine extents.
+    const auto node_storage_size = QueryNodeStorageSize(header.entry_count);
+    const auto entry_storage_size = QueryEntryStorageSize(header.entry_count);
+    const auto node_storage_offset = QueryHeaderStorageSize();
+    const auto entry_storage_offset = node_storage_offset + node_storage_size;
+
+    // Create a software decryptor.
+    std::unique_ptr<IDecryptor> sw_decryptor;
+    R_TRY(CreateSoftwareDecryptor(std::addressof(sw_decryptor)));
+
+    // Initialize.
+    R_RETURN(this->Initialize(
+        key, key_size, secure_value, 0, data_storage,
+        std::make_shared<OffsetVfsFile>(table_storage, node_storage_size, node_storage_offset),
+        std::make_shared<OffsetVfsFile>(table_storage, entry_storage_size, entry_storage_offset),
+        header.entry_count, std::move(sw_decryptor)));
+}
+
+Result AesCtrCounterExtendedStorage::Initialize(const void* key, size_t key_size, u32 secure_value,
+                                                s64 counter_offset, VirtualFile data_storage,
+                                                VirtualFile node_storage, VirtualFile entry_storage,
+                                                s32 entry_count,
+                                                std::unique_ptr<IDecryptor>&& decryptor) {
+    // Validate preconditions.
+    ASSERT(key != nullptr);
+    ASSERT(key_size == KeySize);
+    ASSERT(counter_offset >= 0);
+    ASSERT(decryptor != nullptr);
+
+    // Initialize the bucket tree table.
+    if (entry_count > 0) {
+        R_TRY(
+            m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry), entry_count));
+    } else {
+        m_table.Initialize(NodeSize, 0);
+    }
+
+    // Set members.
+    m_data_storage = data_storage;
+    std::memcpy(m_key.data(), key, key_size);
+    m_secure_value = secure_value;
+    m_counter_offset = counter_offset;
+    m_decryptor = std::move(decryptor);
+
+    R_SUCCEED();
+}
+
+void AesCtrCounterExtendedStorage::Finalize() {
+    if (this->IsInitialized()) {
+        m_table.Finalize();
+        m_data_storage = VirtualFile();
+    }
+}
+
+Result AesCtrCounterExtendedStorage::GetEntryList(Entry* out_entries, s32* out_entry_count,
+                                                  s32 entry_count, s64 offset, s64 size) {
+    // Validate pre-conditions.
+    ASSERT(offset >= 0);
+    ASSERT(size >= 0);
+    ASSERT(this->IsInitialized());
+
+    // Clear the out count.
+    R_UNLESS(out_entry_count != nullptr, ResultNullptrArgument);
+    *out_entry_count = 0;
+
+    // Succeed if there's no range.
+    R_SUCCEED_IF(size == 0);
+
+    // If we have an output array, we need it to be non-null.
+    R_UNLESS(out_entries != nullptr || entry_count == 0, ResultNullptrArgument);
+
+    // Check that our range is valid.
+    BucketTree::Offsets table_offsets;
+    R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
+
+    R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
+
+    // Find the offset in our tree.
+    BucketTree::Visitor visitor;
+    R_TRY(m_table.Find(std::addressof(visitor), offset));
+    {
+        const auto entry_offset = visitor.Get<Entry>()->GetOffset();
+        R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
+                 ResultInvalidAesCtrCounterExtendedEntryOffset);
+    }
+
+    // Prepare to loop over entries.
+    const auto end_offset = offset + static_cast<s64>(size);
+    s32 count = 0;
+
+    auto cur_entry = *visitor.Get<Entry>();
+    while (cur_entry.GetOffset() < end_offset) {
+        // Try to write the entry to the out list.
+        if (entry_count != 0) {
+            if (count >= entry_count) {
+                break;
+            }
+            std::memcpy(out_entries + count, std::addressof(cur_entry), sizeof(Entry));
+        }
+
+        count++;
+
+        // Advance.
+        if (visitor.CanMoveNext()) {
+            R_TRY(visitor.MoveNext());
+            cur_entry = *visitor.Get<Entry>();
+        } else {
+            break;
+        }
+    }
+
+    // Write the output count.
+    *out_entry_count = count;
+    R_SUCCEED();
+}
+
+size_t AesCtrCounterExtendedStorage::Read(u8* buffer, size_t size, size_t offset) const {
+    // Validate preconditions.
+    ASSERT(offset >= 0);
+    ASSERT(this->IsInitialized());
+
+    // Allow zero size.
+    if (size == 0) {
+        return size;
+    }
+
+    // Validate arguments.
+    ASSERT(buffer != nullptr);
+    ASSERT(Common::IsAligned(offset, BlockSize));
+    ASSERT(Common::IsAligned(size, BlockSize));
+
+    BucketTree::Offsets table_offsets;
+    ASSERT(R_SUCCEEDED(m_table.GetOffsets(std::addressof(table_offsets))));
+
+    ASSERT(table_offsets.IsInclude(offset, size));
+
+    // Read the data.
+    m_data_storage->Read(buffer, size, offset);
+
+    // Find the offset in our tree.
+    BucketTree::Visitor visitor;
+    ASSERT(R_SUCCEEDED(m_table.Find(std::addressof(visitor), offset)));
+    {
+        const auto entry_offset = visitor.Get<Entry>()->GetOffset();
+        ASSERT(Common::IsAligned(entry_offset, BlockSize));
+        ASSERT(0 <= entry_offset && table_offsets.IsInclude(entry_offset));
+    }
+
+    // Prepare to read in chunks.
+    u8* cur_data = static_cast<u8*>(buffer);
+    auto cur_offset = offset;
+    const auto end_offset = offset + static_cast<s64>(size);
+
+    while (cur_offset < end_offset) {
+        // Get the current entry.
+        const auto cur_entry = *visitor.Get<Entry>();
+
+        // Get and validate the entry's offset.
+        const auto cur_entry_offset = cur_entry.GetOffset();
+        ASSERT(static_cast<size_t>(cur_entry_offset) <= cur_offset);
+
+        // Get and validate the next entry offset.
+        s64 next_entry_offset;
+        if (visitor.CanMoveNext()) {
+            ASSERT(R_SUCCEEDED(visitor.MoveNext()));
+            next_entry_offset = visitor.Get<Entry>()->GetOffset();
+            ASSERT(table_offsets.IsInclude(next_entry_offset));
+        } else {
+            next_entry_offset = table_offsets.end_offset;
+        }
+        ASSERT(Common::IsAligned(next_entry_offset, BlockSize));
+        ASSERT(cur_offset < static_cast<size_t>(next_entry_offset));
+
+        // Get the offset of the entry in the data we read.
+        const auto data_offset = cur_offset - cur_entry_offset;
+        const auto data_size = (next_entry_offset - cur_entry_offset) - data_offset;
+        ASSERT(data_size > 0);
+
+        // Determine how much is left.
+        const auto remaining_size = end_offset - cur_offset;
+        const auto cur_size = static_cast<size_t>(std::min(remaining_size, data_size));
+        ASSERT(cur_size <= size);
+
+        // If necessary, perform decryption.
+        if (cur_entry.encryption_value == Entry::Encryption::Encrypted) {
+            // Make the CTR for the data we're decrypting.
+            const auto counter_offset = m_counter_offset + cur_entry_offset + data_offset;
+            NcaAesCtrUpperIv upper_iv = {
+                .part = {.generation = static_cast<u32>(cur_entry.generation),
+                         .secure_value = m_secure_value}};
+
+            std::array<u8, IvSize> iv;
+            AesCtrStorage::MakeIv(iv.data(), IvSize, upper_iv.value, counter_offset);
+
+            // Decrypt.
+            m_decryptor->Decrypt(cur_data, cur_size, m_key, iv);
+        }
+
+        // Advance.
+        cur_data += cur_size;
+        cur_offset += cur_size;
+    }
+
+    return size;
+}
+
+void SoftwareDecryptor::Decrypt(u8* buf, size_t buf_size,
+                                const std::array<u8, AesCtrCounterExtendedStorage::KeySize>& key,
+                                const std::array<u8, AesCtrCounterExtendedStorage::IvSize>& iv) {
+    Core::Crypto::AESCipher<Core::Crypto::Key128, AesCtrCounterExtendedStorage::KeySize> cipher(
+        key, Core::Crypto::Mode::CTR);
+    cipher.SetIV(iv);
+    cipher.Transcode(buf, buf_size, buf, Core::Crypto::Op::Decrypt);
+}
+
+} // namespace FileSys
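For orientation: the CTR IV assembled in `Read` above is 16 bytes, the big-endian 64-bit upper half (secure value and generation, packed via `NcaAesCtrUpperIv`) followed by the big-endian 64-bit block index. A hedged sketch of that packing, assuming the layout `AesCtrStorage::MakeIv` implements:

```cpp
#include <array>
#include <cstdint>

// Sketch: 16-byte CTR IV = big-endian upper half || big-endian block index.
// Layout assumed from AesCtrStorage::MakeIv (u64_be, then s64_be).
std::array<uint8_t, 16> MakeCtrIv(uint64_t upper, int64_t byte_offset) {
    constexpr int64_t BlockSize = 0x10; // AES block size
    std::array<uint8_t, 16> iv{};
    const uint64_t counter = static_cast<uint64_t>(byte_offset / BlockSize);
    for (int i = 0; i < 8; ++i) {
        iv[i] = static_cast<uint8_t>(upper >> (56 - 8 * i));       // big-endian upper
        iv[8 + i] = static_cast<uint8_t>(counter >> (56 - 8 * i)); // big-endian counter
    }
    return iv;
}
```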
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h
new file mode 100644
index 000000000..a79904fad
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h
@@ -0,0 +1,114 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <optional>
+
+#include "common/literals.h"
+#include "core/file_sys/fssystem/fs_i_storage.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
+
+namespace FileSys {
+
+using namespace Common::Literals;
+
+class AesCtrCounterExtendedStorage : public IReadOnlyStorage {
+    YUZU_NON_COPYABLE(AesCtrCounterExtendedStorage);
+    YUZU_NON_MOVEABLE(AesCtrCounterExtendedStorage);
+
+public:
+    static constexpr size_t BlockSize = 0x10;
+    static constexpr size_t KeySize = 0x10;
+    static constexpr size_t IvSize = 0x10;
+    static constexpr size_t NodeSize = 16_KiB;
+
+    class IDecryptor {
+    public:
+        virtual ~IDecryptor() {}
+        virtual void Decrypt(u8* buf, size_t buf_size, const std::array<u8, KeySize>& key,
+                             const std::array<u8, IvSize>& iv) = 0;
+    };
+
+    struct Entry {
+        enum class Encryption : u8 {
+            Encrypted = 0,
+            NotEncrypted = 1,
+        };
+
+        std::array<u8, sizeof(s64)> offset;
+        Encryption encryption_value;
+        std::array<u8, 3> reserved;
+        s32 generation;
+
+        void SetOffset(s64 value) {
+            std::memcpy(this->offset.data(), std::addressof(value), sizeof(s64));
+        }
+
+        s64 GetOffset() const {
+            s64 value;
+            std::memcpy(std::addressof(value), this->offset.data(), sizeof(s64));
+            return value;
+        }
+    };
+    static_assert(sizeof(Entry) == 0x10);
+    static_assert(alignof(Entry) == 4);
+    static_assert(std::is_trivial_v<Entry>);
+
+public:
+    static constexpr s64 QueryHeaderStorageSize() {
+        return BucketTree::QueryHeaderStorageSize();
+    }
+
+    static constexpr s64 QueryNodeStorageSize(s32 entry_count) {
+        return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count);
+    }
+
+    static constexpr s64 QueryEntryStorageSize(s32 entry_count) {
+        return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count);
+    }
+
+    static Result CreateSoftwareDecryptor(std::unique_ptr<IDecryptor>* out);
+
+private:
+    mutable BucketTree m_table;
+    VirtualFile m_data_storage;
+    std::array<u8, KeySize> m_key;
+    u32 m_secure_value;
+    s64 m_counter_offset;
+    std::unique_ptr<IDecryptor> m_decryptor;
+
+public:
+    AesCtrCounterExtendedStorage()
+        : m_table(), m_data_storage(), m_secure_value(), m_counter_offset(), m_decryptor() {}
+    virtual ~AesCtrCounterExtendedStorage() {
+        this->Finalize();
+    }
+
+    Result Initialize(const void* key, size_t key_size, u32 secure_value, s64 counter_offset,
+                      VirtualFile data_storage, VirtualFile node_storage,
+                      VirtualFile entry_storage, s32 entry_count,
+                      std::unique_ptr<IDecryptor>&& decryptor);
+    void Finalize();
+
+    bool IsInitialized() const {
+        return m_table.IsInitialized();
+    }
+
+    virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
+
+    virtual size_t GetSize() const override {
+        BucketTree::Offsets offsets;
+        ASSERT(R_SUCCEEDED(m_table.GetOffsets(std::addressof(offsets))));
+
+        return offsets.end_offset;
+    }
+
+    Result GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count, s64 offset,
+                        s64 size);
+
+private:
+    Result Initialize(const void* key, size_t key_size, u32 secure_value, VirtualFile data_storage,
+                      VirtualFile table_storage);
+};
+
+} // namespace FileSys
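`Entry` stores its 64-bit offset as a byte array and round-trips it with `memcpy`, which is what keeps `alignof(Entry) == 4` true while still holding an `s64`. The same pattern in isolation (hypothetical type, not from the commit):

```cpp
#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>

// Sketch: hold an int64 in a struct that must stay 4-byte aligned, the way
// AesCtrCounterExtendedStorage::Entry does.
struct PackedEntry {
    std::array<uint8_t, sizeof(int64_t)> offset; // byte storage, no alignment demand
    uint32_t generation;

    void SetOffset(int64_t v) {
        std::memcpy(offset.data(), &v, sizeof(v)); // well-defined for any alignment
    }
    int64_t GetOffset() const {
        int64_t v;
        std::memcpy(&v, offset.data(), sizeof(v));
        return v;
    }
};
static_assert(alignof(PackedEntry) == 4);

int main() {
    PackedEntry e{};
    e.SetOffset(0x0123456789ABCDEF);
    assert(e.GetOffset() == 0x0123456789ABCDEF);
    return 0;
}
```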
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp
new file mode 100644
index 000000000..b65aca18d
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp
@@ -0,0 +1,129 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/alignment.h"
+#include "common/swap.h"
+#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"
+#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
+#include "core/file_sys/fssystem/fssystem_utility.h"
+
+namespace FileSys {
+
+void AesCtrStorage::MakeIv(void* dst, size_t dst_size, u64 upper, s64 offset) {
+    ASSERT(dst != nullptr);
+    ASSERT(dst_size == IvSize);
+    ASSERT(offset >= 0);
+
+    const uintptr_t out_addr = reinterpret_cast<uintptr_t>(dst);
+
+    *reinterpret_cast<u64_be*>(out_addr + 0) = upper;
+    *reinterpret_cast<s64_be*>(out_addr + sizeof(u64)) = static_cast<s64>(offset / BlockSize);
+}
+
+AesCtrStorage::AesCtrStorage(VirtualFile base, const void* key, size_t key_size, const void* iv,
+                             size_t iv_size)
+    : m_base_storage(std::move(base)) {
+    ASSERT(m_base_storage != nullptr);
+    ASSERT(key != nullptr);
+    ASSERT(iv != nullptr);
+    ASSERT(key_size == KeySize);
+    ASSERT(iv_size == IvSize);
+
+    std::memcpy(m_key.data(), key, KeySize);
+    std::memcpy(m_iv.data(), iv, IvSize);
+
+    m_cipher.emplace(m_key, Core::Crypto::Mode::CTR);
+}
+
+size_t AesCtrStorage::Read(u8* buffer, size_t size, size_t offset) const {
+    // Allow zero-size reads.
+    if (size == 0) {
+        return size;
+    }
+
+    // Ensure buffer is valid.
+    ASSERT(buffer != nullptr);
+
+    // We can only read at block aligned offsets.
+    ASSERT(Common::IsAligned(offset, BlockSize));
+    ASSERT(Common::IsAligned(size, BlockSize));
+
+    // Read the data.
+    m_base_storage->Read(buffer, size, offset);
+
+    // Setup the counter.
+    std::array<u8, IvSize> ctr;
+    std::memcpy(ctr.data(), m_iv.data(), IvSize);
+    AddCounter(ctr.data(), IvSize, offset / BlockSize);
+
+    // Decrypt.
+    m_cipher->SetIV(ctr);
+    m_cipher->Transcode(buffer, size, buffer, Core::Crypto::Op::Decrypt);
+
+    return size;
+}
+
+size_t AesCtrStorage::Write(const u8* buffer, size_t size, size_t offset) {
+    // Allow zero-size writes.
+    if (size == 0) {
+        return size;
+    }
+
+    // Ensure buffer is valid.
+    ASSERT(buffer != nullptr);
+
+    // We can only write at block aligned offsets.
+    ASSERT(Common::IsAligned(offset, BlockSize));
+    ASSERT(Common::IsAligned(size, BlockSize));
+
+    // Get a pooled buffer.
+    PooledBuffer pooled_buffer;
+    const bool use_work_buffer = true;
+    if (use_work_buffer) {
+        pooled_buffer.Allocate(size, BlockSize);
+    }
+
+    // Setup the counter.
+    std::array<u8, IvSize> ctr;
+    std::memcpy(ctr.data(), m_iv.data(), IvSize);
+    AddCounter(ctr.data(), IvSize, offset / BlockSize);
+
+    // Loop until all data is written.
+    size_t remaining = size;
+    s64 cur_offset = 0;
+    while (remaining > 0) {
+        // Determine data we're writing and where.
+        const size_t write_size =
+            use_work_buffer ? std::min(pooled_buffer.GetSize(), remaining) : remaining;
+
+        void* write_buf;
+        if (use_work_buffer) {
+            write_buf = pooled_buffer.GetBuffer();
+        } else {
+            write_buf = const_cast<u8*>(buffer) + cur_offset;
+        }
+
+        // Encrypt the next chunk of the source.
+        m_cipher->SetIV(ctr);
+        m_cipher->Transcode(buffer + cur_offset, write_size, reinterpret_cast<u8*>(write_buf),
+                            Core::Crypto::Op::Encrypt);
+
+        // Write the encrypted data.
+        m_base_storage->Write(reinterpret_cast<u8*>(write_buf), write_size, offset + cur_offset);
+
+        // Advance.
+        cur_offset += write_size;
+        remaining -= write_size;
+        if (remaining > 0) {
+            AddCounter(ctr.data(), IvSize, write_size / BlockSize);
+        }
+    }
+
+    return size;
+}
+
+size_t AesCtrStorage::GetSize() const {
+    return m_base_storage->GetSize();
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h
new file mode 100644
index 000000000..bceb1f9ad
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h
@@ -0,0 +1,43 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <optional>
+
+#include "core/crypto/aes_util.h"
+#include "core/crypto/key_manager.h"
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fs_i_storage.h"
+#include "core/file_sys/vfs.h"
+
+namespace FileSys {
+
+class AesCtrStorage : public IStorage {
+    YUZU_NON_COPYABLE(AesCtrStorage);
+    YUZU_NON_MOVEABLE(AesCtrStorage);
+
+public:
+    static constexpr size_t BlockSize = 0x10;
+    static constexpr size_t KeySize = 0x10;
+    static constexpr size_t IvSize = 0x10;
+
+private:
+    VirtualFile m_base_storage;
+    std::array<u8, KeySize> m_key;
+    std::array<u8, IvSize> m_iv;
+    mutable std::optional<Core::Crypto::AESCipher<Core::Crypto::Key128>> m_cipher;
+
+public:
+    static void MakeIv(void* dst, size_t dst_size, u64 upper, s64 offset);
+
+public:
+    AesCtrStorage(VirtualFile base, const void* key, size_t key_size, const void* iv,
+                  size_t iv_size);
+
+    virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
+    virtual size_t Write(const u8* buffer, size_t size, size_t offset) override;
+    virtual size_t GetSize() const override;
+};
+
+} // namespace FileSys
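Since CTR mode is a pure XOR keystream, encrypt and decrypt are the same transform, and resetting the IV replays the keystream. A round-trip sketch against the same `AESCipher` API the files above use (key and IV contents are placeholders):

```cpp
// Round-trip sketch; key/iv values are hypothetical.
std::array<u8, 0x10> key{};
std::array<u8, 0x10> iv{};
std::vector<u8> data(0x40, 0xAA);

Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(key, Core::Crypto::Mode::CTR);
cipher.SetIV(iv);
cipher.Transcode(data.data(), data.size(), data.data(), Core::Crypto::Op::Encrypt);
cipher.SetIV(iv); // rewind the keystream before decrypting
cipher.Transcode(data.data(), data.size(), data.data(), Core::Crypto::Op::Decrypt);
// data is 0x40 bytes of 0xAA again.
```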
diff --git a/src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp
new file mode 100644
index 000000000..022424229
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp
@@ -0,0 +1,112 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/alignment.h"
+#include "common/swap.h"
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"
+#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
+#include "core/file_sys/fssystem/fssystem_utility.h"
+
+namespace FileSys {
+
+void AesXtsStorage::MakeAesXtsIv(void* dst, size_t dst_size, s64 offset, size_t block_size) {
+    ASSERT(dst != nullptr);
+    ASSERT(dst_size == IvSize);
+    ASSERT(offset >= 0);
+
+    const uintptr_t out_addr = reinterpret_cast<uintptr_t>(dst);
+
+    *reinterpret_cast<s64_be*>(out_addr + sizeof(s64)) = offset / block_size;
+}
+
+AesXtsStorage::AesXtsStorage(VirtualFile base, const void* key1, const void* key2, size_t key_size,
+                             const void* iv, size_t iv_size, size_t block_size)
+    : m_base_storage(std::move(base)), m_block_size(block_size), m_mutex() {
+    ASSERT(m_base_storage != nullptr);
+    ASSERT(key1 != nullptr);
+    ASSERT(key2 != nullptr);
+    ASSERT(iv != nullptr);
+    ASSERT(key_size == KeySize);
+    ASSERT(iv_size == IvSize);
+    ASSERT(Common::IsAligned(m_block_size, AesBlockSize));
+
+    std::memcpy(m_key.data() + 0, key1, KeySize / 2);
+    std::memcpy(m_key.data() + 0x10, key2, KeySize / 2);
+    std::memcpy(m_iv.data(), iv, IvSize);
+
+    m_cipher.emplace(m_key, Core::Crypto::Mode::XTS);
+}
+
+size_t AesXtsStorage::Read(u8* buffer, size_t size, size_t offset) const {
+    // Allow zero-size reads.
+    if (size == 0) {
+        return size;
+    }
+
+    // Ensure buffer is valid.
+    ASSERT(buffer != nullptr);
+
+    // We can only read at block aligned offsets.
+    ASSERT(Common::IsAligned(offset, AesBlockSize));
+    ASSERT(Common::IsAligned(size, AesBlockSize));
+
+    // Read the data.
+    m_base_storage->Read(buffer, size, offset);
+
+    // Setup the counter.
+    std::array<u8, IvSize> ctr;
+    std::memcpy(ctr.data(), m_iv.data(), IvSize);
+    AddCounter(ctr.data(), IvSize, offset / m_block_size);
+
+    // Handle any unaligned data before the start.
+    size_t processed_size = 0;
+    if ((offset % m_block_size) != 0) {
+        // Determine the size of the pre-data read.
+        const size_t skip_size =
+            static_cast<size_t>(offset - Common::AlignDown(offset, m_block_size));
+        const size_t data_size = std::min(size, m_block_size - skip_size);
+
+        // Decrypt into a pooled buffer.
+        {
+            PooledBuffer tmp_buf(m_block_size, m_block_size);
+            ASSERT(tmp_buf.GetSize() >= m_block_size);
+
+            std::memset(tmp_buf.GetBuffer(), 0, skip_size);
+            std::memcpy(tmp_buf.GetBuffer() + skip_size, buffer, data_size);
+
+            m_cipher->SetIV(ctr);
+            m_cipher->Transcode(tmp_buf.GetBuffer(), m_block_size, tmp_buf.GetBuffer(),
+                                Core::Crypto::Op::Decrypt);
+
+            std::memcpy(buffer, tmp_buf.GetBuffer() + skip_size, data_size);
+        }
+
+        AddCounter(ctr.data(), IvSize, 1);
+        processed_size += data_size;
+        ASSERT(processed_size == std::min(size, m_block_size - skip_size));
+    }
+
+    // Decrypt aligned chunks.
+    char* cur = reinterpret_cast<char*>(buffer) + processed_size;
+    size_t remaining = size - processed_size;
+    while (remaining > 0) {
+        const size_t cur_size = std::min(m_block_size, remaining);
+
+        m_cipher->SetIV(ctr);
+        m_cipher->Transcode(cur, cur_size, cur, Core::Crypto::Op::Decrypt);
+
+        remaining -= cur_size;
+        cur += cur_size;
+
+        AddCounter(ctr.data(), IvSize, 1);
+    }
+
+    return size;
+}
+
+size_t AesXtsStorage::GetSize() const {
+    return m_base_storage->GetSize();
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_aes_xts_storage.h b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.h
new file mode 100644
index 000000000..2307a2659
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_aes_xts_storage.h
@@ -0,0 +1,42 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <optional>
+
+#include "core/crypto/aes_util.h"
+#include "core/crypto/key_manager.h"
+#include "core/file_sys/fssystem/fs_i_storage.h"
+
+namespace FileSys {
+
+class AesXtsStorage : public IReadOnlyStorage {
+    YUZU_NON_COPYABLE(AesXtsStorage);
+    YUZU_NON_MOVEABLE(AesXtsStorage);
+
+public:
+    static constexpr size_t AesBlockSize = 0x10;
+    static constexpr size_t KeySize = 0x20;
+    static constexpr size_t IvSize = 0x10;
+
+private:
+    VirtualFile m_base_storage;
+    std::array<u8, KeySize> m_key;
+    std::array<u8, IvSize> m_iv;
+    const size_t m_block_size;
+    std::mutex m_mutex;
+    mutable std::optional<Core::Crypto::AESCipher<Core::Crypto::Key256>> m_cipher;
+
+public:
+    static void MakeAesXtsIv(void* dst, size_t dst_size, s64 offset, size_t block_size);
+
+public:
+    AesXtsStorage(VirtualFile base, const void* key1, const void* key2, size_t key_size,
+                  const void* iv, size_t iv_size, size_t block_size);
+
+    virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
+    virtual size_t GetSize() const override;
+};
+
+} // namespace FileSys
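The pre-data branch in `AesXtsStorage::Read` is easiest to follow with concrete numbers. A compile-time sketch, assuming a hypothetical 0x4000-byte sector size:

```cpp
#include <algorithm>
#include <cstddef>

// Reading 0x100 bytes at offset 0x4010 from 0x4000-byte sectors.
constexpr size_t block_size = 0x4000;
constexpr size_t offset = 0x4010;
constexpr size_t size = 0x100;

constexpr size_t skip_size = offset % block_size;                    // 0x10 into the sector
constexpr size_t data_size = std::min(size, block_size - skip_size); // all 0x100 bytes fit
static_assert(skip_size == 0x10);
static_assert(data_size == 0x100);
// The sector starting at 0x4000 is decrypted whole into a temporary buffer,
// and the requested bytes are copied out starting at offset 0x10 within it.
```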
diff --git a/src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h
new file mode 100644
index 000000000..27d34fd17
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage.h
@@ -0,0 +1,146 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/alignment.h"
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fs_i_storage.h"
+#include "core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h"
+#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
+
+namespace FileSys {
+
+template <size_t DataAlign_, size_t BufferAlign_>
+class AlignmentMatchingStorage : public IStorage {
+    YUZU_NON_COPYABLE(AlignmentMatchingStorage);
+    YUZU_NON_MOVEABLE(AlignmentMatchingStorage);
+
+public:
+    static constexpr size_t DataAlign = DataAlign_;
+    static constexpr size_t BufferAlign = BufferAlign_;
+
+    static constexpr size_t DataAlignMax = 0x200;
+    static_assert(DataAlign <= DataAlignMax);
+    static_assert(Common::IsPowerOfTwo(DataAlign));
+    static_assert(Common::IsPowerOfTwo(BufferAlign));
+
+private:
+    VirtualFile m_base_storage;
+    s64 m_base_storage_size;
+
+public:
+    explicit AlignmentMatchingStorage(VirtualFile bs) : m_base_storage(std::move(bs)) {}
+
+    virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
+        // Allocate a work buffer on stack.
+        alignas(DataAlignMax) char work_buf[DataAlign];
+
+        // Succeed if zero size.
+        if (size == 0) {
+            return size;
+        }
+
+        // Validate arguments.
+        ASSERT(buffer != nullptr);
+
+        s64 bs_size = this->GetSize();
+        ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
+
+        return AlignmentMatchingStorageImpl::Read(m_base_storage, work_buf, sizeof(work_buf),
+                                                  DataAlign, BufferAlign, offset, buffer, size);
+    }
+
+    virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
+        // Allocate a work buffer on stack.
+        alignas(DataAlignMax) char work_buf[DataAlign];
+
+        // Succeed if zero size.
+        if (size == 0) {
+            return size;
+        }
+
+        // Validate arguments.
+        ASSERT(buffer != nullptr);
+
+        s64 bs_size = this->GetSize();
+        ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
+
+        return AlignmentMatchingStorageImpl::Write(m_base_storage, work_buf, sizeof(work_buf),
+                                                   DataAlign, BufferAlign, offset, buffer, size);
+    }
+
+    virtual size_t GetSize() const override {
+        return m_base_storage->GetSize();
+    }
+};
+
+template <size_t BufferAlign_>
+class AlignmentMatchingStoragePooledBuffer : public IStorage {
+    YUZU_NON_COPYABLE(AlignmentMatchingStoragePooledBuffer);
+    YUZU_NON_MOVEABLE(AlignmentMatchingStoragePooledBuffer);
+
+public:
+    static constexpr size_t BufferAlign = BufferAlign_;
+
+    static_assert(Common::IsPowerOfTwo(BufferAlign));
+
+private:
+    VirtualFile m_base_storage;
+    s64 m_base_storage_size;
+    size_t m_data_align;
+
+public:
+    explicit AlignmentMatchingStoragePooledBuffer(VirtualFile bs, size_t da)
+        : m_base_storage(std::move(bs)), m_data_align(da) {
+        ASSERT(Common::IsPowerOfTwo(da));
+    }
+
+    virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
+        // Succeed if zero size.
+        if (size == 0) {
+            return size;
+        }
+
+        // Validate arguments.
+        ASSERT(buffer != nullptr);
+
+        s64 bs_size = this->GetSize();
+        ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
+
+        // Allocate a pooled buffer.
+        PooledBuffer pooled_buffer;
+        pooled_buffer.AllocateParticularlyLarge(m_data_align, m_data_align);
+
+        return AlignmentMatchingStorageImpl::Read(m_base_storage, pooled_buffer.GetBuffer(),
+                                                  pooled_buffer.GetSize(), m_data_align,
+                                                  BufferAlign, offset, buffer, size);
+    }
+
+    virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
+        // Succeed if zero size.
+        if (size == 0) {
+            return size;
+        }
+
+        // Validate arguments.
+        ASSERT(buffer != nullptr);
+
+        s64 bs_size = this->GetSize();
+        ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
+
+        // Allocate a pooled buffer.
+        PooledBuffer pooled_buffer;
+        pooled_buffer.AllocateParticularlyLarge(m_data_align, m_data_align);
+
+        return AlignmentMatchingStorageImpl::Write(m_base_storage, pooled_buffer.GetBuffer(),
+                                                   pooled_buffer.GetSize(), m_data_align,
+                                                   BufferAlign, offset, buffer, size);
+    }
+
+    virtual size_t GetSize() const override {
+        return m_base_storage->GetSize();
+    }
+};
+
+} // namespace FileSys
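The implementation that follows splits every request into an aligned core plus partially-covered head and tail sectors. A compile-time sketch of that decomposition under a hypothetical 0x200-byte alignment:

```cpp
#include <cstdint>

// Decompose [offset, offset + size) for data_alignment = 0x200,
// offset = 0x1F0, size = 0x450 (values hypothetical).
constexpr int64_t align = 0x200;
constexpr int64_t offset = 0x1F0;
constexpr int64_t size = 0x450;

constexpr int64_t core_offset = (offset + align - 1) / align * align; // AlignUp   -> 0x200
constexpr int64_t core_end = (offset + size) / align * align;         // AlignDown -> 0x600
static_assert(core_offset == 0x200 && core_end == 0x600);
// head: [0x1F0, 0x200) - one aligned sector read into work_buf, then memcpy
// core: [0x200, 0x600) - read straight into the caller's buffer
// tail: [0x600, 0x640) - aligned sector read(s) into work_buf, then memcpy
```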
diff --git a/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp
new file mode 100644
index 000000000..641c888ae
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp
@@ -0,0 +1,204 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/alignment.h"
+#include "core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h"
+
+namespace FileSys {
+
+namespace {
+
+template <typename T>
+constexpr size_t GetRoundDownDifference(T x, size_t align) {
+    return static_cast<size_t>(x - Common::AlignDown(x, align));
+}
+
+template <typename T>
+constexpr size_t GetRoundUpDifference(T x, size_t align) {
+    return static_cast<size_t>(Common::AlignUp(x, align) - x);
+}
+
+template <typename T>
+size_t GetRoundUpDifference(T* x, size_t align) {
+    return GetRoundUpDifference(reinterpret_cast<uintptr_t>(x), align);
+}
+
+} // namespace
+
+size_t AlignmentMatchingStorageImpl::Read(VirtualFile base_storage, char* work_buf,
+                                          size_t work_buf_size, size_t data_alignment,
+                                          size_t buffer_alignment, s64 offset, u8* buffer,
+                                          size_t size) {
+    // Check preconditions.
+    ASSERT(work_buf_size >= data_alignment);
+
+    // Succeed if zero size.
+    if (size == 0) {
+        return size;
+    }
+
+    // Validate arguments.
+    ASSERT(buffer != nullptr);
+
+    // Determine extents.
+    u8* aligned_core_buffer;
+    s64 core_offset;
+    size_t core_size;
+    size_t buffer_gap;
+    size_t offset_gap;
+    s64 covered_offset;
+
+    const size_t offset_round_up_difference = GetRoundUpDifference(offset, data_alignment);
+    if (Common::IsAligned(reinterpret_cast<uintptr_t>(buffer) + offset_round_up_difference,
+                          buffer_alignment)) {
+        aligned_core_buffer = buffer + offset_round_up_difference;
+
+        core_offset = Common::AlignUp(offset, data_alignment);
+        core_size = (size < offset_round_up_difference)
+                        ? 0
+                        : Common::AlignDown(size - offset_round_up_difference, data_alignment);
+        buffer_gap = 0;
+        offset_gap = 0;
+
+        covered_offset = core_size > 0 ? core_offset : offset;
+    } else {
+        const size_t buffer_round_up_difference = GetRoundUpDifference(buffer, buffer_alignment);
+
+        aligned_core_buffer = buffer + buffer_round_up_difference;
+
+        core_offset = Common::AlignDown(offset, data_alignment);
+        core_size = (size < buffer_round_up_difference)
+                        ? 0
+                        : Common::AlignDown(size - buffer_round_up_difference, data_alignment);
+        buffer_gap = buffer_round_up_difference;
+        offset_gap = GetRoundDownDifference(offset, data_alignment);
+
+        covered_offset = offset;
+    }
+
+    // Read the core portion.
+    if (core_size > 0) {
+        base_storage->Read(aligned_core_buffer, core_size, core_offset);
+
+        if (offset_gap != 0 || buffer_gap != 0) {
+            std::memmove(aligned_core_buffer - buffer_gap, aligned_core_buffer + offset_gap,
+                         core_size - offset_gap);
+            core_size -= offset_gap;
+        }
+    }
+
+    // Handle the head portion.
+    if (offset < covered_offset) {
+        const s64 head_offset = Common::AlignDown(offset, data_alignment);
+        const size_t head_size = static_cast<size_t>(covered_offset - offset);
+
+        ASSERT(GetRoundDownDifference(offset, data_alignment) + head_size <= work_buf_size);
+
+        base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
+        std::memcpy(buffer, work_buf + GetRoundDownDifference(offset, data_alignment), head_size);
+    }
+
+    // Handle the tail portion.
+    s64 tail_offset = covered_offset + core_size;
+    size_t remaining_tail_size = static_cast<size_t>((offset + size) - tail_offset);
+    while (remaining_tail_size > 0) {
+        const auto aligned_tail_offset = Common::AlignDown(tail_offset, data_alignment);
+        const auto cur_size =
+            std::min(static_cast<size_t>(aligned_tail_offset + data_alignment - tail_offset),
+                     remaining_tail_size);
+        base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);
+
+        ASSERT((tail_offset - offset) + cur_size <= size);
+        ASSERT((tail_offset - aligned_tail_offset) + cur_size <= data_alignment);
+        std::memcpy(reinterpret_cast<char*>(buffer) + (tail_offset - offset),
+                    work_buf + (tail_offset - aligned_tail_offset), cur_size);
+
+        remaining_tail_size -= cur_size;
+        tail_offset += cur_size;
+    }
+
+    return size;
+}
+
+size_t AlignmentMatchingStorageImpl::Write(VirtualFile base_storage, char* work_buf,
+                                           size_t work_buf_size, size_t data_alignment,
+                                           size_t buffer_alignment, s64 offset, const u8* buffer,
+                                           size_t size) {
+    // Check preconditions.
+    ASSERT(work_buf_size >= data_alignment);
+
+    // Succeed if zero size.
+    if (size == 0) {
+        return size;
+    }
+
+    // Validate arguments.
+    ASSERT(buffer != nullptr);
+
+    // Determine extents.
+    const u8* aligned_core_buffer;
+    s64 core_offset;
+    size_t core_size;
+    s64 covered_offset;
+
+    const size_t offset_round_up_difference = GetRoundUpDifference(offset, data_alignment);
+    if (Common::IsAligned(reinterpret_cast<uintptr_t>(buffer) + offset_round_up_difference,
+                          buffer_alignment)) {
+        aligned_core_buffer = buffer + offset_round_up_difference;
+
+        core_offset = Common::AlignUp(offset, data_alignment);
+        core_size = (size < offset_round_up_difference)
+                        ? 0
+                        : Common::AlignDown(size - offset_round_up_difference, data_alignment);
+
+        covered_offset = core_size > 0 ? core_offset : offset;
+    } else {
+        aligned_core_buffer = nullptr;
+
+        core_offset = Common::AlignDown(offset, data_alignment);
+        core_size = 0;
+
+        covered_offset = offset;
+    }
+
+    // Write the core portion.
+    if (core_size > 0) {
+        base_storage->Write(aligned_core_buffer, core_size, core_offset);
+    }
+
+    // Handle the head portion.
+    if (offset < covered_offset) {
+        const s64 head_offset = Common::AlignDown(offset, data_alignment);
+        const size_t head_size = static_cast<size_t>(covered_offset - offset);
+
+        ASSERT((offset - head_offset) + head_size <= data_alignment);
+
+        base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
+        std::memcpy(work_buf + (offset - head_offset), buffer, head_size);
+        base_storage->Write(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
+    }
+
+    // Handle the tail portion.
+    s64 tail_offset = covered_offset + core_size;
+    size_t remaining_tail_size = static_cast<size_t>((offset + size) - tail_offset);
+    while (remaining_tail_size > 0) {
+        ASSERT(static_cast<size_t>(tail_offset - offset) < size);
+
+        const auto aligned_tail_offset = Common::AlignDown(tail_offset, data_alignment);
+        const auto cur_size =
+            std::min(static_cast<size_t>(aligned_tail_offset + data_alignment - tail_offset),
+                     remaining_tail_size);
+
+        base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);
+        std::memcpy(work_buf + GetRoundDownDifference(tail_offset, data_alignment),
+                    buffer + (tail_offset - offset), cur_size);
+        base_storage->Write(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);
+
+        remaining_tail_size -= cur_size;
+        tail_offset += cur_size;
+    }
+
+    return size;
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h
new file mode 100644
index 000000000..4a05b0e88
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h
@@ -0,0 +1,21 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fs_i_storage.h"
+
+namespace FileSys {
+
+class AlignmentMatchingStorageImpl {
+public:
+    static size_t Read(VirtualFile base_storage, char* work_buf, size_t work_buf_size,
+                       size_t data_alignment, size_t buffer_alignment, s64 offset, u8* buffer,
+                       size_t size);
+    static size_t Write(VirtualFile base_storage, char* work_buf, size_t work_buf_size,
+                        size_t data_alignment, size_t buffer_alignment, s64 offset,
+                        const u8* buffer, size_t size);
+};
+
+} // namespace FileSys
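A hypothetical instantiation, to show how the template is meant to be used (the `base` file and the alignment values are assumptions, not from this commit):

```cpp
// Present a 0x200-aligned base device through an interface that tolerates
// arbitrary offsets and sizes.
using SectorStorage = FileSys::AlignmentMatchingStorage<0x200, 1>;

auto storage = std::make_shared<SectorStorage>(base); // base: an open VirtualFile
std::array<u8, 37> buf;
storage->Read(buf.data(), buf.size(), 0x1FF); // unaligned offset and size are fine
```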
diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree.cpp b/src/core/file_sys/fssystem/fssystem_bucket_tree.cpp
new file mode 100644
index 000000000..699a366f1
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree.cpp
@@ -0,0 +1,598 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree_utils.h"
+#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
+
+namespace FileSys {
+
+namespace {
+
+using Node = impl::BucketTreeNode<const s64*>;
+static_assert(sizeof(Node) == sizeof(BucketTree::NodeHeader));
+static_assert(std::is_trivial_v<Node>);
+
+constexpr inline s32 NodeHeaderSize = sizeof(BucketTree::NodeHeader);
+
+class StorageNode {
+private:
+    class Offset {
+    public:
+        using difference_type = s64;
+
+    private:
+        s64 m_offset;
+        s32 m_stride;
+
+    public:
+        constexpr Offset(s64 offset, s32 stride) : m_offset(offset), m_stride(stride) {}
+
+        constexpr Offset& operator++() {
+            m_offset += m_stride;
+            return *this;
+        }
+        constexpr Offset operator++(int) {
+            Offset ret(*this);
+            m_offset += m_stride;
+            return ret;
+        }
+
+        constexpr Offset& operator--() {
+            m_offset -= m_stride;
+            return *this;
+        }
+        constexpr Offset operator--(int) {
+            Offset ret(*this);
+            m_offset -= m_stride;
+            return ret;
+        }
+
+        constexpr difference_type operator-(const Offset& rhs) const {
+            return (m_offset - rhs.m_offset) / m_stride;
+        }
+
+        constexpr Offset operator+(difference_type ofs) const {
+            return Offset(m_offset + ofs * m_stride, m_stride);
+        }
+        constexpr Offset operator-(difference_type ofs) const {
+            return Offset(m_offset - ofs * m_stride, m_stride);
+        }
+
+        constexpr Offset& operator+=(difference_type ofs) {
+            m_offset += ofs * m_stride;
+            return *this;
+        }
+        constexpr Offset& operator-=(difference_type ofs) {
+            m_offset -= ofs * m_stride;
+            return *this;
+        }
+
+        constexpr bool operator==(const Offset& rhs) const {
+            return m_offset == rhs.m_offset;
+        }
+        constexpr bool operator!=(const Offset& rhs) const {
+            return m_offset != rhs.m_offset;
+        }
+
+        constexpr s64 Get() const {
+            return m_offset;
+        }
+    };
+
+private:
+    const Offset m_start;
+    const s32 m_count;
+    s32 m_index;
+
+public:
+    StorageNode(size_t size, s32 count)
+        : m_start(NodeHeaderSize, static_cast<s32>(size)), m_count(count), m_index(-1) {}
+    StorageNode(s64 ofs, size_t size, s32 count)
+        : m_start(NodeHeaderSize + ofs, static_cast<s32>(size)), m_count(count), m_index(-1) {}
+
+    s32 GetIndex() const {
+        return m_index;
+    }
+
+    void Find(const char* buffer, s64 virtual_address) {
+        s32 end = m_count;
+        auto pos = m_start;
+
+        while (end > 0) {
+            auto half = end / 2;
+            auto mid = pos + half;
+
+            s64 offset = 0;
+            std::memcpy(std::addressof(offset), buffer + mid.Get(), sizeof(s64));
+
+            if (offset <= virtual_address) {
+                pos = mid + 1;
+                end -= half + 1;
+            } else {
+                end = half;
+            }
+        }
+
+        m_index = static_cast<s32>(pos - m_start) - 1;
+    }
+
+    Result Find(VirtualFile storage, s64 virtual_address) {
+        s32 end = m_count;
+        auto pos = m_start;
+
+        while (end > 0) {
+            auto half = end / 2;
+            auto mid = pos + half;
+
+            s64 offset = 0;
+            storage->ReadObject(std::addressof(offset), mid.Get());
+
+            if (offset <= virtual_address) {
+                pos = mid + 1;
+                end -= half + 1;
+            } else {
+                end = half;
+            }
+        }
+
+        m_index = static_cast<s32>(pos - m_start) - 1;
+        R_SUCCEED();
+    }
+};
+
+} // namespace
+
+void BucketTree::Header::Format(s32 entry_count_) {
+    ASSERT(entry_count_ >= 0);
+
+    this->magic = Magic;
+    this->version = Version;
+    this->entry_count = entry_count_;
+    this->reserved = 0;
+}
+
+Result BucketTree::Header::Verify() const {
+    R_UNLESS(this->magic == Magic, ResultInvalidBucketTreeSignature);
+    R_UNLESS(this->entry_count >= 0, ResultInvalidBucketTreeEntryCount);
+    R_UNLESS(this->version <= Version, ResultUnsupportedVersion);
+    R_SUCCEED();
+}
+
+Result BucketTree::NodeHeader::Verify(s32 node_index, size_t node_size, size_t entry_size) const {
+    R_UNLESS(this->index == node_index, ResultInvalidBucketTreeNodeIndex);
+    R_UNLESS(entry_size != 0 && node_size >= entry_size + NodeHeaderSize, ResultInvalidSize);
+
+    const size_t max_entry_count = (node_size - NodeHeaderSize) / entry_size;
+    R_UNLESS(this->count > 0 && static_cast<size_t>(this->count) <= max_entry_count,
+             ResultInvalidBucketTreeNodeEntryCount);
+    R_UNLESS(this->offset >= 0, ResultInvalidBucketTreeNodeOffset);
+
+    R_SUCCEED();
+}
+
+Result BucketTree::Initialize(VirtualFile node_storage, VirtualFile entry_storage,
+                              size_t node_size, size_t entry_size, s32 entry_count) {
+    // Validate preconditions.
+    ASSERT(entry_size >= sizeof(s64));
+    ASSERT(node_size >= entry_size + sizeof(NodeHeader));
+    ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
+    ASSERT(Common::IsPowerOfTwo(node_size));
+    ASSERT(!this->IsInitialized());
+
+    // Ensure valid entry count.
+    R_UNLESS(entry_count > 0, ResultInvalidArgument);
+
+    // Allocate node.
+    R_UNLESS(m_node_l1.Allocate(node_size), ResultBufferAllocationFailed);
+    ON_RESULT_FAILURE {
+        m_node_l1.Free(node_size);
+    };
+
+    // Read node.
+    node_storage->Read(reinterpret_cast<u8*>(m_node_l1.Get()), node_size);
+
+    // Verify node.
+    R_TRY(m_node_l1->Verify(0, node_size, sizeof(s64)));
+
+    // Validate offsets.
+    const auto offset_count = GetOffsetCount(node_size);
+    const auto entry_set_count = GetEntrySetCount(node_size, entry_size, entry_count);
+    const auto* const node = m_node_l1.Get<Node>();
+
+    s64 start_offset;
+    if (offset_count < entry_set_count && node->GetCount() < offset_count) {
+        start_offset = *node->GetEnd();
+    } else {
+        start_offset = *node->GetBegin();
+    }
+    const auto end_offset = node->GetEndOffset();
+
+    R_UNLESS(0 <= start_offset && start_offset <= node->GetBeginOffset(),
+             ResultInvalidBucketTreeEntryOffset);
+    R_UNLESS(start_offset < end_offset, ResultInvalidBucketTreeEntryOffset);
+
+    // Set member variables.
+    m_node_storage = node_storage;
+    m_entry_storage = entry_storage;
+    m_node_size = node_size;
+    m_entry_size = entry_size;
+    m_entry_count = entry_count;
+    m_offset_count = offset_count;
+    m_entry_set_count = entry_set_count;
+
+    m_offset_cache.offsets.start_offset = start_offset;
+    m_offset_cache.offsets.end_offset = end_offset;
+    m_offset_cache.is_initialized = true;
+
+    // Cancel guard.
+    R_SUCCEED();
+}
+
+void BucketTree::Initialize(size_t node_size, s64 end_offset) {
+    ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
+    ASSERT(Common::IsPowerOfTwo(node_size));
+    ASSERT(end_offset > 0);
+    ASSERT(!this->IsInitialized());
+
+    m_node_size = node_size;
+
+    m_offset_cache.offsets.start_offset = 0;
+    m_offset_cache.offsets.end_offset = end_offset;
+    m_offset_cache.is_initialized = true;
+}
+
+void BucketTree::Finalize() {
+    if (this->IsInitialized()) {
+        m_node_storage = VirtualFile();
+        m_entry_storage = VirtualFile();
+        m_node_l1.Free(m_node_size);
+        m_node_size = 0;
+        m_entry_size = 0;
+        m_entry_count = 0;
+        m_offset_count = 0;
+        m_entry_set_count = 0;
+
+        m_offset_cache.offsets.start_offset = 0;
+        m_offset_cache.offsets.end_offset = 0;
+        m_offset_cache.is_initialized = false;
+    }
+}
+
+Result BucketTree::Find(Visitor* visitor, s64 virtual_address) {
+    ASSERT(visitor != nullptr);
+    ASSERT(this->IsInitialized());
+
+    R_UNLESS(virtual_address >= 0, ResultInvalidOffset);
+    R_UNLESS(!this->IsEmpty(), ResultOutOfRange);
+
+    BucketTree::Offsets offsets;
+    R_TRY(this->GetOffsets(std::addressof(offsets)));
+
+    R_TRY(visitor->Initialize(this, offsets));
+
+    R_RETURN(visitor->Find(virtual_address));
+}
+
+Result BucketTree::InvalidateCache() {
+    // Reset our offsets.
+    m_offset_cache.is_initialized = false;
+
+    R_SUCCEED();
+}
+
+Result BucketTree::EnsureOffsetCache() {
+    // If we already have an offset cache, we're good.
+    R_SUCCEED_IF(m_offset_cache.is_initialized);
+
+    // Acquire exclusive right to edit the offset cache.
+    std::scoped_lock lk(m_offset_cache.mutex);
+
+    // Check again, to be sure.
+    R_SUCCEED_IF(m_offset_cache.is_initialized);
+
+    // Read/verify L1.
+    m_node_storage->Read(reinterpret_cast<u8*>(m_node_l1.Get()), m_node_size);
+    R_TRY(m_node_l1->Verify(0, m_node_size, sizeof(s64)));
+
+    // Get the node.
+    auto* const node = m_node_l1.Get<Node>();
+
+    s64 start_offset;
+    if (m_offset_count < m_entry_set_count && node->GetCount() < m_offset_count) {
+        start_offset = *node->GetEnd();
+    } else {
+        start_offset = *node->GetBegin();
+    }
+    const auto end_offset = node->GetEndOffset();
+
+    R_UNLESS(0 <= start_offset && start_offset <= node->GetBeginOffset(),
+             ResultInvalidBucketTreeEntryOffset);
+    R_UNLESS(start_offset < end_offset, ResultInvalidBucketTreeEntryOffset);
+
+    m_offset_cache.offsets.start_offset = start_offset;
+    m_offset_cache.offsets.end_offset = end_offset;
+    m_offset_cache.is_initialized = true;
+
+    R_SUCCEED();
+}
+
+Result BucketTree::Visitor::Initialize(const BucketTree* tree, const BucketTree::Offsets& offsets) {
+    ASSERT(tree != nullptr);
+    ASSERT(m_tree == nullptr || m_tree == tree);
+
+    if (m_entry == nullptr) {
+        m_entry = ::operator new(tree->m_entry_size);
+        R_UNLESS(m_entry != nullptr, ResultBufferAllocationFailed);
+
+        m_tree = tree;
+        m_offsets = offsets;
+    }
+
+    R_SUCCEED();
+}
+
+Result BucketTree::Visitor::MoveNext() {
+    R_UNLESS(this->IsValid(), ResultOutOfRange);
+
+    // Invalidate our index, and read the header for the next index.
+    auto entry_index = m_entry_index + 1;
+    if (entry_index == m_entry_set.info.count) {
+        const auto entry_set_index = m_entry_set.info.index + 1;
+        R_UNLESS(entry_set_index < m_entry_set_count, ResultOutOfRange);
+
+        m_entry_index = -1;
+
+        const auto end = m_entry_set.info.end;
+
+        const auto entry_set_size = m_tree->m_node_size;
+        const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
+
+        m_tree->m_entry_storage->ReadObject(std::addressof(m_entry_set), entry_set_offset);
+        R_TRY(m_entry_set.header.Verify(entry_set_index, entry_set_size, m_tree->m_entry_size));
+
+        R_UNLESS(m_entry_set.info.start == end && m_entry_set.info.start < m_entry_set.info.end,
+                 ResultInvalidBucketTreeEntrySetOffset);
+
+        entry_index = 0;
+    } else {
+        m_entry_index = -1;
+    }
+
+    // Read the new entry.
+    const auto entry_size = m_tree->m_entry_size;
+    const auto entry_offset = impl::GetBucketTreeEntryOffset(
+        m_entry_set.info.index, m_tree->m_node_size, entry_size, entry_index);
+    m_tree->m_entry_storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);
+
+    // Note that we changed index.
+    m_entry_index = entry_index;
+    R_SUCCEED();
+}
+
+Result BucketTree::Visitor::MovePrevious() {
+    R_UNLESS(this->IsValid(), ResultOutOfRange);
+
+    // Invalidate our index, and read the header for the previous index.
+    auto entry_index = m_entry_index;
+    if (entry_index == 0) {
+        R_UNLESS(m_entry_set.info.index > 0, ResultOutOfRange);
+
+        m_entry_index = -1;
+
+        const auto start = m_entry_set.info.start;
+
+        const auto entry_set_size = m_tree->m_node_size;
+        const auto entry_set_index = m_entry_set.info.index - 1;
+        const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
+
+        m_tree->m_entry_storage->ReadObject(std::addressof(m_entry_set), entry_set_offset);
+        R_TRY(m_entry_set.header.Verify(entry_set_index, entry_set_size, m_tree->m_entry_size));
+
+        R_UNLESS(m_entry_set.info.end == start && m_entry_set.info.start < m_entry_set.info.end,
+                 ResultInvalidBucketTreeEntrySetOffset);
+
+        entry_index = m_entry_set.info.count;
+    } else {
+        m_entry_index = -1;
+    }
+
+    --entry_index;
+
+    // Read the new entry.
+    const auto entry_size = m_tree->m_entry_size;
+    const auto entry_offset = impl::GetBucketTreeEntryOffset(
+        m_entry_set.info.index, m_tree->m_node_size, entry_size, entry_index);
+    m_tree->m_entry_storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);
+
+    // Note that we changed index.
+    m_entry_index = entry_index;
+    R_SUCCEED();
+}
+
+Result BucketTree::Visitor::Find(s64 virtual_address) {
+    ASSERT(m_tree != nullptr);
+
+    // Get the node.
+    const auto* const node = m_tree->m_node_l1.Get<Node>();
+    R_UNLESS(virtual_address < node->GetEndOffset(), ResultOutOfRange);
+
+    // Get the entry set index.
+    s32 entry_set_index = -1;
+    if (m_tree->IsExistOffsetL2OnL1() && virtual_address < node->GetBeginOffset()) {
+        const auto start = node->GetEnd();
+        const auto end = node->GetBegin() + m_tree->m_offset_count;
+
+        auto pos = std::upper_bound(start, end, virtual_address);
+        R_UNLESS(start < pos, ResultOutOfRange);
+        --pos;
+
+        entry_set_index = static_cast<s32>(pos - start);
+    } else {
+        const auto start = node->GetBegin();
+        const auto end = node->GetEnd();
+
+        auto pos = std::upper_bound(start, end, virtual_address);
+        R_UNLESS(start < pos, ResultOutOfRange);
+        --pos;
+
+        if (m_tree->IsExistL2()) {
+            const auto node_index = static_cast<s32>(pos - start);
+            R_UNLESS(0 <= node_index && node_index < m_tree->m_offset_count,
+                     ResultInvalidBucketTreeNodeOffset);
+
+            R_TRY(this->FindEntrySet(std::addressof(entry_set_index), virtual_address, node_index));
+        } else {
+            entry_set_index = static_cast<s32>(pos - start);
+        }
+    }
+
+    // Validate the entry set index.
+    R_UNLESS(0 <= entry_set_index && entry_set_index < m_tree->m_entry_set_count,
+             ResultInvalidBucketTreeNodeOffset);
+
+    // Find the entry.
+    R_TRY(this->FindEntry(virtual_address, entry_set_index));
+
+    // Set count.
+    m_entry_set_count = m_tree->m_entry_set_count;
+    R_SUCCEED();
+}
+
+Result BucketTree::Visitor::FindEntrySet(s32* out_index, s64 virtual_address, s32 node_index) {
+    const auto node_size = m_tree->m_node_size;
+
+    PooledBuffer pool(node_size, 1);
+    if (node_size <= pool.GetSize()) {
+        R_RETURN(
+            this->FindEntrySetWithBuffer(out_index, virtual_address, node_index, pool.GetBuffer()));
+    } else {
+        pool.Deallocate();
+        R_RETURN(this->FindEntrySetWithoutBuffer(out_index, virtual_address, node_index));
+    }
+}
+
+Result BucketTree::Visitor::FindEntrySetWithBuffer(s32* out_index, s64 virtual_address,
+                                                   s32 node_index, char* buffer) {
+    // Calculate node extents.
+    const auto node_size = m_tree->m_node_size;
+    const auto node_offset = (node_index + 1) * static_cast<s64>(node_size);
+    VirtualFile storage = m_tree->m_node_storage;
+
+    // Read the node.
+    storage->Read(reinterpret_cast<u8*>(buffer), node_size, node_offset);
+
+    // Validate the header.
+    NodeHeader header;
+    std::memcpy(std::addressof(header), buffer, NodeHeaderSize);
+    R_TRY(header.Verify(node_index, node_size, sizeof(s64)));
+
+    // Create the node, and find.
+    StorageNode node(sizeof(s64), header.count);
+    node.Find(buffer, virtual_address);
+    R_UNLESS(node.GetIndex() >= 0, ResultInvalidBucketTreeVirtualOffset);
+
+    // Return the index.
+    *out_index = static_cast<s32>(m_tree->GetEntrySetIndex(header.index, node.GetIndex()));
+    R_SUCCEED();
+}
+
+Result BucketTree::Visitor::FindEntrySetWithoutBuffer(s32* out_index, s64 virtual_address,
+                                                      s32 node_index) {
+    // Calculate node extents.
+    const auto node_size = m_tree->m_node_size;
+    const auto node_offset = (node_index + 1) * static_cast<s64>(node_size);
+    VirtualFile storage = m_tree->m_node_storage;
+
+    // Read and validate the header.
+    NodeHeader header;
+    storage->ReadObject(std::addressof(header), node_offset);
+    R_TRY(header.Verify(node_index, node_size, sizeof(s64)));
+
+    // Create the node, and find.
+    StorageNode node(node_offset, sizeof(s64), header.count);
+    R_TRY(node.Find(storage, virtual_address));
+    R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);
+
+    // Return the index.
+    *out_index = static_cast<s32>(m_tree->GetEntrySetIndex(header.index, node.GetIndex()));
+    R_SUCCEED();
+}
+
+Result BucketTree::Visitor::FindEntry(s64 virtual_address, s32 entry_set_index) {
+    const auto entry_set_size = m_tree->m_node_size;
+
+    PooledBuffer pool(entry_set_size, 1);
+    if (entry_set_size <= pool.GetSize()) {
+        R_RETURN(this->FindEntryWithBuffer(virtual_address, entry_set_index, pool.GetBuffer()));
+    } else {
+        pool.Deallocate();
+        R_RETURN(this->FindEntryWithoutBuffer(virtual_address, entry_set_index));
+    }
+}
+
+Result BucketTree::Visitor::FindEntryWithBuffer(s64 virtual_address, s32 entry_set_index,
+                                                char* buffer) {
+    // Calculate entry set extents.
+    const auto entry_size = m_tree->m_entry_size;
+    const auto entry_set_size = m_tree->m_node_size;
+    const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
+    VirtualFile storage = m_tree->m_entry_storage;
+
+    // Read the entry set.
+    storage->Read(reinterpret_cast<u8*>(buffer), entry_set_size, entry_set_offset);
+
+    // Validate the entry_set.
+    EntrySetHeader entry_set;
+    std::memcpy(std::addressof(entry_set), buffer, sizeof(EntrySetHeader));
+    R_TRY(entry_set.header.Verify(entry_set_index, entry_set_size, entry_size));
+
+    // Create the node, and find.
+    StorageNode node(entry_size, entry_set.info.count);
+    node.Find(buffer, virtual_address);
+    R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);
+
+    // Copy the data into entry.
+    const auto entry_index = node.GetIndex();
+    const auto entry_offset = impl::GetBucketTreeEntryOffset(0, entry_size, entry_index);
+    std::memcpy(m_entry, buffer + entry_offset, entry_size);
+
+    // Set our entry set/index.
+    m_entry_set = entry_set;
+    m_entry_index = entry_index;
+
+    R_SUCCEED();
+}
+
+Result BucketTree::Visitor::FindEntryWithoutBuffer(s64 virtual_address, s32 entry_set_index) {
+    // Calculate entry set extents.
+    const auto entry_size = m_tree->m_entry_size;
+    const auto entry_set_size = m_tree->m_node_size;
+    const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
+    VirtualFile storage = m_tree->m_entry_storage;
+
+    // Read and validate the entry_set.
+    EntrySetHeader entry_set;
+    storage->ReadObject(std::addressof(entry_set), entry_set_offset);
+    R_TRY(entry_set.header.Verify(entry_set_index, entry_set_size, entry_size));
+
+    // Create the node, and find.
+    StorageNode node(entry_set_offset, entry_size, entry_set.info.count);
+    R_TRY(node.Find(storage, virtual_address));
+    R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);
+
+    // Copy the data into entry.
+    const auto entry_index = node.GetIndex();
+    const auto entry_offset =
+        impl::GetBucketTreeEntryOffset(entry_set_offset, entry_size, entry_index);
+    storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);
+
+    // Set our entry set/index.
+    m_entry_set = entry_set;
+    m_entry_index = entry_index;
+
+    R_SUCCEED();
+}
+
+} // namespace FileSys
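`StorageNode::Find` and the `upper_bound` searches above all reduce to the same question: the index of the last entry whose offset is less than or equal to the target address. An equivalent standalone sketch:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Sketch: index of the last offset <= virtual_address, or -1 if none.
int FindEntryIndex(const std::vector<int64_t>& offsets, int64_t virtual_address) {
    int index = -1;
    int lo = 0;
    int hi = static_cast<int>(offsets.size());
    while (lo < hi) {
        const int mid = lo + (hi - lo) / 2;
        if (offsets[mid] <= virtual_address) {
            index = mid; // candidate; keep searching to the right
            lo = mid + 1;
        } else {
            hi = mid;
        }
    }
    return index;
}

int main() {
    const std::vector<int64_t> offsets{0x0, 0x4000, 0x10000};
    assert(FindEntryIndex(offsets, 0x4FFF) == 1);
    assert(FindEntryIndex(offsets, -1) == -1);
    return 0;
}
```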
+ m_entry_set = entry_set; + m_entry_index = entry_index; + + R_SUCCEED(); +} + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree.h b/src/core/file_sys/fssystem/fssystem_bucket_tree.h new file mode 100644 index 000000000..74a2f7583 --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_bucket_tree.h @@ -0,0 +1,417 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <mutex> + +#include "common/alignment.h" +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "common/literals.h" + +#include "core/file_sys/vfs.h" +#include "core/hle/result.h" + +namespace FileSys { + +using namespace Common::Literals; + +class BucketTree { + YUZU_NON_COPYABLE(BucketTree); + YUZU_NON_MOVEABLE(BucketTree); + +public: + static constexpr u32 Magic = Common::MakeMagic('B', 'K', 'T', 'R'); + static constexpr u32 Version = 1; + + static constexpr size_t NodeSizeMin = 1_KiB; + static constexpr size_t NodeSizeMax = 512_KiB; + +public: + class Visitor; + + struct Header { + u32 magic; + u32 version; + s32 entry_count; + s32 reserved; + + void Format(s32 entry_count); + Result Verify() const; + }; + static_assert(std::is_trivial_v<Header>); + static_assert(sizeof(Header) == 0x10); + + struct NodeHeader { + s32 index; + s32 count; + s64 offset; + + Result Verify(s32 node_index, size_t node_size, size_t entry_size) const; + }; + static_assert(std::is_trivial_v<NodeHeader>); + static_assert(sizeof(NodeHeader) == 0x10); + + struct Offsets { + s64 start_offset; + s64 end_offset; + + constexpr bool IsInclude(s64 offset) const { + return this->start_offset <= offset && offset < this->end_offset; + } + + constexpr bool IsInclude(s64 offset, s64 size) const { + return size > 0 && this->start_offset <= offset && size <= (this->end_offset - offset); + } + }; + static_assert(std::is_trivial_v<Offsets>); + static_assert(sizeof(Offsets) == 0x10); + + struct OffsetCache { + Offsets offsets; + std::mutex mutex; + bool is_initialized; + + OffsetCache() : offsets{-1, -1}, mutex(), is_initialized(false) {} + }; + + class ContinuousReadingInfo { + private: + size_t m_read_size; + s32 m_skip_count; + bool m_done; + + public: + constexpr ContinuousReadingInfo() : m_read_size(), m_skip_count(), m_done() {} + + constexpr void Reset() { + m_read_size = 0; + m_skip_count = 0; + m_done = false; + } + + constexpr void SetSkipCount(s32 count) { + ASSERT(count >= 0); + m_skip_count = count; + } + constexpr s32 GetSkipCount() const { + return m_skip_count; + } + constexpr bool CheckNeedScan() { + return (--m_skip_count) <= 0; + } + + constexpr void Done() { + m_read_size = 0; + m_done = true; + } + constexpr bool IsDone() const { + return m_done; + } + + constexpr void SetReadSize(size_t size) { + m_read_size = size; + } + constexpr size_t GetReadSize() const { + return m_read_size; + } + constexpr bool CanDo() const { + return m_read_size > 0; + } + }; + +private: + class NodeBuffer { + YUZU_NON_COPYABLE(NodeBuffer); + + private: + void* m_header; + + public: + NodeBuffer() : m_header() {} + + ~NodeBuffer() { + ASSERT(m_header == nullptr); + } + + NodeBuffer(NodeBuffer&& rhs) : m_header(rhs.m_header) { + rhs.m_header = nullptr; + } + + NodeBuffer& operator=(NodeBuffer&& rhs) { + if (this != std::addressof(rhs)) { + ASSERT(m_header == nullptr); + + m_header = rhs.m_header; + + rhs.m_header = nullptr; + } + return *this; + } + + bool Allocate(size_t node_size) { + ASSERT(m_header == nullptr); + + 
m_header = ::operator new(node_size, std::align_val_t{sizeof(s64)}); + + // ASSERT(Common::IsAligned(m_header, sizeof(s64))); + + return m_header != nullptr; + } + + void Free(size_t node_size) { + if (m_header) { + ::operator delete(m_header, std::align_val_t{sizeof(s64)}); + m_header = nullptr; + } + } + + void FillZero(size_t node_size) const { + if (m_header) { + std::memset(m_header, 0, node_size); + } + } + + NodeHeader* Get() const { + return reinterpret_cast<NodeHeader*>(m_header); + } + + NodeHeader* operator->() const { + return this->Get(); + } + + template <typename T> + T* Get() const { + static_assert(std::is_trivial_v<T>); + static_assert(sizeof(T) == sizeof(NodeHeader)); + return reinterpret_cast<T*>(m_header); + } + }; + +private: + static constexpr s32 GetEntryCount(size_t node_size, size_t entry_size) { + return static_cast<s32>((node_size - sizeof(NodeHeader)) / entry_size); + } + + static constexpr s32 GetOffsetCount(size_t node_size) { + return static_cast<s32>((node_size - sizeof(NodeHeader)) / sizeof(s64)); + } + + static constexpr s32 GetEntrySetCount(size_t node_size, size_t entry_size, s32 entry_count) { + const s32 entry_count_per_node = GetEntryCount(node_size, entry_size); + return Common::DivideUp(entry_count, entry_count_per_node); + } + + static constexpr s32 GetNodeL2Count(size_t node_size, size_t entry_size, s32 entry_count) { + const s32 offset_count_per_node = GetOffsetCount(node_size); + const s32 entry_set_count = GetEntrySetCount(node_size, entry_size, entry_count); + + if (entry_set_count <= offset_count_per_node) { + return 0; + } + + const s32 node_l2_count = Common::DivideUp(entry_set_count, offset_count_per_node); + ASSERT(node_l2_count <= offset_count_per_node); + + return Common::DivideUp(entry_set_count - (offset_count_per_node - (node_l2_count - 1)), + offset_count_per_node); + } + +public: + static constexpr s64 QueryHeaderStorageSize() { + return sizeof(Header); + } + + static constexpr s64 QueryNodeStorageSize(size_t node_size, size_t entry_size, + s32 entry_count) { + ASSERT(entry_size >= sizeof(s64)); + ASSERT(node_size >= entry_size + sizeof(NodeHeader)); + ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax); + ASSERT(Common::IsPowerOfTwo(node_size)); + ASSERT(entry_count >= 0); + + if (entry_count <= 0) { + return 0; + } + return (1 + GetNodeL2Count(node_size, entry_size, entry_count)) * + static_cast<s64>(node_size); + } + + static constexpr s64 QueryEntryStorageSize(size_t node_size, size_t entry_size, + s32 entry_count) { + ASSERT(entry_size >= sizeof(s64)); + ASSERT(node_size >= entry_size + sizeof(NodeHeader)); + ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax); + ASSERT(Common::IsPowerOfTwo(node_size)); + ASSERT(entry_count >= 0); + + if (entry_count <= 0) { + return 0; + } + return GetEntrySetCount(node_size, entry_size, entry_count) * static_cast<s64>(node_size); + } + +private: + mutable VirtualFile m_node_storage; + mutable VirtualFile m_entry_storage; + NodeBuffer m_node_l1; + size_t m_node_size; + size_t m_entry_size; + s32 m_entry_count; + s32 m_offset_count; + s32 m_entry_set_count; + OffsetCache m_offset_cache; + +public: + BucketTree() + : m_node_storage(), m_entry_storage(), m_node_l1(), m_node_size(), m_entry_size(), + m_entry_count(), m_offset_count(), m_entry_set_count(), m_offset_cache() {} + ~BucketTree() { + this->Finalize(); + } + + Result Initialize(VirtualFile node_storage, VirtualFile entry_storage, size_t node_size, + size_t entry_size, s32 entry_count); + void Initialize(size_t 
node_size, s64 end_offset); + void Finalize(); + + bool IsInitialized() const { + return m_node_size > 0; + } + bool IsEmpty() const { + return m_entry_size == 0; + } + + Result Find(Visitor* visitor, s64 virtual_address); + Result InvalidateCache(); + + s32 GetEntryCount() const { + return m_entry_count; + } + + Result GetOffsets(Offsets* out) { + // Ensure we have an offset cache. + R_TRY(this->EnsureOffsetCache()); + + // Set the output. + *out = m_offset_cache.offsets; + R_SUCCEED(); + } + +private: + template <typename EntryType> + struct ContinuousReadingParam { + s64 offset; + size_t size; + NodeHeader entry_set; + s32 entry_index; + Offsets offsets; + EntryType entry; + }; + +private: + template <typename EntryType> + Result ScanContinuousReading(ContinuousReadingInfo* out_info, + const ContinuousReadingParam<EntryType>& param) const; + + bool IsExistL2() const { + return m_offset_count < m_entry_set_count; + } + bool IsExistOffsetL2OnL1() const { + return this->IsExistL2() && m_node_l1->count < m_offset_count; + } + + s64 GetEntrySetIndex(s32 node_index, s32 offset_index) const { + return (m_offset_count - m_node_l1->count) + (m_offset_count * node_index) + offset_index; + } + + Result EnsureOffsetCache(); +}; + +class BucketTree::Visitor { + YUZU_NON_COPYABLE(Visitor); + YUZU_NON_MOVEABLE(Visitor); + +private: + friend class BucketTree; + + union EntrySetHeader { + NodeHeader header; + struct Info { + s32 index; + s32 count; + s64 end; + s64 start; + } info; + static_assert(std::is_trivial_v<Info>); + }; + static_assert(std::is_trivial_v<EntrySetHeader>); + +private: + const BucketTree* m_tree; + BucketTree::Offsets m_offsets; + void* m_entry; + s32 m_entry_index; + s32 m_entry_set_count; + EntrySetHeader m_entry_set; + +public: + constexpr Visitor() + : m_tree(), m_entry(), m_entry_index(-1), m_entry_set_count(), m_entry_set{} {} + ~Visitor() { + if (m_entry != nullptr) { + ::operator delete(m_entry, m_tree->m_entry_size); + m_tree = nullptr; + m_entry = nullptr; + } + } + + bool IsValid() const { + return m_entry_index >= 0; + } + bool CanMoveNext() const { + return this->IsValid() && (m_entry_index + 1 < m_entry_set.info.count || + m_entry_set.info.index + 1 < m_entry_set_count); + } + bool CanMovePrevious() const { + return this->IsValid() && (m_entry_index > 0 || m_entry_set.info.index > 0); + } + + Result MoveNext(); + Result MovePrevious(); + + template <typename EntryType> + Result ScanContinuousReading(ContinuousReadingInfo* out_info, s64 offset, size_t size) const; + + const void* Get() const { + ASSERT(this->IsValid()); + return m_entry; + } + + template <typename T> + const T* Get() const { + ASSERT(this->IsValid()); + return reinterpret_cast<const T*>(m_entry); + } + + const BucketTree* GetTree() const { + return m_tree; + } + +private: + Result Initialize(const BucketTree* tree, const BucketTree::Offsets& offsets); + + Result Find(s64 virtual_address); + + Result FindEntrySet(s32* out_index, s64 virtual_address, s32 node_index); + Result FindEntrySetWithBuffer(s32* out_index, s64 virtual_address, s32 node_index, + char* buffer); + Result FindEntrySetWithoutBuffer(s32* out_index, s64 virtual_address, s32 node_index); + + Result FindEntry(s64 virtual_address, s32 entry_set_index); + Result FindEntryWithBuffer(s64 virtual_address, s32 entry_set_index, char* buffer); + Result FindEntryWithoutBuffer(s64 virtual_address, s32 entry_set_index); +}; + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h 
b/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h
new file mode 100644
index 000000000..030b2916b
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h
@@ -0,0 +1,170 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/errors.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
+#include "core/file_sys/fssystem/fssystem_bucket_tree_utils.h"
+#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
+
+namespace FileSys {
+
+template <typename EntryType>
+Result BucketTree::ScanContinuousReading(ContinuousReadingInfo* out_info,
+                                         const ContinuousReadingParam<EntryType>& param) const {
+    static_assert(std::is_trivial_v<ContinuousReadingParam<EntryType>>);
+
+    // Validate our preconditions.
+    ASSERT(this->IsInitialized());
+    ASSERT(out_info != nullptr);
+    ASSERT(m_entry_size == sizeof(EntryType));
+
+    // Reset the output.
+    out_info->Reset();
+
+    // If there's nothing to read, we're done.
+    R_SUCCEED_IF(param.size == 0);
+
+    // If we're reading a fragment, we're done.
+    R_SUCCEED_IF(param.entry.IsFragment());
+
+    // Validate the first entry.
+    auto entry = param.entry;
+    auto cur_offset = param.offset;
+    R_UNLESS(entry.GetVirtualOffset() <= cur_offset, ResultOutOfRange);
+
+    // Create a pooled buffer for our scan.
+    PooledBuffer pool(m_node_size, 1);
+    char* buffer = nullptr;
+
+    s64 entry_storage_size = m_entry_storage->GetSize();
+
+    // Read the node.
+    if (m_node_size <= pool.GetSize()) {
+        buffer = pool.GetBuffer();
+        const auto ofs = param.entry_set.index * static_cast<s64>(m_node_size);
+        R_UNLESS(m_node_size + ofs <= static_cast<size_t>(entry_storage_size),
+                 ResultInvalidBucketTreeNodeEntryCount);
+
+        m_entry_storage->Read(reinterpret_cast<u8*>(buffer), m_node_size, ofs);
+    }
+
+    // Calculate extents.
+    const auto end_offset = cur_offset + static_cast<s64>(param.size);
+    s64 phys_offset = entry.GetPhysicalOffset();
+
+    // Start merge tracking.
+    s64 merge_size = 0;
+    s64 readable_size = 0;
+    bool merged = false;
+
+    // Iterate.
+    auto entry_index = param.entry_index;
+    for (const auto entry_count = param.entry_set.count; entry_index < entry_count;
+         ++entry_index) {
+        // If we're past the end, we're done.
+        if (end_offset <= cur_offset) {
+            break;
+        }
+
+        // Validate the entry offset.
+        const auto entry_offset = entry.GetVirtualOffset();
+        R_UNLESS(entry_offset <= cur_offset, ResultInvalidIndirectEntryOffset);
+
+        // Get the next entry.
+        EntryType next_entry = {};
+        s64 next_entry_offset;
+
+        if (entry_index + 1 < entry_count) {
+            if (buffer != nullptr) {
+                const auto ofs = impl::GetBucketTreeEntryOffset(0, m_entry_size, entry_index + 1);
+                std::memcpy(std::addressof(next_entry), buffer + ofs, m_entry_size);
+            } else {
+                const auto ofs = impl::GetBucketTreeEntryOffset(param.entry_set.index, m_node_size,
+                                                                m_entry_size, entry_index + 1);
+                m_entry_storage->ReadObject(std::addressof(next_entry), ofs);
+            }
+
+            next_entry_offset = next_entry.GetVirtualOffset();
+            R_UNLESS(param.offsets.IsInclude(next_entry_offset), ResultInvalidIndirectEntryOffset);
+        } else {
+            next_entry_offset = param.entry_set.offset;
+        }
+
+        // Validate the next entry offset.
+        R_UNLESS(cur_offset < next_entry_offset, ResultInvalidIndirectEntryOffset);
+
+        // Determine how much data there is.
+        const auto data_size = next_entry_offset - cur_offset;
+        ASSERT(data_size > 0);
+
+        // Determine how much data we should read.
+ const auto remaining_size = end_offset - cur_offset; + const size_t read_size = static_cast<size_t>(std::min(data_size, remaining_size)); + ASSERT(read_size <= param.size); + + // Update our merge tracking. + if (entry.IsFragment()) { + // If we can't merge, stop looping. + if (EntryType::FragmentSizeMax <= read_size || remaining_size <= data_size) { + break; + } + + // Otherwise, add the current size to the merge size. + merge_size += read_size; + } else { + // If we can't merge, stop looping. + if (phys_offset != entry.GetPhysicalOffset()) { + break; + } + + // Add the size to the readable amount. + readable_size += merge_size + read_size; + ASSERT(readable_size <= static_cast<s64>(param.size)); + + // Update whether we've merged. + merged |= merge_size > 0; + merge_size = 0; + } + + // Advance. + cur_offset += read_size; + ASSERT(cur_offset <= end_offset); + + phys_offset += next_entry_offset - entry_offset; + entry = next_entry; + } + + // If we merged, set our readable size. + if (merged) { + out_info->SetReadSize(static_cast<size_t>(readable_size)); + } + out_info->SetSkipCount(entry_index - param.entry_index); + + R_SUCCEED(); +} + +template <typename EntryType> +Result BucketTree::Visitor::ScanContinuousReading(ContinuousReadingInfo* out_info, s64 offset, + size_t size) const { + static_assert(std::is_trivial_v<EntryType>); + ASSERT(this->IsValid()); + + // Create our parameters. + ContinuousReadingParam<EntryType> param = { + .offset = offset, + .size = size, + .entry_set = m_entry_set.header, + .entry_index = m_entry_index, + .offsets{}, + .entry{}, + }; + std::memcpy(std::addressof(param.offsets), std::addressof(m_offsets), + sizeof(BucketTree::Offsets)); + std::memcpy(std::addressof(param.entry), m_entry, sizeof(EntryType)); + + // Scan. 
+ R_RETURN(m_tree->ScanContinuousReading<EntryType>(out_info, param)); +} + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h b/src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h new file mode 100644 index 000000000..5503613fc --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_bucket_tree_utils.h @@ -0,0 +1,110 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "core/file_sys/fssystem/fssystem_bucket_tree.h" + +namespace FileSys::impl { + +class SafeValue { +public: + static s64 GetInt64(const void* ptr) { + s64 value; + std::memcpy(std::addressof(value), ptr, sizeof(s64)); + return value; + } + + static s64 GetInt64(const s64* ptr) { + return GetInt64(static_cast<const void*>(ptr)); + } + + static s64 GetInt64(const s64& v) { + return GetInt64(std::addressof(v)); + } + + static void SetInt64(void* dst, const void* src) { + std::memcpy(dst, src, sizeof(s64)); + } + + static void SetInt64(void* dst, const s64* src) { + return SetInt64(dst, static_cast<const void*>(src)); + } + + static void SetInt64(void* dst, const s64& v) { + return SetInt64(dst, std::addressof(v)); + } +}; + +template <typename IteratorType> +struct BucketTreeNode { + using Header = BucketTree::NodeHeader; + + Header header; + + s32 GetCount() const { + return this->header.count; + } + + void* GetArray() { + return std::addressof(this->header) + 1; + } + template <typename T> + T* GetArray() { + return reinterpret_cast<T*>(this->GetArray()); + } + const void* GetArray() const { + return std::addressof(this->header) + 1; + } + template <typename T> + const T* GetArray() const { + return reinterpret_cast<const T*>(this->GetArray()); + } + + s64 GetBeginOffset() const { + return *this->GetArray<s64>(); + } + s64 GetEndOffset() const { + return this->header.offset; + } + + IteratorType GetBegin() { + return IteratorType(this->GetArray<s64>()); + } + IteratorType GetEnd() { + return IteratorType(this->GetArray<s64>()) + this->header.count; + } + IteratorType GetBegin() const { + return IteratorType(this->GetArray<s64>()); + } + IteratorType GetEnd() const { + return IteratorType(this->GetArray<s64>()) + this->header.count; + } + + IteratorType GetBegin(size_t entry_size) { + return IteratorType(this->GetArray(), entry_size); + } + IteratorType GetEnd(size_t entry_size) { + return IteratorType(this->GetArray(), entry_size) + this->header.count; + } + IteratorType GetBegin(size_t entry_size) const { + return IteratorType(this->GetArray(), entry_size); + } + IteratorType GetEnd(size_t entry_size) const { + return IteratorType(this->GetArray(), entry_size) + this->header.count; + } +}; + +constexpr inline s64 GetBucketTreeEntryOffset(s64 entry_set_offset, size_t entry_size, + s32 entry_index) { + return entry_set_offset + sizeof(BucketTree::NodeHeader) + + entry_index * static_cast<s64>(entry_size); +} + +constexpr inline s64 GetBucketTreeEntryOffset(s32 entry_set_index, size_t node_size, + size_t entry_size, s32 entry_index) { + return GetBucketTreeEntryOffset(entry_set_index * static_cast<s64>(node_size), entry_size, + entry_index); +} + +} // namespace FileSys::impl diff --git a/src/core/file_sys/fssystem/fssystem_compressed_storage.h b/src/core/file_sys/fssystem/fssystem_compressed_storage.h new file mode 100644 index 000000000..e407add1b --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_compressed_storage.h @@ -0,0 +1,960 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu 
Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "common/literals.h" + +#include "core/file_sys/errors.h" +#include "core/file_sys/fssystem/fs_i_storage.h" +#include "core/file_sys/fssystem/fssystem_bucket_tree.h" +#include "core/file_sys/fssystem/fssystem_compression_common.h" +#include "core/file_sys/fssystem/fssystem_pooled_buffer.h" +#include "core/file_sys/vfs.h" + +namespace FileSys { + +using namespace Common::Literals; + +class CompressedStorage : public IReadOnlyStorage { + YUZU_NON_COPYABLE(CompressedStorage); + YUZU_NON_MOVEABLE(CompressedStorage); + +public: + static constexpr size_t NodeSize = 16_KiB; + + struct Entry { + s64 virt_offset; + s64 phys_offset; + CompressionType compression_type; + s32 phys_size; + + s64 GetPhysicalSize() const { + return this->phys_size; + } + }; + static_assert(std::is_trivial_v<Entry>); + static_assert(sizeof(Entry) == 0x18); + +public: + static constexpr s64 QueryNodeStorageSize(s32 entry_count) { + return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count); + } + + static constexpr s64 QueryEntryStorageSize(s32 entry_count) { + return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count); + } + +private: + class CompressedStorageCore { + YUZU_NON_COPYABLE(CompressedStorageCore); + YUZU_NON_MOVEABLE(CompressedStorageCore); + + private: + size_t m_block_size_max; + size_t m_continuous_reading_size_max; + BucketTree m_table; + VirtualFile m_data_storage; + GetDecompressorFunction m_get_decompressor_function; + + public: + CompressedStorageCore() : m_table(), m_data_storage() {} + + ~CompressedStorageCore() { + this->Finalize(); + } + + public: + Result Initialize(VirtualFile data_storage, VirtualFile node_storage, + VirtualFile entry_storage, s32 bktr_entry_count, size_t block_size_max, + size_t continuous_reading_size_max, + GetDecompressorFunction get_decompressor) { + // Check pre-conditions. + ASSERT(0 < block_size_max); + ASSERT(block_size_max <= continuous_reading_size_max); + ASSERT(get_decompressor != nullptr); + + // Initialize our entry table. + R_TRY(m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry), + bktr_entry_count)); + + // Set our other fields. + m_block_size_max = block_size_max; + m_continuous_reading_size_max = continuous_reading_size_max; + m_data_storage = data_storage; + m_get_decompressor_function = get_decompressor; + + R_SUCCEED(); + } + + void Finalize() { + if (this->IsInitialized()) { + m_table.Finalize(); + m_data_storage = VirtualFile(); + } + } + + VirtualFile GetDataStorage() { + return m_data_storage; + } + + Result GetDataStorageSize(s64* out) { + // Check pre-conditions. + ASSERT(out != nullptr); + + // Get size. + *out = m_data_storage->GetSize(); + + R_SUCCEED(); + } + + BucketTree& GetEntryTable() { + return m_table; + } + + Result GetEntryList(Entry* out_entries, s32* out_read_count, s32 max_entry_count, + s64 offset, s64 size) { + // Check pre-conditions. + ASSERT(offset >= 0); + ASSERT(size >= 0); + ASSERT(this->IsInitialized()); + + // Check that we can output the count. + R_UNLESS(out_read_count != nullptr, ResultNullptrArgument); + + // Check that we have anything to read at all. + R_SUCCEED_IF(size == 0); + + // Check that either we have a buffer, or this is to determine how many we need. + if (max_entry_count != 0) { + R_UNLESS(out_entries != nullptr, ResultNullptrArgument); + } + + // Get the table offsets. 
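+            // (The offsets describe the virtual range [start_offset, end_offset)
+            // covered by the bucket tree.)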
+ BucketTree::Offsets table_offsets; + R_TRY(m_table.GetOffsets(std::addressof(table_offsets))); + + // Validate arguments. + R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange); + + // Find the offset in our tree. + BucketTree::Visitor visitor; + R_TRY(m_table.Find(std::addressof(visitor), offset)); + { + const auto entry_offset = visitor.Get<Entry>()->virt_offset; + R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset), + ResultUnexpectedInCompressedStorageA); + } + + // Get the entries. + const auto end_offset = offset + size; + s32 read_count = 0; + while (visitor.Get<Entry>()->virt_offset < end_offset) { + // If we should be setting the output, do so. + if (max_entry_count != 0) { + // Ensure we only read as many entries as we can. + if (read_count >= max_entry_count) { + break; + } + + // Set the current output entry. + out_entries[read_count] = *visitor.Get<Entry>(); + } + + // Increase the read count. + ++read_count; + + // If we're at the end, we're done. + if (!visitor.CanMoveNext()) { + break; + } + + // Move to the next entry. + R_TRY(visitor.MoveNext()); + } + + // Set the output read count. + *out_read_count = read_count; + R_SUCCEED(); + } + + Result GetSize(s64* out) { + // Check pre-conditions. + ASSERT(out != nullptr); + + // Get our table offsets. + BucketTree::Offsets offsets; + R_TRY(m_table.GetOffsets(std::addressof(offsets))); + + // Set the output. + *out = offsets.end_offset; + R_SUCCEED(); + } + + Result OperatePerEntry(s64 offset, s64 size, auto f) { + // Check pre-conditions. + ASSERT(offset >= 0); + ASSERT(size >= 0); + ASSERT(this->IsInitialized()); + + // Succeed if there's nothing to operate on. + R_SUCCEED_IF(size == 0); + + // Get the table offsets. + BucketTree::Offsets table_offsets; + R_TRY(m_table.GetOffsets(std::addressof(table_offsets))); + + // Validate arguments. + R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange); + + // Find the offset in our tree. + BucketTree::Visitor visitor; + R_TRY(m_table.Find(std::addressof(visitor), offset)); + { + const auto entry_offset = visitor.Get<Entry>()->virt_offset; + R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset), + ResultUnexpectedInCompressedStorageA); + } + + // Prepare to operate in chunks. + auto cur_offset = offset; + const auto end_offset = offset + static_cast<s64>(size); + + while (cur_offset < end_offset) { + // Get the current entry. + const auto cur_entry = *visitor.Get<Entry>(); + + // Get and validate the entry's offset. + const auto cur_entry_offset = cur_entry.virt_offset; + R_UNLESS(cur_entry_offset <= cur_offset, ResultUnexpectedInCompressedStorageA); + + // Get and validate the next entry offset. + s64 next_entry_offset; + if (visitor.CanMoveNext()) { + R_TRY(visitor.MoveNext()); + next_entry_offset = visitor.Get<Entry>()->virt_offset; + R_UNLESS(table_offsets.IsInclude(next_entry_offset), + ResultUnexpectedInCompressedStorageA); + } else { + next_entry_offset = table_offsets.end_offset; + } + R_UNLESS(cur_offset < next_entry_offset, ResultUnexpectedInCompressedStorageA); + + // Get the offset of the entry in the data we read. + const auto data_offset = cur_offset - cur_entry_offset; + const auto data_size = (next_entry_offset - cur_entry_offset); + ASSERT(data_size > 0); + + // Determine how much is left. + const auto remaining_size = end_offset - cur_offset; + const auto cur_size = std::min<s64>(remaining_size, data_size - data_offset); + ASSERT(cur_size <= size); + + // Get the data storage size. 
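+            // (Used below to bounds-check each entry's physical extents.)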
+            s64 storage_size = m_data_storage->GetSize();
+
+            // Check that our read remains naively physically in bounds.
+            R_UNLESS(0 <= cur_entry.phys_offset && cur_entry.phys_offset <= storage_size,
+                     ResultUnexpectedInCompressedStorageC);
+
+            // If we have any compression, verify that we remain physically in bounds.
+            if (cur_entry.compression_type != CompressionType::None) {
+                R_UNLESS(cur_entry.phys_offset + cur_entry.GetPhysicalSize() <= storage_size,
+                         ResultUnexpectedInCompressedStorageC);
+            }
+
+            // Check that block alignment requirements are met.
+            if (CompressionTypeUtility::IsBlockAlignmentRequired(cur_entry.compression_type)) {
+                R_UNLESS(Common::IsAligned(cur_entry.phys_offset, CompressionBlockAlignment),
+                         ResultUnexpectedInCompressedStorageA);
+            }
+
+            // Invoke the operator.
+            bool is_continuous = true;
+            R_TRY(
+                f(std::addressof(is_continuous), cur_entry, data_size, data_offset, cur_size));
+
+            // If not continuous, we're done.
+            if (!is_continuous) {
+                break;
+            }
+
+            // Advance.
+            cur_offset += cur_size;
+        }
+
+        R_SUCCEED();
+    }
+
+public:
+    using ReadImplFunction = std::function<Result(void*, size_t)>;
+    using ReadFunction = std::function<Result(size_t, const ReadImplFunction&)>;
+
+public:
+    Result Read(s64 offset, s64 size, const ReadFunction& read_func) {
+        // Check pre-conditions.
+        ASSERT(offset >= 0);
+        ASSERT(this->IsInitialized());
+
+        // Succeed immediately if we have nothing to read.
+        R_SUCCEED_IF(size == 0);
+
+        // Declare read lambda.
+        constexpr int EntriesCountMax = 0x80;
+        struct Entries {
+            CompressionType compression_type;
+            u32 gap_from_prev;
+            u32 physical_size;
+            u32 virtual_size;
+        };
+        Entries entries[EntriesCountMax];
+        s32 entry_count = 0;
+        Entry prev_entry = {
+            .virt_offset = -1,
+        };
+        bool will_allocate_pooled_buffer = false;
+        s64 required_access_physical_offset = 0;
+        s64 required_access_physical_size = 0;
+
+        auto PerformRequiredRead = [&]() -> Result {
+            // If there are no entries, we have nothing to do.
+            R_SUCCEED_IF(entry_count == 0);
+
+            // Get the remaining size in a convenient form.
+            const size_t total_required_size =
+                static_cast<size_t>(required_access_physical_size);
+
+            // Perform the read based on whether we need to allocate a buffer.
+            if (will_allocate_pooled_buffer) {
+                // Allocate a pooled buffer.
+                PooledBuffer pooled_buffer;
+                if (pooled_buffer.GetAllocatableSizeMax() >= total_required_size) {
+                    pooled_buffer.Allocate(total_required_size, m_block_size_max);
+                } else {
+                    pooled_buffer.AllocateParticularlyLarge(
+                        std::min<size_t>(
+                            total_required_size,
+                            PooledBuffer::GetAllocatableParticularlyLargeSizeMax()),
+                        m_block_size_max);
+                }
+
+                // Read each of the entries.
+                for (s32 entry_idx = 0; entry_idx < entry_count; ++entry_idx) {
+                    // Determine the current read size.
+                    bool will_use_pooled_buffer = false;
+                    const size_t cur_read_size = [&]() -> size_t {
+                        if (const size_t target_entry_size =
+                                static_cast<size_t>(entries[entry_idx].physical_size) +
+                                static_cast<size_t>(entries[entry_idx].gap_from_prev);
+                            target_entry_size <= pooled_buffer.GetSize()) {
+                            // We'll be using the pooled buffer.
+                            will_use_pooled_buffer = true;
+
+                            // Determine how much we can read.
+ const size_t max_size = std::min<size_t>( + required_access_physical_size, pooled_buffer.GetSize()); + + size_t read_size = 0; + for (auto n = entry_idx; n < entry_count; ++n) { + const size_t cur_entry_size = + static_cast<size_t>(entries[n].physical_size) + + static_cast<size_t>(entries[n].gap_from_prev); + if (read_size + cur_entry_size > max_size) { + break; + } + + read_size += cur_entry_size; + } + + return read_size; + } else { + // If we don't fit, we must be uncompressed. + ASSERT(entries[entry_idx].compression_type == + CompressionType::None); + + // We can perform the whole of an uncompressed read directly. + return entries[entry_idx].virtual_size; + } + }(); + + // Perform the read based on whether or not we'll use the pooled buffer. + if (will_use_pooled_buffer) { + // Read the compressed data into the pooled buffer. + auto* const buffer = pooled_buffer.GetBuffer(); + m_data_storage->Read(reinterpret_cast<u8*>(buffer), cur_read_size, + required_access_physical_offset); + + // Decompress the data. + size_t buffer_offset; + for (buffer_offset = 0; + entry_idx < entry_count && + ((static_cast<size_t>(entries[entry_idx].physical_size) + + static_cast<size_t>(entries[entry_idx].gap_from_prev)) == 0 || + buffer_offset < cur_read_size); + buffer_offset += entries[entry_idx++].physical_size) { + // Advance by the relevant gap. + buffer_offset += entries[entry_idx].gap_from_prev; + + const auto compression_type = entries[entry_idx].compression_type; + switch (compression_type) { + case CompressionType::None: { + // Check that we can remain within bounds. + ASSERT(buffer_offset + entries[entry_idx].virtual_size <= + cur_read_size); + + // Perform no decompression. + R_TRY(read_func( + entries[entry_idx].virtual_size, + [&](void* dst, size_t dst_size) -> Result { + // Check that the size is valid. + ASSERT(dst_size == entries[entry_idx].virtual_size); + + // We have no compression, so just copy the data + // out. + std::memcpy(dst, buffer + buffer_offset, + entries[entry_idx].virtual_size); + R_SUCCEED(); + })); + + break; + } + case CompressionType::Zeros: { + // Check that we can remain within bounds. + ASSERT(buffer_offset <= cur_read_size); + + // Zero the memory. + R_TRY(read_func( + entries[entry_idx].virtual_size, + [&](void* dst, size_t dst_size) -> Result { + // Check that the size is valid. + ASSERT(dst_size == entries[entry_idx].virtual_size); + + // The data is zeroes, so zero the buffer. + std::memset(dst, 0, entries[entry_idx].virtual_size); + R_SUCCEED(); + })); + + break; + } + default: { + // Check that we can remain within bounds. + ASSERT(buffer_offset + entries[entry_idx].physical_size <= + cur_read_size); + + // Get the decompressor. + const auto decompressor = + this->GetDecompressor(compression_type); + R_UNLESS(decompressor != nullptr, + ResultUnexpectedInCompressedStorageB); + + // Decompress the data. + R_TRY(read_func(entries[entry_idx].virtual_size, + [&](void* dst, size_t dst_size) -> Result { + // Check that the size is valid. + ASSERT(dst_size == + entries[entry_idx].virtual_size); + + // Perform the decompression. + R_RETURN(decompressor( + dst, entries[entry_idx].virtual_size, + buffer + buffer_offset, + entries[entry_idx].physical_size)); + })); + + break; + } + } + } + + // Check that we processed the correct amount of data. + ASSERT(buffer_offset == cur_read_size); + } else { + // Account for the gap from the previous entry. 
+ required_access_physical_offset += entries[entry_idx].gap_from_prev; + required_access_physical_size -= entries[entry_idx].gap_from_prev; + + // We don't need the buffer (as the data is uncompressed), so just + // execute the read. + R_TRY( + read_func(cur_read_size, [&](void* dst, size_t dst_size) -> Result { + // Check that the size is valid. + ASSERT(dst_size == cur_read_size); + + // Perform the read. + m_data_storage->Read(reinterpret_cast<u8*>(dst), cur_read_size, + required_access_physical_offset); + + R_SUCCEED(); + })); + } + + // Advance on. + required_access_physical_offset += cur_read_size; + required_access_physical_size -= cur_read_size; + } + + // Verify that we have nothing remaining to read. + ASSERT(required_access_physical_size == 0); + + R_SUCCEED(); + } else { + // We don't need a buffer, so just execute the read. + R_TRY(read_func(total_required_size, [&](void* dst, size_t dst_size) -> Result { + // Check that the size is valid. + ASSERT(dst_size == total_required_size); + + // Perform the read. + m_data_storage->Read(reinterpret_cast<u8*>(dst), total_required_size, + required_access_physical_offset); + + R_SUCCEED(); + })); + } + + R_SUCCEED(); + }; + + R_TRY(this->OperatePerEntry( + offset, size, + [&](bool* out_continuous, const Entry& entry, s64 virtual_data_size, + s64 data_offset, s64 read_size) -> Result { + // Determine the physical extents. + s64 physical_offset, physical_size; + if (CompressionTypeUtility::IsRandomAccessible(entry.compression_type)) { + physical_offset = entry.phys_offset + data_offset; + physical_size = read_size; + } else { + physical_offset = entry.phys_offset; + physical_size = entry.GetPhysicalSize(); + } + + // If we have a pending data storage operation, perform it if we have to. + const s64 required_access_physical_end = + required_access_physical_offset + required_access_physical_size; + if (required_access_physical_size > 0) { + const bool required_by_gap = + !(required_access_physical_end <= physical_offset && + physical_offset <= Common::AlignUp(required_access_physical_end, + CompressionBlockAlignment)); + const bool required_by_continuous_size = + ((physical_size + physical_offset) - required_access_physical_end) + + required_access_physical_size > + static_cast<s64>(m_continuous_reading_size_max); + const bool required_by_entry_count = entry_count == EntriesCountMax; + if (required_by_gap || required_by_continuous_size || + required_by_entry_count) { + // Check that our planned access is sane. + ASSERT(!will_allocate_pooled_buffer || + required_access_physical_size <= + static_cast<s64>(m_continuous_reading_size_max)); + + // Perform the required read. + const Result rc = PerformRequiredRead(); + if (R_FAILED(rc)) { + R_THROW(rc); + } + + // Reset our requirements. + prev_entry.virt_offset = -1; + required_access_physical_size = 0; + entry_count = 0; + will_allocate_pooled_buffer = false; + } + } + + // Sanity check that we're within bounds on entries. + ASSERT(entry_count < EntriesCountMax); + + // Determine if a buffer allocation is needed. + if (entry.compression_type != CompressionType::None || + (prev_entry.virt_offset >= 0 && + entry.virt_offset - prev_entry.virt_offset != + entry.phys_offset - prev_entry.phys_offset)) { + will_allocate_pooled_buffer = true; + } + + // If we need to access the data storage, update our required access parameters. + if (CompressionTypeUtility::IsDataStorageAccessRequired( + entry.compression_type)) { + // If the data is compressed, ensure the access is sane. 
+                    if (entry.compression_type != CompressionType::None) {
+                        R_UNLESS(data_offset == 0, ResultInvalidOffset);
+                        R_UNLESS(virtual_data_size == read_size, ResultInvalidSize);
+                        R_UNLESS(entry.GetPhysicalSize() <= static_cast<s64>(m_block_size_max),
+                                 ResultUnexpectedInCompressedStorageD);
+                    }
+
+                    // Update the required access parameters.
+                    s64 gap_from_prev;
+                    if (required_access_physical_size > 0) {
+                        gap_from_prev = physical_offset - required_access_physical_end;
+                    } else {
+                        gap_from_prev = 0;
+                        required_access_physical_offset = physical_offset;
+                    }
+                    required_access_physical_size += physical_size + gap_from_prev;
+
+                    // Create an entry to access the data storage.
+                    entries[entry_count++] = {
+                        .compression_type = entry.compression_type,
+                        .gap_from_prev = static_cast<u32>(gap_from_prev),
+                        .physical_size = static_cast<u32>(physical_size),
+                        .virtual_size = static_cast<u32>(read_size),
+                    };
+                } else {
+                    // Verify that we're allowed to be operating on the non-data-storage-access
+                    // type.
+                    R_UNLESS(entry.compression_type == CompressionType::Zeros,
+                             ResultUnexpectedInCompressedStorageB);
+
+                    // If we have entries, create a fake entry for the zero region.
+                    if (entry_count != 0) {
+                        // We need to have a physical size.
+                        R_UNLESS(entry.GetPhysicalSize() != 0,
+                                 ResultUnexpectedInCompressedStorageD);
+
+                        // Create a fake entry.
+                        entries[entry_count++] = {
+                            .compression_type = CompressionType::Zeros,
+                            .gap_from_prev = 0,
+                            .physical_size = 0,
+                            .virtual_size = static_cast<u32>(read_size),
+                        };
+                    } else {
+                        // We have no entries, so we can just perform the read.
+                        const Result rc =
+                            read_func(static_cast<size_t>(read_size),
+                                      [&](void* dst, size_t dst_size) -> Result {
+                                          // Check the space we should zero is correct.
+                                          ASSERT(dst_size == static_cast<size_t>(read_size));
+
+                                          // Zero the memory.
+                                          std::memset(dst, 0, read_size);
+                                          R_SUCCEED();
+                                      });
+                        if (R_FAILED(rc)) {
+                            R_THROW(rc);
+                        }
+                    }
+                }
+
+                // Set the previous entry.
+                prev_entry = entry;
+
+                // We're continuous.
+                *out_continuous = true;
+                R_SUCCEED();
+            }));
+
+        // If we still have a pending access, perform it.
+        if (required_access_physical_size != 0) {
+            R_TRY(PerformRequiredRead());
+        }
+
+        R_SUCCEED();
+    }
+
+private:
+    DecompressorFunction GetDecompressor(CompressionType type) const {
+        // Check that we can get a decompressor for the type.
+        if (CompressionTypeUtility::IsUnknownType(type)) {
+            return nullptr;
+        }
+
+        // Get the decompressor.
+        return m_get_decompressor_function(type);
+    }
+
+    bool IsInitialized() const {
+        return m_table.IsInitialized();
+    }
+};
+
+class CacheManager {
+    YUZU_NON_COPYABLE(CacheManager);
+    YUZU_NON_MOVEABLE(CacheManager);
+
+private:
+    struct AccessRange {
+        s64 virtual_offset;
+        s64 virtual_size;
+        u32 physical_size;
+        bool is_block_alignment_required;
+
+        s64 GetEndVirtualOffset() const {
+            return this->virtual_offset + this->virtual_size;
+        }
+    };
+    static_assert(std::is_trivial_v<AccessRange>);
+
+private:
+    s64 m_storage_size = 0;
+
+public:
+    CacheManager() = default;
+
+public:
+    Result Initialize(s64 storage_size, size_t cache_size_0, size_t cache_size_1,
+                      size_t max_cache_entries) {
+        // Set our fields.
+        m_storage_size = storage_size;
+
+        R_SUCCEED();
+    }
+
+    Result Read(CompressedStorageCore& core, s64 offset, void* buffer, size_t size) {
+        // If we have nothing to read, succeed.
+        R_SUCCEED_IF(size == 0);
+
+        // Check that we have a buffer to read into.
+        R_UNLESS(buffer != nullptr, ResultNullptrArgument);
+
+        // Check that the read is in bounds.
+ R_UNLESS(offset <= m_storage_size, ResultInvalidOffset); + + // Determine how much we can read. + const size_t read_size = std::min<size_t>(size, m_storage_size - offset); + + // Create head/tail ranges. + AccessRange head_range = {}; + AccessRange tail_range = {}; + bool is_tail_set = false; + + // Operate to determine the head range. + R_TRY(core.OperatePerEntry( + offset, 1, + [&](bool* out_continuous, const Entry& entry, s64 virtual_data_size, + s64 data_offset, s64 data_read_size) -> Result { + // Set the head range. + head_range = { + .virtual_offset = entry.virt_offset, + .virtual_size = virtual_data_size, + .physical_size = static_cast<u32>(entry.phys_size), + .is_block_alignment_required = + CompressionTypeUtility::IsBlockAlignmentRequired( + entry.compression_type), + }; + + // If required, set the tail range. + if (static_cast<s64>(offset + read_size) <= + entry.virt_offset + virtual_data_size) { + tail_range = { + .virtual_offset = entry.virt_offset, + .virtual_size = virtual_data_size, + .physical_size = static_cast<u32>(entry.phys_size), + .is_block_alignment_required = + CompressionTypeUtility::IsBlockAlignmentRequired( + entry.compression_type), + }; + is_tail_set = true; + } + + // We only want to determine the head range, so we're not continuous. + *out_continuous = false; + R_SUCCEED(); + })); + + // If necessary, determine the tail range. + if (!is_tail_set) { + R_TRY(core.OperatePerEntry( + offset + read_size - 1, 1, + [&](bool* out_continuous, const Entry& entry, s64 virtual_data_size, + s64 data_offset, s64 data_read_size) -> Result { + // Set the tail range. + tail_range = { + .virtual_offset = entry.virt_offset, + .virtual_size = virtual_data_size, + .physical_size = static_cast<u32>(entry.phys_size), + .is_block_alignment_required = + CompressionTypeUtility::IsBlockAlignmentRequired( + entry.compression_type), + }; + + // We only want to determine the tail range, so we're not continuous. + *out_continuous = false; + R_SUCCEED(); + })); + } + + // Begin performing the accesses. + s64 cur_offset = offset; + size_t cur_size = read_size; + char* cur_dst = static_cast<char*>(buffer); + + // Determine our alignment. + const bool head_unaligned = head_range.is_block_alignment_required && + (cur_offset != head_range.virtual_offset || + static_cast<s64>(cur_size) < head_range.virtual_size); + const bool tail_unaligned = [&]() -> bool { + if (tail_range.is_block_alignment_required) { + if (static_cast<s64>(cur_size + cur_offset) == + tail_range.GetEndVirtualOffset()) { + return false; + } else if (!head_unaligned) { + return true; + } else { + return head_range.GetEndVirtualOffset() < + static_cast<s64>(cur_size + cur_offset); + } + } else { + return false; + } + }(); + + // Determine start/end offsets. + const s64 start_offset = + head_range.is_block_alignment_required ? head_range.virtual_offset : cur_offset; + const s64 end_offset = tail_range.is_block_alignment_required + ? tail_range.GetEndVirtualOffset() + : cur_offset + cur_size; + + // Perform the read. + bool is_burst_reading = false; + R_TRY(core.Read( + start_offset, end_offset - start_offset, + [&](size_t size_buffer_required, + const CompressedStorageCore::ReadImplFunction& read_impl) -> Result { + // Determine whether we're burst reading. + const AccessRange* unaligned_range = nullptr; + if (!is_burst_reading) { + // Check whether we're using head, tail, or none as unaligned. 
+ if (head_unaligned && head_range.virtual_offset <= cur_offset && + cur_offset < head_range.GetEndVirtualOffset()) { + unaligned_range = std::addressof(head_range); + } else if (tail_unaligned && tail_range.virtual_offset <= cur_offset && + cur_offset < tail_range.GetEndVirtualOffset()) { + unaligned_range = std::addressof(tail_range); + } else { + is_burst_reading = true; + } + } + ASSERT((is_burst_reading ^ (unaligned_range != nullptr))); + + // Perform reading by burst, or not. + if (is_burst_reading) { + // Check that the access is valid for burst reading. + ASSERT(size_buffer_required <= cur_size); + + // Perform the read. + Result rc = read_impl(cur_dst, size_buffer_required); + if (R_FAILED(rc)) { + R_THROW(rc); + } + + // Advance. + cur_dst += size_buffer_required; + cur_offset += size_buffer_required; + cur_size -= size_buffer_required; + + // Determine whether we're going to continue burst reading. + const s64 offset_aligned = + tail_unaligned ? tail_range.virtual_offset : end_offset; + ASSERT(cur_offset <= offset_aligned); + + if (offset_aligned <= cur_offset) { + is_burst_reading = false; + } + } else { + // We're not burst reading, so we have some unaligned range. + ASSERT(unaligned_range != nullptr); + + // Check that the size is correct. + ASSERT(size_buffer_required == + static_cast<size_t>(unaligned_range->virtual_size)); + + // Get a pooled buffer for our read. + PooledBuffer pooled_buffer; + pooled_buffer.Allocate(size_buffer_required, size_buffer_required); + + // Perform read. + Result rc = read_impl(pooled_buffer.GetBuffer(), size_buffer_required); + if (R_FAILED(rc)) { + R_THROW(rc); + } + + // Copy the data we read to the destination. + const size_t skip_size = cur_offset - unaligned_range->virtual_offset; + const size_t copy_size = std::min<size_t>( + cur_size, unaligned_range->GetEndVirtualOffset() - cur_offset); + + std::memcpy(cur_dst, pooled_buffer.GetBuffer() + skip_size, copy_size); + + // Advance. + cur_dst += copy_size; + cur_offset += copy_size; + cur_size -= copy_size; + } + + R_SUCCEED(); + })); + + R_SUCCEED(); + } + }; + +private: + mutable CompressedStorageCore m_core; + mutable CacheManager m_cache_manager; + +public: + CompressedStorage() = default; + virtual ~CompressedStorage() { + this->Finalize(); + } + + Result Initialize(VirtualFile data_storage, VirtualFile node_storage, VirtualFile entry_storage, + s32 bktr_entry_count, size_t block_size_max, + size_t continuous_reading_size_max, GetDecompressorFunction get_decompressor, + size_t cache_size_0, size_t cache_size_1, s32 max_cache_entries) { + // Initialize our core. + R_TRY(m_core.Initialize(data_storage, node_storage, entry_storage, bktr_entry_count, + block_size_max, continuous_reading_size_max, get_decompressor)); + + // Get our core size. + s64 core_size = 0; + R_TRY(m_core.GetSize(std::addressof(core_size))); + + // Initialize our cache manager. 
+ R_TRY(m_cache_manager.Initialize(core_size, cache_size_0, cache_size_1, max_cache_entries)); + + R_SUCCEED(); + } + + void Finalize() { + m_core.Finalize(); + } + + VirtualFile GetDataStorage() { + return m_core.GetDataStorage(); + } + + Result GetDataStorageSize(s64* out) { + R_RETURN(m_core.GetDataStorageSize(out)); + } + + Result GetEntryList(Entry* out_entries, s32* out_read_count, s32 max_entry_count, s64 offset, + s64 size) { + R_RETURN(m_core.GetEntryList(out_entries, out_read_count, max_entry_count, offset, size)); + } + + BucketTree& GetEntryTable() { + return m_core.GetEntryTable(); + } + +public: + virtual size_t GetSize() const override { + s64 ret{}; + m_core.GetSize(&ret); + return ret; + } + + virtual size_t Read(u8* buffer, size_t size, size_t offset) const override { + if (R_SUCCEEDED(m_cache_manager.Read(m_core, offset, buffer, size))) { + return size; + } else { + return 0; + } + } +}; + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_compression_common.h b/src/core/file_sys/fssystem/fssystem_compression_common.h new file mode 100644 index 000000000..266e0a7e5 --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_compression_common.h @@ -0,0 +1,43 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "core/hle/result.h" + +namespace FileSys { + +enum class CompressionType : u8 { + None = 0, + Zeros = 1, + Two = 2, + Lz4 = 3, + Unknown = 4, +}; + +using DecompressorFunction = Result (*)(void*, size_t, const void*, size_t); +using GetDecompressorFunction = DecompressorFunction (*)(CompressionType); + +constexpr s64 CompressionBlockAlignment = 0x10; + +namespace CompressionTypeUtility { + +constexpr bool IsBlockAlignmentRequired(CompressionType type) { + return type != CompressionType::None && type != CompressionType::Zeros; +} + +constexpr bool IsDataStorageAccessRequired(CompressionType type) { + return type != CompressionType::Zeros; +} + +constexpr bool IsRandomAccessible(CompressionType type) { + return type == CompressionType::None; +} + +constexpr bool IsUnknownType(CompressionType type) { + return type >= CompressionType::Unknown; +} + +} // namespace CompressionTypeUtility + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_compression_configuration.cpp b/src/core/file_sys/fssystem/fssystem_compression_configuration.cpp new file mode 100644 index 000000000..8734f84ca --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_compression_configuration.cpp @@ -0,0 +1,36 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "common/lz4_compression.h" +#include "core/file_sys/fssystem/fssystem_compression_configuration.h" + +namespace FileSys { + +namespace { + +Result DecompressLz4(void* dst, size_t dst_size, const void* src, size_t src_size) { + auto result = Common::Compression::DecompressLZ4(dst, dst_size, src, src_size); + R_UNLESS(static_cast<size_t>(result) == dst_size, ResultUnexpectedInCompressedStorageC); + R_SUCCEED(); +} + +constexpr DecompressorFunction GetNcaDecompressorFunction(CompressionType type) { + switch (type) { + case CompressionType::Lz4: + return DecompressLz4; + default: + return nullptr; + } +} + +constexpr NcaCompressionConfiguration g_nca_compression_configuration{ + .get_decompressor = GetNcaDecompressorFunction, +}; + +} // namespace + +const NcaCompressionConfiguration* GetNcaCompressionConfiguration() { + return 
std::addressof(g_nca_compression_configuration); +} + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_compression_configuration.h b/src/core/file_sys/fssystem/fssystem_compression_configuration.h new file mode 100644 index 000000000..b4ec4f203 --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_compression_configuration.h @@ -0,0 +1,12 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h" + +namespace FileSys { + +const NcaCompressionConfiguration* GetNcaCompressionConfiguration(); + +} diff --git a/src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp b/src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp new file mode 100644 index 000000000..7b89d4512 --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_crypto_configuration.cpp @@ -0,0 +1,57 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/crypto/aes_util.h" +#include "core/crypto/key_manager.h" +#include "core/file_sys/fssystem/fssystem_crypto_configuration.h" + +namespace FileSys { + +namespace { + +void GenerateKey(void* dst_key, size_t dst_key_size, const void* src_key, size_t src_key_size, + s32 key_type) { + if (key_type == static_cast<s32>(KeyType::ZeroKey)) { + std::memset(dst_key, 0, dst_key_size); + return; + } + + if (key_type == static_cast<s32>(KeyType::InvalidKey) || + key_type < static_cast<s32>(KeyType::ZeroKey) || + key_type >= static_cast<s32>(KeyType::NcaExternalKey)) { + std::memset(dst_key, 0xFF, dst_key_size); + return; + } + + const auto& instance = Core::Crypto::KeyManager::Instance(); + + if (key_type == static_cast<s32>(KeyType::NcaHeaderKey1) || + key_type == static_cast<s32>(KeyType::NcaHeaderKey2)) { + const s32 key_index = static_cast<s32>(KeyType::NcaHeaderKey2) == key_type; + const auto key = instance.GetKey(Core::Crypto::S256KeyType::Header); + std::memcpy(dst_key, key.data() + key_index * 0x10, std::min(dst_key_size, key.size() / 2)); + return; + } + + const s32 key_generation = + std::max(key_type / NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount, 1) - 1; + const s32 key_index = key_type % NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount; + + Core::Crypto::AESCipher<Core::Crypto::Key128> cipher( + instance.GetKey(Core::Crypto::S128KeyType::KeyArea, key_generation, key_index), + Core::Crypto::Mode::ECB); + cipher.Transcode(reinterpret_cast<const u8*>(src_key), src_key_size, + reinterpret_cast<u8*>(dst_key), Core::Crypto::Op::Decrypt); +} + +} // namespace + +const NcaCryptoConfiguration& GetCryptoConfiguration() { + static const NcaCryptoConfiguration configuration = { + .generate_key = GenerateKey, + }; + + return configuration; +} + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_crypto_configuration.h b/src/core/file_sys/fssystem/fssystem_crypto_configuration.h new file mode 100644 index 000000000..7fd9c5a8d --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_crypto_configuration.h @@ -0,0 +1,12 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h" + +namespace FileSys { + +const NcaCryptoConfiguration& GetCryptoConfiguration(); + +} diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp 
b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp new file mode 100644 index 000000000..b2e031d5f --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp @@ -0,0 +1,132 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h" +#include "core/file_sys/vfs_offset.h" + +namespace FileSys { + +HierarchicalIntegrityVerificationStorage::HierarchicalIntegrityVerificationStorage() + : m_data_size(-1) { + for (size_t i = 0; i < MaxLayers - 1; i++) { + m_verify_storages[i] = std::make_shared<IntegrityVerificationStorage>(); + } +} + +Result HierarchicalIntegrityVerificationStorage::Initialize( + const HierarchicalIntegrityVerificationInformation& info, + HierarchicalStorageInformation storage, int max_data_cache_entries, int max_hash_cache_entries, + s8 buffer_level) { + using AlignedStorage = AlignmentMatchingStoragePooledBuffer<1>; + + // Validate preconditions. + ASSERT(IntegrityMinLayerCount <= info.max_layers && info.max_layers <= IntegrityMaxLayerCount); + + // Set member variables. + m_max_layers = info.max_layers; + + // Initialize the top level verification storage. + m_verify_storages[0]->Initialize(storage[HierarchicalStorageInformation::MasterStorage], + storage[HierarchicalStorageInformation::Layer1Storage], + static_cast<s64>(1) << info.info[0].block_order, HashSize, + false); + + // Ensure we don't leak state if further initialization goes wrong. + ON_RESULT_FAILURE { + m_verify_storages[0]->Finalize(); + m_data_size = -1; + }; + + // Initialize the top level buffer storage. + m_buffer_storages[0] = std::make_shared<AlignedStorage>( + m_verify_storages[0], static_cast<s64>(1) << info.info[0].block_order); + R_UNLESS(m_buffer_storages[0] != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Prepare to initialize the level storages. + s32 level = 0; + + // Ensure we don't leak state if further initialization goes wrong. + ON_RESULT_FAILURE_2 { + m_verify_storages[level + 1]->Finalize(); + for (; level > 0; --level) { + m_buffer_storages[level].reset(); + m_verify_storages[level]->Finalize(); + } + }; + + // Initialize the level storages. + for (; level < m_max_layers - 3; ++level) { + // Initialize the verification storage. + auto buffer_storage = + std::make_shared<OffsetVfsFile>(m_buffer_storages[level], info.info[level].size, 0); + m_verify_storages[level + 1]->Initialize( + std::move(buffer_storage), storage[level + 2], + static_cast<s64>(1) << info.info[level + 1].block_order, + static_cast<s64>(1) << info.info[level].block_order, false); + + // Initialize the buffer storage. + m_buffer_storages[level + 1] = std::make_shared<AlignedStorage>( + m_verify_storages[level + 1], static_cast<s64>(1) << info.info[level + 1].block_order); + R_UNLESS(m_buffer_storages[level + 1] != nullptr, + ResultAllocationMemoryFailedAllocateShared); + } + + // Initialize the final level storage. + { + // Initialize the verification storage. + auto buffer_storage = + std::make_shared<OffsetVfsFile>(m_buffer_storages[level], info.info[level].size, 0); + m_verify_storages[level + 1]->Initialize( + std::move(buffer_storage), storage[level + 2], + static_cast<s64>(1) << info.info[level + 1].block_order, + static_cast<s64>(1) << info.info[level].block_order, true); + + // Initialize the buffer storage. 
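+        // (As with the intermediate levels, wrap the verification storage in an
+        // alignment-matching pooled-buffer storage.)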
+ m_buffer_storages[level + 1] = std::make_shared<AlignedStorage>( + m_verify_storages[level + 1], static_cast<s64>(1) << info.info[level + 1].block_order); + R_UNLESS(m_buffer_storages[level + 1] != nullptr, + ResultAllocationMemoryFailedAllocateShared); + } + + // Set the data size. + m_data_size = info.info[level + 1].size; + + // We succeeded. + R_SUCCEED(); +} + +void HierarchicalIntegrityVerificationStorage::Finalize() { + if (m_data_size >= 0) { + m_data_size = 0; + + for (s32 level = m_max_layers - 2; level >= 0; --level) { + m_buffer_storages[level].reset(); + m_verify_storages[level]->Finalize(); + } + + m_data_size = -1; + } +} + +size_t HierarchicalIntegrityVerificationStorage::Read(u8* buffer, size_t size, + size_t offset) const { + // Validate preconditions. + ASSERT(m_data_size >= 0); + + // Succeed if zero-size. + if (size == 0) { + return size; + } + + // Validate arguments. + ASSERT(buffer != nullptr); + + // Read the data. + return m_buffer_storages[m_max_layers - 2]->Read(buffer, size, offset); +} + +size_t HierarchicalIntegrityVerificationStorage::GetSize() const { + return m_data_size; +} + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h new file mode 100644 index 000000000..5e0a1d143 --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h @@ -0,0 +1,164 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "common/alignment.h" +#include "core/file_sys/fssystem/fs_i_storage.h" +#include "core/file_sys/fssystem/fs_types.h" +#include "core/file_sys/fssystem/fssystem_alignment_matching_storage.h" +#include "core/file_sys/fssystem/fssystem_integrity_verification_storage.h" +#include "core/file_sys/vfs_offset.h" + +namespace FileSys { + +struct HierarchicalIntegrityVerificationLevelInformation { + Int64 offset; + Int64 size; + s32 block_order; + std::array<u8, 4> reserved; +}; +static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationLevelInformation>); +static_assert(sizeof(HierarchicalIntegrityVerificationLevelInformation) == 0x18); +static_assert(alignof(HierarchicalIntegrityVerificationLevelInformation) == 0x4); + +struct HierarchicalIntegrityVerificationInformation { + u32 max_layers; + HierarchicalIntegrityVerificationLevelInformation info[IntegrityMaxLayerCount - 1]; + HashSalt seed; + + s64 GetLayeredHashSize() const { + return this->info[this->max_layers - 2].offset; + } + + s64 GetDataOffset() const { + return this->info[this->max_layers - 2].offset; + } + + s64 GetDataSize() const { + return this->info[this->max_layers - 2].size; + } +}; +static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationInformation>); + +struct HierarchicalIntegrityVerificationMetaInformation { + u32 magic; + u32 version; + u32 master_hash_size; + HierarchicalIntegrityVerificationInformation level_hash_info; +}; +static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationMetaInformation>); + +struct HierarchicalIntegrityVerificationSizeSet { + s64 control_size; + s64 master_hash_size; + s64 layered_hash_sizes[IntegrityMaxLayerCount - 2]; +}; +static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationSizeSet>); + +class HierarchicalIntegrityVerificationStorage : public IReadOnlyStorage { + YUZU_NON_COPYABLE(HierarchicalIntegrityVerificationStorage); + 
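    // (Aside: given the bounds in fs_types.h, the info[] array above holds
    // IntegrityMaxLayerCount - 1 == 6 level descriptors. For a storage with
    // max_layers == N, info[0..N-3] describe the hash layers beneath the
    // master hash and info[N-2] describes the data layer itself, which is why
    // the accessors above all index info[max_layers - 2].)
    static_assert(IntegrityMaxLayerCount - 1 == 6);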
YUZU_NON_MOVEABLE(HierarchicalIntegrityVerificationStorage); + +private: + friend struct HierarchicalIntegrityVerificationMetaInformation; + +protected: + static constexpr s64 HashSize = 256 / 8; + static constexpr size_t MaxLayers = IntegrityMaxLayerCount; + +public: + using GenerateRandomFunction = void (*)(void* dst, size_t size); + + class HierarchicalStorageInformation { + public: + enum { + MasterStorage = 0, + Layer1Storage = 1, + Layer2Storage = 2, + Layer3Storage = 3, + Layer4Storage = 4, + Layer5Storage = 5, + DataStorage = 6, + }; + + private: + VirtualFile m_storages[DataStorage + 1]; + + public: + void SetMasterHashStorage(VirtualFile s) { + m_storages[MasterStorage] = s; + } + void SetLayer1HashStorage(VirtualFile s) { + m_storages[Layer1Storage] = s; + } + void SetLayer2HashStorage(VirtualFile s) { + m_storages[Layer2Storage] = s; + } + void SetLayer3HashStorage(VirtualFile s) { + m_storages[Layer3Storage] = s; + } + void SetLayer4HashStorage(VirtualFile s) { + m_storages[Layer4Storage] = s; + } + void SetLayer5HashStorage(VirtualFile s) { + m_storages[Layer5Storage] = s; + } + void SetDataStorage(VirtualFile s) { + m_storages[DataStorage] = s; + } + + VirtualFile& operator[](s32 index) { + ASSERT(MasterStorage <= index && index <= DataStorage); + return m_storages[index]; + } + }; + +private: + static GenerateRandomFunction s_generate_random; + + static void SetGenerateRandomFunction(GenerateRandomFunction func) { + s_generate_random = func; + } + +private: + std::shared_ptr<IntegrityVerificationStorage> m_verify_storages[MaxLayers - 1]; + std::shared_ptr<AlignmentMatchingStoragePooledBuffer<1>> m_buffer_storages[MaxLayers - 1]; + s64 m_data_size; + s32 m_max_layers; + +public: + HierarchicalIntegrityVerificationStorage(); + virtual ~HierarchicalIntegrityVerificationStorage() override { + this->Finalize(); + } + + Result Initialize(const HierarchicalIntegrityVerificationInformation& info, + HierarchicalStorageInformation storage, int max_data_cache_entries, + int max_hash_cache_entries, s8 buffer_level); + void Finalize(); + + virtual size_t Read(u8* buffer, size_t size, size_t offset) const override; + virtual size_t GetSize() const override; + + bool IsInitialized() const { + return m_data_size >= 0; + } + + s64 GetL1HashVerificationBlockSize() const { + return m_verify_storages[m_max_layers - 2]->GetBlockSize(); + } + + VirtualFile GetL1HashStorage() { + return std::make_shared<OffsetVfsFile>( + m_buffer_storages[m_max_layers - 3], + Common::DivideUp(m_data_size, this->GetL1HashVerificationBlockSize()), 0); + } + +public: + static constexpr s8 GetDefaultDataCacheBufferLevel(u32 max_layers) { + return static_cast<s8>(16 + max_layers - 2); + } +}; + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp new file mode 100644 index 000000000..357fa7741 --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp @@ -0,0 +1,103 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "common/alignment.h" +#include "common/scope_exit.h" +#include "core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h" + +namespace FileSys { + +namespace { + +s32 Log2(s32 value) { + ASSERT(value > 0); + ASSERT(Common::IsPowerOfTwo(value)); + + s32 log = 0; + while ((value >>= 1) > 0) { + ++log; + } + return log; +} + +} // namespace + +Result 
HierarchicalSha256Storage::Initialize(VirtualFile* base_storages, s32 layer_count, + size_t htbs, void* hash_buf, size_t hash_buf_size) { + // Validate preconditions. + ASSERT(layer_count == LayerCount); + ASSERT(Common::IsPowerOfTwo(htbs)); + ASSERT(hash_buf != nullptr); + + // Set size tracking members. + m_hash_target_block_size = static_cast<s32>(htbs); + m_log_size_ratio = Log2(m_hash_target_block_size / HashSize); + + // Get the base storage size. + m_base_storage_size = base_storages[2]->GetSize(); + { + auto size_guard = SCOPE_GUARD({ m_base_storage_size = 0; }); + R_UNLESS(m_base_storage_size <= static_cast<s64>(HashSize) + << m_log_size_ratio << m_log_size_ratio, + ResultHierarchicalSha256BaseStorageTooLarge); + size_guard.Cancel(); + } + + // Set hash buffer tracking members. + m_base_storage = base_storages[2]; + m_hash_buffer = static_cast<char*>(hash_buf); + m_hash_buffer_size = hash_buf_size; + + // Read the master hash. + std::array<u8, HashSize> master_hash{}; + base_storages[0]->ReadObject(std::addressof(master_hash)); + + // Read and validate the data being hashed. + s64 hash_storage_size = base_storages[1]->GetSize(); + ASSERT(Common::IsAligned(hash_storage_size, HashSize)); + ASSERT(hash_storage_size <= m_hash_target_block_size); + ASSERT(hash_storage_size <= static_cast<s64>(m_hash_buffer_size)); + + base_storages[1]->Read(reinterpret_cast<u8*>(m_hash_buffer), + static_cast<size_t>(hash_storage_size), 0); + + R_SUCCEED(); +} + +size_t HierarchicalSha256Storage::Read(u8* buffer, size_t size, size_t offset) const { + // Succeed if zero-size. + if (size == 0) { + return size; + } + + // Validate that we have a buffer to read into. + ASSERT(buffer != nullptr); + + // Validate preconditions. + ASSERT(Common::IsAligned(offset, m_hash_target_block_size)); + ASSERT(Common::IsAligned(size, m_hash_target_block_size)); + + // Read the data. + const size_t reduced_size = static_cast<size_t>( + std::min<s64>(m_base_storage_size, + Common::AlignUp(offset + size, m_hash_target_block_size)) - + offset); + m_base_storage->Read(buffer, reduced_size, offset); + + // Setup tracking variables. + auto cur_offset = offset; + auto remaining_size = reduced_size; + while (remaining_size > 0) { + const auto cur_size = + static_cast<size_t>(std::min<s64>(m_hash_target_block_size, remaining_size)); + ASSERT(static_cast<size_t>(cur_offset >> m_log_size_ratio) < m_hash_buffer_size); + + // Advance. 
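    // (Aside: the assert above leans on m_log_size_ratio ==
    // Log2(block_size / HashSize), so "offset >> m_log_size_ratio" equals
    // (offset / block_size) * HashSize, i.e. the byte position of that
    // block's hash inside m_hash_buffer. A worked instance under an assumed
    // 0x1000-byte hash-target block, giving m_log_size_ratio == 7:)
    static_assert((s64{0x3000} >> 7) == 3 * 0x20); // block 3's hash sits at byte 0x60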
+ cur_offset += cur_size; + remaining_size -= cur_size; + } + + return size; +} + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h new file mode 100644 index 000000000..717ba9748 --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <mutex> + +#include "core/file_sys/errors.h" +#include "core/file_sys/fssystem/fs_i_storage.h" +#include "core/file_sys/vfs.h" + +namespace FileSys { + +class HierarchicalSha256Storage : public IReadOnlyStorage { + YUZU_NON_COPYABLE(HierarchicalSha256Storage); + YUZU_NON_MOVEABLE(HierarchicalSha256Storage); + +public: + static constexpr s32 LayerCount = 3; + static constexpr size_t HashSize = 256 / 8; + +private: + VirtualFile m_base_storage; + s64 m_base_storage_size; + char* m_hash_buffer; + size_t m_hash_buffer_size; + s32 m_hash_target_block_size; + s32 m_log_size_ratio; + std::mutex m_mutex; + +public: + HierarchicalSha256Storage() : m_mutex() {} + + Result Initialize(VirtualFile* base_storages, s32 layer_count, size_t htbs, void* hash_buf, + size_t hash_buf_size); + + virtual size_t GetSize() const override { + return m_base_storage->GetSize(); + } + + virtual size_t Read(u8* buffer, size_t length, size_t offset) const override; +}; + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_indirect_storage.cpp b/src/core/file_sys/fssystem/fssystem_indirect_storage.cpp new file mode 100644 index 000000000..45aa08d30 --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_indirect_storage.cpp @@ -0,0 +1,120 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/file_sys/errors.h" +#include "core/file_sys/fssystem/fssystem_indirect_storage.h" + +namespace FileSys { + +Result IndirectStorage::Initialize(VirtualFile table_storage) { + // Read and verify the bucket tree header. + BucketTree::Header header; + table_storage->ReadObject(std::addressof(header)); + R_TRY(header.Verify()); + + // Determine extents. + const auto node_storage_size = QueryNodeStorageSize(header.entry_count); + const auto entry_storage_size = QueryEntryStorageSize(header.entry_count); + const auto node_storage_offset = QueryHeaderStorageSize(); + const auto entry_storage_offset = node_storage_offset + node_storage_size; + + // Initialize. + R_RETURN(this->Initialize( + std::make_shared<OffsetVfsFile>(table_storage, node_storage_size, node_storage_offset), + std::make_shared<OffsetVfsFile>(table_storage, entry_storage_size, entry_storage_offset), + header.entry_count)); +} + +void IndirectStorage::Finalize() { + if (this->IsInitialized()) { + m_table.Finalize(); + for (auto i = 0; i < StorageCount; i++) { + m_data_storage[i] = VirtualFile(); + } + } +} + +Result IndirectStorage::GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count, + s64 offset, s64 size) { + // Validate pre-conditions. + ASSERT(offset >= 0); + ASSERT(size >= 0); + ASSERT(this->IsInitialized()); + + // Clear the out count. + R_UNLESS(out_entry_count != nullptr, ResultNullptrArgument); + *out_entry_count = 0; + + // Succeed if there's no range. + R_SUCCEED_IF(size == 0); + + // If we have an output array, we need it to be non-null. 
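    // (Aside: Initialize above derives the whole table layout from nothing
    // but the verified header's entry_count. A never-invoked sketch of the
    // same arithmetic, using the public Query* helpers:)
    [[maybe_unused]] const auto example_table_layout = [](s32 entry_count) {
        const s64 node_offset = IndirectStorage::QueryHeaderStorageSize();
        const s64 node_size = IndirectStorage::QueryNodeStorageSize(entry_count);
        const s64 entry_offset = node_offset + node_size;
        const s64 entry_size = IndirectStorage::QueryEntryStorageSize(entry_count);
        return entry_offset + entry_size; // total bytes one table occupies
    };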
+ R_UNLESS(out_entries != nullptr || entry_count == 0, ResultNullptrArgument); + + // Check that our range is valid. + BucketTree::Offsets table_offsets; + R_TRY(m_table.GetOffsets(std::addressof(table_offsets))); + + R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange); + + // Find the offset in our tree. + BucketTree::Visitor visitor; + R_TRY(m_table.Find(std::addressof(visitor), offset)); + { + const auto entry_offset = visitor.Get<Entry>()->GetVirtualOffset(); + R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset), + ResultInvalidIndirectEntryOffset); + } + + // Prepare to loop over entries. + const auto end_offset = offset + static_cast<s64>(size); + s32 count = 0; + + auto cur_entry = *visitor.Get<Entry>(); + while (cur_entry.GetVirtualOffset() < end_offset) { + // Try to write the entry to the out list + if (entry_count != 0) { + if (count >= entry_count) { + break; + } + std::memcpy(out_entries + count, std::addressof(cur_entry), sizeof(Entry)); + } + + count++; + + // Advance. + if (visitor.CanMoveNext()) { + R_TRY(visitor.MoveNext()); + cur_entry = *visitor.Get<Entry>(); + } else { + break; + } + } + + // Write the output count. + *out_entry_count = count; + R_SUCCEED(); +} + +size_t IndirectStorage::Read(u8* buffer, size_t size, size_t offset) const { + // Validate pre-conditions. + ASSERT(offset >= 0); + ASSERT(this->IsInitialized()); + ASSERT(buffer != nullptr); + + // Succeed if there's nothing to read. + if (size == 0) { + return 0; + } + + const_cast<IndirectStorage*>(this)->OperatePerEntry<true, true>( + offset, size, + [=](VirtualFile storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result { + storage->Read(reinterpret_cast<u8*>(buffer) + (cur_offset - offset), + static_cast<size_t>(cur_size), data_offset); + R_SUCCEED(); + }); + + return size; +} +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_indirect_storage.h b/src/core/file_sys/fssystem/fssystem_indirect_storage.h new file mode 100644 index 000000000..39293667b --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_indirect_storage.h @@ -0,0 +1,294 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "core/file_sys/errors.h" +#include "core/file_sys/fssystem/fs_i_storage.h" +#include "core/file_sys/fssystem/fssystem_bucket_tree.h" +#include "core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h" +#include "core/file_sys/vfs.h" +#include "core/file_sys/vfs_offset.h" + +namespace FileSys { + +class IndirectStorage : public IReadOnlyStorage { + YUZU_NON_COPYABLE(IndirectStorage); + YUZU_NON_MOVEABLE(IndirectStorage); + +public: + static constexpr s32 StorageCount = 2; + static constexpr size_t NodeSize = 16_KiB; + + struct Entry { + u8 virt_offset[sizeof(s64)]; + u8 phys_offset[sizeof(s64)]; + s32 storage_index; + + void SetVirtualOffset(const s64& ofs) { + std::memcpy(this->virt_offset, std::addressof(ofs), sizeof(s64)); + } + + s64 GetVirtualOffset() const { + s64 offset; + std::memcpy(std::addressof(offset), this->virt_offset, sizeof(s64)); + return offset; + } + + void SetPhysicalOffset(const s64& ofs) { + std::memcpy(this->phys_offset, std::addressof(ofs), sizeof(s64)); + } + + s64 GetPhysicalOffset() const { + s64 offset; + std::memcpy(std::addressof(offset), this->phys_offset, sizeof(s64)); + return offset; + } + }; + static_assert(std::is_trivial_v<Entry>); + static_assert(sizeof(Entry) == 0x14); + + struct EntryData { + s64 virt_offset; + s64 phys_offset; 
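    // (Aside: EntryData is the naturally aligned mirror of the packed Entry
    // above. Entry keeps its offsets as u8[8] plus std::memcpy precisely so
    // the on-disk record has no alignment padding; the 0x14-byte layout is
    // just the sum of its parts:)
    static_assert(sizeof(s64) * 2 + sizeof(s32) == 0x14);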
+ s32 storage_index; + + void Set(const Entry& entry) { + this->virt_offset = entry.GetVirtualOffset(); + this->phys_offset = entry.GetPhysicalOffset(); + this->storage_index = entry.storage_index; + } + }; + static_assert(std::is_trivial_v<EntryData>); + +private: + struct ContinuousReadingEntry { + static constexpr size_t FragmentSizeMax = 4_KiB; + + IndirectStorage::Entry entry; + + s64 GetVirtualOffset() const { + return this->entry.GetVirtualOffset(); + } + + s64 GetPhysicalOffset() const { + return this->entry.GetPhysicalOffset(); + } + + bool IsFragment() const { + return this->entry.storage_index != 0; + } + }; + static_assert(std::is_trivial_v<ContinuousReadingEntry>); + +public: + static constexpr s64 QueryHeaderStorageSize() { + return BucketTree::QueryHeaderStorageSize(); + } + + static constexpr s64 QueryNodeStorageSize(s32 entry_count) { + return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count); + } + + static constexpr s64 QueryEntryStorageSize(s32 entry_count) { + return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count); + } + +private: + mutable BucketTree m_table; + std::array<VirtualFile, StorageCount> m_data_storage; + +public: + IndirectStorage() : m_table(), m_data_storage() {} + virtual ~IndirectStorage() { + this->Finalize(); + } + + Result Initialize(VirtualFile table_storage); + void Finalize(); + + bool IsInitialized() const { + return m_table.IsInitialized(); + } + + Result Initialize(VirtualFile node_storage, VirtualFile entry_storage, s32 entry_count) { + R_RETURN( + m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry), entry_count)); + } + + void SetStorage(s32 idx, VirtualFile storage) { + ASSERT(0 <= idx && idx < StorageCount); + m_data_storage[idx] = storage; + } + + template <typename T> + void SetStorage(s32 idx, T storage, s64 offset, s64 size) { + ASSERT(0 <= idx && idx < StorageCount); + m_data_storage[idx] = std::make_shared<OffsetVfsFile>(storage, size, offset); + } + + Result GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count, s64 offset, + s64 size); + + virtual size_t GetSize() const override { + BucketTree::Offsets offsets; + m_table.GetOffsets(std::addressof(offsets)); + + return offsets.end_offset; + } + + virtual size_t Read(u8* buffer, size_t size, size_t offset) const override; + +protected: + BucketTree& GetEntryTable() { + return m_table; + } + + VirtualFile& GetDataStorage(s32 index) { + ASSERT(0 <= index && index < StorageCount); + return m_data_storage[index]; + } + + template <bool ContinuousCheck, bool RangeCheck, typename F> + Result OperatePerEntry(s64 offset, s64 size, F func); +}; + +template <bool ContinuousCheck, bool RangeCheck, typename F> +Result IndirectStorage::OperatePerEntry(s64 offset, s64 size, F func) { + // Validate preconditions. + ASSERT(offset >= 0); + ASSERT(size >= 0); + ASSERT(this->IsInitialized()); + + // Succeed if there's nothing to operate on. + R_SUCCEED_IF(size == 0); + + // Get the table offsets. + BucketTree::Offsets table_offsets; + R_TRY(m_table.GetOffsets(std::addressof(table_offsets))); + + // Validate arguments. + R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange); + + // Find the offset in our tree. 
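    // (Aside: each entry defines an affine mapping: a virtual offset v in
    // [entry_virt, next_entry_virt) is serviced from physical offset
    // entry_phys + (v - entry_virt) of the data storage chosen by
    // storage_index. A worked instance with assumed values
    // entry_virt = 0x4000 and entry_phys = 0x9000:)
    static_assert(s64{0x9000} + (s64{0x5800} - s64{0x4000}) == 0xA800);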
+ BucketTree::Visitor visitor; + R_TRY(m_table.Find(std::addressof(visitor), offset)); + { + const auto entry_offset = visitor.Get<Entry>()->GetVirtualOffset(); + R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset), + ResultInvalidIndirectEntryOffset); + } + + // Prepare to operate in chunks. + auto cur_offset = offset; + const auto end_offset = offset + static_cast<s64>(size); + BucketTree::ContinuousReadingInfo cr_info; + + while (cur_offset < end_offset) { + // Get the current entry. + const auto cur_entry = *visitor.Get<Entry>(); + + // Get and validate the entry's offset. + const auto cur_entry_offset = cur_entry.GetVirtualOffset(); + R_UNLESS(cur_entry_offset <= cur_offset, ResultInvalidIndirectEntryOffset); + + // Validate the storage index. + R_UNLESS(0 <= cur_entry.storage_index && cur_entry.storage_index < StorageCount, + ResultInvalidIndirectEntryStorageIndex); + + // If we need to check the continuous info, do so. + if constexpr (ContinuousCheck) { + // Scan, if we need to. + if (cr_info.CheckNeedScan()) { + R_TRY(visitor.ScanContinuousReading<ContinuousReadingEntry>( + std::addressof(cr_info), cur_offset, + static_cast<size_t>(end_offset - cur_offset))); + } + + // Process a base storage entry. + if (cr_info.CanDo()) { + // Ensure that we can process. + R_UNLESS(cur_entry.storage_index == 0, ResultInvalidIndirectEntryStorageIndex); + + // Ensure that we remain within range. + const auto data_offset = cur_offset - cur_entry_offset; + const auto cur_entry_phys_offset = cur_entry.GetPhysicalOffset(); + const auto cur_size = static_cast<s64>(cr_info.GetReadSize()); + + // If we should, verify the range. + if constexpr (RangeCheck) { + // Get the current data storage's size. + s64 cur_data_storage_size = m_data_storage[0]->GetSize(); + + R_UNLESS(0 <= cur_entry_phys_offset && + cur_entry_phys_offset <= cur_data_storage_size, + ResultInvalidIndirectEntryOffset); + R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <= + cur_data_storage_size, + ResultInvalidIndirectStorageSize); + } + + // Operate. + R_TRY(func(m_data_storage[0], cur_entry_phys_offset + data_offset, cur_offset, + cur_size)); + + // Mark as done. + cr_info.Done(); + } + } + + // Get and validate the next entry offset. + s64 next_entry_offset; + if (visitor.CanMoveNext()) { + R_TRY(visitor.MoveNext()); + next_entry_offset = visitor.Get<Entry>()->GetVirtualOffset(); + R_UNLESS(table_offsets.IsInclude(next_entry_offset), ResultInvalidIndirectEntryOffset); + } else { + next_entry_offset = table_offsets.end_offset; + } + R_UNLESS(cur_offset < next_entry_offset, ResultInvalidIndirectEntryOffset); + + // Get the offset of the entry in the data we read. + const auto data_offset = cur_offset - cur_entry_offset; + const auto data_size = (next_entry_offset - cur_entry_offset); + ASSERT(data_size > 0); + + // Determine how much is left. + const auto remaining_size = end_offset - cur_offset; + const auto cur_size = std::min<s64>(remaining_size, data_size - data_offset); + ASSERT(cur_size <= size); + + // Operate, if we need to. + bool needs_operate; + if constexpr (!ContinuousCheck) { + needs_operate = true; + } else { + needs_operate = !cr_info.IsDone() || cur_entry.storage_index != 0; + } + + if (needs_operate) { + const auto cur_entry_phys_offset = cur_entry.GetPhysicalOffset(); + + if constexpr (RangeCheck) { + // Get the current data storage's size. + s64 cur_data_storage_size = m_data_storage[cur_entry.storage_index]->GetSize(); + + // Ensure that we remain within range. 
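    // (Aside on the ContinuousCheck path above: ScanContinuousReading looks
    // ahead through the upcoming entries and, when a run of them maps to one
    // contiguous stretch of the base storage, cr_info.CanDo() lets the loop
    // service the whole run with a single read of cr_info.GetReadSize()
    // bytes instead of one read per entry. Judging by
    // ContinuousReadingEntry::FragmentSizeMax == 4_KiB, small fragment
    // entries are tolerated inside such a run.)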
+ R_UNLESS(0 <= cur_entry_phys_offset && + cur_entry_phys_offset <= cur_data_storage_size, + ResultIndirectStorageCorrupted); + R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <= cur_data_storage_size, + ResultIndirectStorageCorrupted); + } + + R_TRY(func(m_data_storage[cur_entry.storage_index], cur_entry_phys_offset + data_offset, + cur_offset, cur_size)); + } + + cur_offset += cur_size; + } + + R_SUCCEED(); +} + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp new file mode 100644 index 000000000..2c3da230c --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.cpp @@ -0,0 +1,30 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/file_sys/fssystem/fssystem_integrity_romfs_storage.h" + +namespace FileSys { + +Result IntegrityRomFsStorage::Initialize( + HierarchicalIntegrityVerificationInformation level_hash_info, Hash master_hash, + HierarchicalIntegrityVerificationStorage::HierarchicalStorageInformation storage_info, + int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level) { + // Set master hash. + m_master_hash = master_hash; + m_master_hash_storage = std::make_shared<ArrayVfsFile<sizeof(Hash)>>(m_master_hash.value); + R_UNLESS(m_master_hash_storage != nullptr, + ResultAllocationMemoryFailedInIntegrityRomFsStorageA); + + // Set the master hash storage. + storage_info[0] = m_master_hash_storage; + + // Initialize our integrity storage. + R_RETURN(m_integrity_storage.Initialize(level_hash_info, storage_info, max_data_cache_entries, + max_hash_cache_entries, buffer_level)); +} + +void IntegrityRomFsStorage::Finalize() { + m_integrity_storage.Finalize(); +} + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h new file mode 100644 index 000000000..b80e9a302 --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_integrity_romfs_storage.h @@ -0,0 +1,42 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h" +#include "core/file_sys/fssystem/fssystem_nca_header.h" +#include "core/file_sys/vfs_vector.h" + +namespace FileSys { + +constexpr inline size_t IntegrityLayerCountRomFs = 7; +constexpr inline size_t IntegrityHashLayerBlockSize = 16_KiB; + +class IntegrityRomFsStorage : public IReadOnlyStorage { +private: + HierarchicalIntegrityVerificationStorage m_integrity_storage; + Hash m_master_hash; + std::shared_ptr<ArrayVfsFile<sizeof(Hash)>> m_master_hash_storage; + +public: + IntegrityRomFsStorage() {} + virtual ~IntegrityRomFsStorage() override { + this->Finalize(); + } + + Result Initialize( + HierarchicalIntegrityVerificationInformation level_hash_info, Hash master_hash, + HierarchicalIntegrityVerificationStorage::HierarchicalStorageInformation storage_info, + int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level); + void Finalize(); + + virtual size_t Read(u8* buffer, size_t size, size_t offset) const override { + return m_integrity_storage.Read(buffer, size, offset); + } + + virtual size_t GetSize() const override { + return m_integrity_storage.GetSize(); + } +}; + +} // namespace FileSys diff --git 
a/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp new file mode 100644 index 000000000..ef36b755e --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.cpp @@ -0,0 +1,95 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "common/alignment.h" +#include "core/file_sys/fssystem/fssystem_integrity_verification_storage.h" + +namespace FileSys { + +constexpr inline u32 ILog2(u32 val) { + ASSERT(val > 0); + return ((sizeof(u32) * 8) - 1 - std::countl_zero<u32>(val)); +} + +void IntegrityVerificationStorage::Initialize(VirtualFile hs, VirtualFile ds, s64 verif_block_size, + s64 upper_layer_verif_block_size, bool is_real_data) { + // Validate preconditions. + ASSERT(verif_block_size >= HashSize); + + // Set storages. + m_hash_storage = hs; + m_data_storage = ds; + + // Set verification block sizes. + m_verification_block_size = verif_block_size; + m_verification_block_order = ILog2(static_cast<u32>(verif_block_size)); + ASSERT(m_verification_block_size == 1ll << m_verification_block_order); + + // Set upper layer block sizes. + upper_layer_verif_block_size = std::max(upper_layer_verif_block_size, HashSize); + m_upper_layer_verification_block_size = upper_layer_verif_block_size; + m_upper_layer_verification_block_order = ILog2(static_cast<u32>(upper_layer_verif_block_size)); + ASSERT(m_upper_layer_verification_block_size == 1ll << m_upper_layer_verification_block_order); + + // Validate sizes. + { + s64 hash_size = m_hash_storage->GetSize(); + s64 data_size = m_data_storage->GetSize(); + ASSERT(((hash_size / HashSize) * m_verification_block_size) >= data_size); + } + + // Set data. + m_is_real_data = is_real_data; +} + +void IntegrityVerificationStorage::Finalize() { + m_hash_storage = VirtualFile(); + m_data_storage = VirtualFile(); +} + +size_t IntegrityVerificationStorage::Read(u8* buffer, size_t size, size_t offset) const { + // Validate preconditions. + ASSERT(Common::IsAligned(offset, static_cast<size_t>(m_verification_block_size))); + ASSERT(Common::IsAligned(size, static_cast<size_t>(m_verification_block_size))); + + // Succeed if zero size. + if (size == 0) { + return size; + } + + // Validate arguments. + ASSERT(buffer != nullptr); + + // Validate the offset. + s64 data_size = m_data_storage->GetSize(); + ASSERT(offset <= static_cast<size_t>(data_size)); + + // Validate the access range. + ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange( + offset, size, Common::AlignUp(data_size, static_cast<size_t>(m_verification_block_size))))); + + // Determine the read extents. + size_t read_size = size; + if (static_cast<s64>(offset + read_size) > data_size) { + // Determine the padding sizes. + s64 padding_offset = data_size - offset; + size_t padding_size = static_cast<size_t>( + m_verification_block_size - (padding_offset & (m_verification_block_size - 1))); + ASSERT(static_cast<s64>(padding_size) < m_verification_block_size); + + // Clear the padding. + std::memset(static_cast<u8*>(buffer) + padding_offset, 0, padding_size); + + // Set the new in-bounds size. + read_size = static_cast<size_t>(data_size - offset); + } + + // Perform the read. 
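    // (Aside: a worked instance of the padding math above, assuming a
    // 0x4000-byte verification block and a 0x9100-byte data storage. A
    // block-aligned read of [0x8000, 0xC000) overhangs the end of the data,
    // so padding_offset == 0x1100 real bytes are read and the remaining
    // 0x2F00 bytes of the caller's buffer are zero-filled:)
    static_assert(s64{0x4000} - (s64{0x1100} & (s64{0x4000} - 1)) == 0x2F00);
    static_assert(s64{0x1100} + s64{0x2F00} == 0x4000); // data + padding = one block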
+ return m_data_storage->Read(buffer, read_size, offset); +} + +size_t IntegrityVerificationStorage::GetSize() const { + return m_data_storage->GetSize(); +} + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h new file mode 100644 index 000000000..08515a268 --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_integrity_verification_storage.h @@ -0,0 +1,65 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include <optional> + +#include "core/file_sys/fssystem/fs_i_storage.h" +#include "core/file_sys/fssystem/fs_types.h" + +namespace FileSys { + +class IntegrityVerificationStorage : public IReadOnlyStorage { + YUZU_NON_COPYABLE(IntegrityVerificationStorage); + YUZU_NON_MOVEABLE(IntegrityVerificationStorage); + +public: + static constexpr s64 HashSize = 256 / 8; + + struct BlockHash { + u8 hash[HashSize]; + }; + static_assert(std::is_trivial_v<BlockHash>); + +private: + VirtualFile m_hash_storage; + VirtualFile m_data_storage; + s64 m_verification_block_size; + s64 m_verification_block_order; + s64 m_upper_layer_verification_block_size; + s64 m_upper_layer_verification_block_order; + bool m_is_real_data; + +public: + IntegrityVerificationStorage() + : m_verification_block_size(0), m_verification_block_order(0), + m_upper_layer_verification_block_size(0), m_upper_layer_verification_block_order(0) {} + virtual ~IntegrityVerificationStorage() override { + this->Finalize(); + } + + void Initialize(VirtualFile hs, VirtualFile ds, s64 verif_block_size, + s64 upper_layer_verif_block_size, bool is_real_data); + void Finalize(); + + virtual size_t Read(u8* buffer, size_t size, size_t offset) const override; + virtual size_t GetSize() const override; + + s64 GetBlockSize() const { + return m_verification_block_size; + } + +private: + static void SetValidationBit(BlockHash* hash) { + ASSERT(hash != nullptr); + hash->hash[HashSize - 1] |= 0x80; + } + + static bool IsValidationBit(const BlockHash* hash) { + ASSERT(hash != nullptr); + return (hash->hash[HashSize - 1] & 0x80) != 0; + } +}; + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h b/src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h new file mode 100644 index 000000000..7637272d5 --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h @@ -0,0 +1,58 @@ +#pragma once + +#include "core/file_sys/fssystem/fs_i_storage.h" + +namespace FileSys { + +class MemoryResourceBufferHoldStorage : public IStorage { + YUZU_NON_COPYABLE(MemoryResourceBufferHoldStorage); + YUZU_NON_MOVEABLE(MemoryResourceBufferHoldStorage); + +private: + VirtualFile m_storage; + void* m_buffer; + size_t m_buffer_size; + +public: + MemoryResourceBufferHoldStorage(VirtualFile storage, size_t buffer_size) + : m_storage(std::move(storage)), m_buffer(::operator new(buffer_size)), + m_buffer_size(buffer_size) {} + + virtual ~MemoryResourceBufferHoldStorage() { + // If we have a buffer, deallocate it. + if (m_buffer != nullptr) { + ::operator delete(m_buffer); + } + } + + bool IsValid() const { + return m_buffer != nullptr; + } + void* GetBuffer() const { + return m_buffer; + } + +public: + virtual size_t Read(u8* buffer, size_t size, size_t offset) const override { + // Check pre-conditions. 
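    // (Aside: this class is a pure pass-through decorator; Read, GetSize and
    // Write all forward to m_storage. Its one real job is to keep the buffer
    // allocated in the constructor alive for exactly as long as the wrapped
    // storage remains reachable through this object.)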
+ ASSERT(m_storage != nullptr); + + return m_storage->Read(buffer, size, offset); + } + + virtual size_t GetSize() const override { + // Check pre-conditions. + ASSERT(m_storage != nullptr); + + return m_storage->GetSize(); + } + + virtual size_t Write(const u8* buffer, size_t size, size_t offset) override { + // Check pre-conditions. + ASSERT(m_storage != nullptr); + + return m_storage->Write(buffer, size, offset); + } +}; + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp new file mode 100644 index 000000000..b1b5fb156 --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.cpp @@ -0,0 +1,1345 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h" +#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h" +#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h" +#include "core/file_sys/fssystem/fssystem_alignment_matching_storage.h" +#include "core/file_sys/fssystem/fssystem_compressed_storage.h" +#include "core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h" +#include "core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h" +#include "core/file_sys/fssystem/fssystem_indirect_storage.h" +#include "core/file_sys/fssystem/fssystem_integrity_romfs_storage.h" +#include "core/file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h" +#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h" +#include "core/file_sys/fssystem/fssystem_sparse_storage.h" +#include "core/file_sys/fssystem/fssystem_switch_storage.h" +#include "core/file_sys/vfs_offset.h" +#include "core/file_sys/vfs_vector.h" + +namespace FileSys { + +namespace { + +constexpr inline s32 IntegrityDataCacheCount = 24; +constexpr inline s32 IntegrityHashCacheCount = 8; + +constexpr inline s32 IntegrityDataCacheCountForMeta = 16; +constexpr inline s32 IntegrityHashCacheCountForMeta = 2; + +class SharedNcaBodyStorage : public IReadOnlyStorage { + YUZU_NON_COPYABLE(SharedNcaBodyStorage); + YUZU_NON_MOVEABLE(SharedNcaBodyStorage); + +private: + VirtualFile m_storage; + std::shared_ptr<NcaReader> m_nca_reader; + +public: + SharedNcaBodyStorage(VirtualFile s, std::shared_ptr<NcaReader> r) + : m_storage(std::move(s)), m_nca_reader(std::move(r)) {} + + virtual size_t Read(u8* buffer, size_t size, size_t offset) const override { + // Validate pre-conditions. + ASSERT(m_storage != nullptr); + + // Read from the base storage. + return m_storage->Read(buffer, size, offset); + } + + virtual size_t GetSize() const override { + // Validate pre-conditions. + ASSERT(m_storage != nullptr); + + return m_storage->GetSize(); + } +}; + +inline s64 GetFsOffset(const NcaReader& reader, s32 fs_index) { + return static_cast<s64>(reader.GetFsOffset(fs_index)); +} + +inline s64 GetFsEndOffset(const NcaReader& reader, s32 fs_index) { + return static_cast<s64>(reader.GetFsEndOffset(fs_index)); +} + +using Sha256DataRegion = NcaFsHeader::Region; +using IntegrityLevelInfo = NcaFsHeader::HashData::IntegrityMetaInfo::LevelHashInfo; +using IntegrityDataInfo = IntegrityLevelInfo::HierarchicalIntegrityVerificationLevelInformation; + +} // namespace + +Result NcaFileSystemDriver::OpenStorageWithContext(VirtualFile* out, + NcaFsHeaderReader* out_header_reader, + s32 fs_index, StorageContext* ctx) { + // Open storage. 
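    // (Roadmap for OpenStorageImpl below: the driver composes a decorator
    // stack from the innermost raw NCA bytes outwards:
    //
    //   body substorage -> [sparse] -> [AesXts | AesCtr | AesCtrEx | none]
    //     -> [indirect/patch] -> [hierarchical SHA-256 or integrity hash]
    //     -> [compressed]
    //
    // Bracketed layers appear only when the corresponding NcaFsHeader flags
    // are set; each Create* helper wraps the previous VirtualFile in another
    // IStorage implementation.)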
+ R_RETURN(this->OpenStorageImpl(out, out_header_reader, fs_index, ctx)); +} + +Result NcaFileSystemDriver::OpenStorageImpl(VirtualFile* out, NcaFsHeaderReader* out_header_reader, + s32 fs_index, StorageContext* ctx) { + // Validate preconditions. + ASSERT(out != nullptr); + ASSERT(out_header_reader != nullptr); + ASSERT(0 <= fs_index && fs_index < NcaHeader::FsCountMax); + + // Validate the fs index. + R_UNLESS(m_reader->HasFsInfo(fs_index), ResultPartitionNotFound); + + // Initialize our header reader for the fs index. + R_TRY(out_header_reader->Initialize(*m_reader, fs_index)); + + // Declare the storage we're opening. + VirtualFile storage; + + // Process sparse layer. + s64 fs_data_offset = 0; + if (out_header_reader->ExistsSparseLayer()) { + // Get the sparse info. + const auto& sparse_info = out_header_reader->GetSparseInfo(); + + // Create based on whether we have a meta hash layer. + if (out_header_reader->ExistsSparseMetaHashLayer()) { + // Create the sparse storage with verification. + R_TRY(this->CreateSparseStorageWithVerification( + std::addressof(storage), std::addressof(fs_data_offset), + ctx != nullptr ? std::addressof(ctx->current_sparse_storage) : nullptr, + ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr, + ctx != nullptr ? std::addressof(ctx->sparse_layer_info_storage) : nullptr, fs_index, + out_header_reader->GetAesCtrUpperIv(), sparse_info, + out_header_reader->GetSparseMetaDataHashDataInfo(), + out_header_reader->GetSparseMetaHashType())); + } else { + // Create the sparse storage. + R_TRY(this->CreateSparseStorage( + std::addressof(storage), std::addressof(fs_data_offset), + ctx != nullptr ? std::addressof(ctx->current_sparse_storage) : nullptr, + ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr, + fs_index, out_header_reader->GetAesCtrUpperIv(), sparse_info)); + } + } else { + // Get the data offsets. + fs_data_offset = GetFsOffset(*m_reader, fs_index); + const auto fs_end_offset = GetFsEndOffset(*m_reader, fs_index); + + // Validate that we're within range. + const auto data_size = fs_end_offset - fs_data_offset; + R_UNLESS(data_size > 0, ResultInvalidNcaHeader); + + // Create the body substorage. + R_TRY(this->CreateBodySubStorage(std::addressof(storage), fs_data_offset, data_size)); + + // Potentially save the body substorage to our context. + if (ctx != nullptr) { + ctx->body_substorage = storage; + } + } + + // Process patch layer. + const auto& patch_info = out_header_reader->GetPatchInfo(); + VirtualFile patch_meta_aes_ctr_ex_meta_storage; + VirtualFile patch_meta_indirect_meta_storage; + if (out_header_reader->ExistsPatchMetaHashLayer()) { + // Check the meta hash type. + R_UNLESS(out_header_reader->GetPatchMetaHashType() == + NcaFsHeader::MetaDataHashType::HierarchicalIntegrity, + ResultRomNcaInvalidPatchMetaDataHashType); + + // Create the patch meta storage. + R_TRY(this->CreatePatchMetaStorage( + std::addressof(patch_meta_aes_ctr_ex_meta_storage), + std::addressof(patch_meta_indirect_meta_storage), + ctx != nullptr ? std::addressof(ctx->patch_layer_info_storage) : nullptr, storage, + fs_data_offset, out_header_reader->GetAesCtrUpperIv(), patch_info, + out_header_reader->GetPatchMetaDataHashDataInfo())); + } + + if (patch_info.HasAesCtrExTable()) { + // Check the encryption type. 
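    // (Aside: the "ctx != nullptr ? std::addressof(...) : nullptr" pattern
    // used throughout this function lets one code path serve both plain
    // opens and opens that want to capture every intermediate layer. A
    // hypothetical call-site sketch, not taken from this patch:
    //
    //   NcaFileSystemDriver::StorageContext ctx{};
    //   ctx.open_raw_storage = false; // build the full stack
    //   VirtualFile file;
    //   NcaFsHeaderReader header_reader;
    //   R_TRY(driver.OpenStorageWithContext(std::addressof(file),
    //                                       std::addressof(header_reader),
    //                                       fs_index, std::addressof(ctx)));
    //   // ctx.body_substorage, ctx.indirect_storage, ... are now populated.)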
+ ASSERT(out_header_reader->GetEncryptionType() == NcaFsHeader::EncryptionType::None || + out_header_reader->GetEncryptionType() == NcaFsHeader::EncryptionType::AesCtrEx || + out_header_reader->GetEncryptionType() == + NcaFsHeader::EncryptionType::AesCtrExSkipLayerHash); + + // Create the ex meta storage. + VirtualFile aes_ctr_ex_storage_meta_storage = patch_meta_aes_ctr_ex_meta_storage; + if (aes_ctr_ex_storage_meta_storage == nullptr) { + // If we don't have a meta storage, we must not have a patch meta hash layer. + ASSERT(!out_header_reader->ExistsPatchMetaHashLayer()); + + R_TRY(this->CreateAesCtrExStorageMetaStorage( + std::addressof(aes_ctr_ex_storage_meta_storage), storage, fs_data_offset, + out_header_reader->GetEncryptionType(), out_header_reader->GetAesCtrUpperIv(), + patch_info)); + } + + // Create the ex storage. + VirtualFile aes_ctr_ex_storage; + R_TRY(this->CreateAesCtrExStorage( + std::addressof(aes_ctr_ex_storage), + ctx != nullptr ? std::addressof(ctx->aes_ctr_ex_storage) : nullptr, std::move(storage), + aes_ctr_ex_storage_meta_storage, fs_data_offset, out_header_reader->GetAesCtrUpperIv(), + patch_info)); + + // Set the base storage as the ex storage. + storage = std::move(aes_ctr_ex_storage); + + // Potentially save storages to our context. + if (ctx != nullptr) { + ctx->aes_ctr_ex_storage_meta_storage = aes_ctr_ex_storage_meta_storage; + ctx->aes_ctr_ex_storage_data_storage = storage; + ctx->fs_data_storage = storage; + } + } else { + // Create the appropriate storage for the encryption type. + switch (out_header_reader->GetEncryptionType()) { + case NcaFsHeader::EncryptionType::None: + // If there's no encryption, use the base storage we made previously. + break; + case NcaFsHeader::EncryptionType::AesXts: + R_TRY(this->CreateAesXtsStorage(std::addressof(storage), std::move(storage), + fs_data_offset)); + break; + case NcaFsHeader::EncryptionType::AesCtr: + R_TRY(this->CreateAesCtrStorage(std::addressof(storage), std::move(storage), + fs_data_offset, out_header_reader->GetAesCtrUpperIv(), + AlignmentStorageRequirement::None)); + break; + case NcaFsHeader::EncryptionType::AesCtrSkipLayerHash: { + // Create the aes ctr storage. + VirtualFile aes_ctr_storage; + R_TRY(this->CreateAesCtrStorage(std::addressof(aes_ctr_storage), storage, + fs_data_offset, out_header_reader->GetAesCtrUpperIv(), + AlignmentStorageRequirement::None)); + + // Create region switch storage. + R_TRY(this->CreateRegionSwitchStorage(std::addressof(storage), out_header_reader, + std::move(storage), std::move(aes_ctr_storage))); + } break; + default: + R_THROW(ResultInvalidNcaFsHeaderEncryptionType); + } + + // Potentially save storages to our context. + if (ctx != nullptr) { + ctx->fs_data_storage = storage; + } + } + + // Process indirect layer. + if (patch_info.HasIndirectTable()) { + // Create the indirect meta storage + VirtualFile indirect_storage_meta_storage = patch_meta_indirect_meta_storage; + if (indirect_storage_meta_storage == nullptr) { + // If we don't have a meta storage, we must not have a patch meta hash layer. + ASSERT(!out_header_reader->ExistsPatchMetaHashLayer()); + + R_TRY(this->CreateIndirectStorageMetaStorage( + std::addressof(indirect_storage_meta_storage), storage, patch_info)); + } + + // Potentially save the indirect meta storage to our context. + if (ctx != nullptr) { + ctx->indirect_storage_meta_storage = indirect_storage_meta_storage; + } + + // Get the original indirectable storage. 
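    // (Aside: the fallback chain below selects the "original" side of the
    // patch in priority order: reopen the same fs index through
    // m_original_reader, else use ctx->external_original_storage, else fall
    // back to a fresh, empty VectorVfsFile. The empty fallback presumably
    // exists so a standalone update NCA can still be constructed; indirect
    // entries that point at the original storage would then read nothing
    // useful.)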
+ VirtualFile original_indirectable_storage; + if (m_original_reader != nullptr && m_original_reader->HasFsInfo(fs_index)) { + // Create a driver for the original. + NcaFileSystemDriver original_driver(m_original_reader); + + // Create a header reader for the original. + NcaFsHeaderReader original_header_reader; + R_TRY(original_header_reader.Initialize(*m_original_reader, fs_index)); + + // Open original indirectable storage. + R_TRY(original_driver.OpenIndirectableStorageAsOriginal( + std::addressof(original_indirectable_storage), + std::addressof(original_header_reader), ctx)); + } else if (ctx != nullptr && ctx->external_original_storage != nullptr) { + // Use the external original storage. + original_indirectable_storage = ctx->external_original_storage; + } else { + // Allocate a dummy memory storage as original storage. + original_indirectable_storage = std::make_shared<VectorVfsFile>(); + R_UNLESS(original_indirectable_storage != nullptr, + ResultAllocationMemoryFailedAllocateShared); + } + + // Create the indirect storage. + VirtualFile indirect_storage; + R_TRY(this->CreateIndirectStorage( + std::addressof(indirect_storage), + ctx != nullptr ? std::addressof(ctx->indirect_storage) : nullptr, std::move(storage), + std::move(original_indirectable_storage), std::move(indirect_storage_meta_storage), + patch_info)); + + // Set storage as the indirect storage. + storage = std::move(indirect_storage); + } + + // Check if we're sparse or requested to skip the integrity layer. + if (out_header_reader->ExistsSparseLayer() || (ctx != nullptr && ctx->open_raw_storage)) { + *out = std::move(storage); + R_SUCCEED(); + } + + // Create the non-raw storage. + R_RETURN(this->CreateStorageByRawStorage(out, out_header_reader, std::move(storage), ctx)); +} + +Result NcaFileSystemDriver::CreateStorageByRawStorage(VirtualFile* out, + const NcaFsHeaderReader* header_reader, + VirtualFile raw_storage, + StorageContext* ctx) { + // Initialize storage as raw storage. + VirtualFile storage = std::move(raw_storage); + + // Process hash/integrity layer. + switch (header_reader->GetHashType()) { + case NcaFsHeader::HashType::HierarchicalSha256Hash: + R_TRY(this->CreateSha256Storage(std::addressof(storage), std::move(storage), + header_reader->GetHashData().hierarchical_sha256_data)); + break; + case NcaFsHeader::HashType::HierarchicalIntegrityHash: + R_TRY(this->CreateIntegrityVerificationStorage( + std::addressof(storage), std::move(storage), + header_reader->GetHashData().integrity_meta_info)); + break; + default: + R_THROW(ResultInvalidNcaFsHeaderHashType); + } + + // Process compression layer. + if (header_reader->ExistsCompressionLayer()) { + R_TRY(this->CreateCompressedStorage( + std::addressof(storage), + ctx != nullptr ? std::addressof(ctx->compressed_storage) : nullptr, + ctx != nullptr ? std::addressof(ctx->compressed_storage_meta_storage) : nullptr, + std::move(storage), header_reader->GetCompressionInfo())); + } + + // Set output storage. + *out = std::move(storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::OpenIndirectableStorageAsOriginal( + VirtualFile* out, const NcaFsHeaderReader* header_reader, StorageContext* ctx) { + // Get the fs index. + const auto fs_index = header_reader->GetFsIndex(); + + // Declare the storage we're opening. + VirtualFile storage; + + // Process sparse layer. + s64 fs_data_offset = 0; + if (header_reader->ExistsSparseLayer()) { + // Get the sparse info. 
+ const auto& sparse_info = header_reader->GetSparseInfo(); + + // Create based on whether we have a meta hash layer. + if (header_reader->ExistsSparseMetaHashLayer()) { + // Create the sparse storage with verification. + R_TRY(this->CreateSparseStorageWithVerification( + std::addressof(storage), std::addressof(fs_data_offset), + ctx != nullptr ? std::addressof(ctx->original_sparse_storage) : nullptr, + ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr, + ctx != nullptr ? std::addressof(ctx->sparse_layer_info_storage) : nullptr, fs_index, + header_reader->GetAesCtrUpperIv(), sparse_info, + header_reader->GetSparseMetaDataHashDataInfo(), + header_reader->GetSparseMetaHashType())); + } else { + // Create the sparse storage. + R_TRY(this->CreateSparseStorage( + std::addressof(storage), std::addressof(fs_data_offset), + ctx != nullptr ? std::addressof(ctx->original_sparse_storage) : nullptr, + ctx != nullptr ? std::addressof(ctx->sparse_storage_meta_storage) : nullptr, + fs_index, header_reader->GetAesCtrUpperIv(), sparse_info)); + } + } else { + // Get the data offsets. + fs_data_offset = GetFsOffset(*m_reader, fs_index); + const auto fs_end_offset = GetFsEndOffset(*m_reader, fs_index); + + // Validate that we're within range. + const auto data_size = fs_end_offset - fs_data_offset; + R_UNLESS(data_size > 0, ResultInvalidNcaHeader); + + // Create the body substorage. + R_TRY(this->CreateBodySubStorage(std::addressof(storage), fs_data_offset, data_size)); + } + + // Create the appropriate storage for the encryption type. + switch (header_reader->GetEncryptionType()) { + case NcaFsHeader::EncryptionType::None: + // If there's no encryption, use the base storage we made previously. + break; + case NcaFsHeader::EncryptionType::AesXts: + R_TRY( + this->CreateAesXtsStorage(std::addressof(storage), std::move(storage), fs_data_offset)); + break; + case NcaFsHeader::EncryptionType::AesCtr: + R_TRY(this->CreateAesCtrStorage(std::addressof(storage), std::move(storage), fs_data_offset, + header_reader->GetAesCtrUpperIv(), + AlignmentStorageRequirement::CacheBlockSize)); + break; + default: + R_THROW(ResultInvalidNcaFsHeaderEncryptionType); + } + + // Set output storage. + *out = std::move(storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreateBodySubStorage(VirtualFile* out, s64 offset, s64 size) { + // Create the body storage. + auto body_storage = + std::make_shared<SharedNcaBodyStorage>(m_reader->GetSharedBodyStorage(), m_reader); + R_UNLESS(body_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Get the body storage size. + s64 body_size = body_storage->GetSize(); + + // Check that we're within range. + R_UNLESS(offset + size <= body_size, ResultNcaBaseStorageOutOfRangeB); + + // Create substorage. + auto body_substorage = std::make_shared<OffsetVfsFile>(std::move(body_storage), size, offset); + R_UNLESS(body_substorage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Set the output storage. + *out = std::move(body_substorage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreateAesCtrStorage( + VirtualFile* out, VirtualFile base_storage, s64 offset, const NcaAesCtrUpperIv& upper_iv, + AlignmentStorageRequirement alignment_storage_requirement) { + // Check pre-conditions. + ASSERT(out != nullptr); + ASSERT(base_storage != nullptr); + + // Create the iv. + std::array<u8, AesCtrStorage::IvSize> iv{}; + AesCtrStorage::MakeIv(iv.data(), sizeof(iv), upper_iv.value, offset); + + // Create the ctr storage. 
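    // (Aside: the iv assembled just above is the usual NCA CTR-mode counter:
    // the upper half carries the per-section nonce from upper_iv and the
    // lower half encodes the starting byte offset as a big-endian index of
    // 16-byte AES blocks. A worked instance of that block-index arithmetic:)
    static_assert((s64{0x3000} >> 4) == 0x300); // offset 0x3000 is AES block 0x300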
+    VirtualFile aes_ctr_storage;
+    if (m_reader->HasExternalDecryptionKey()) {
+        aes_ctr_storage = std::make_shared<AesCtrStorage>(
+            std::move(base_storage), m_reader->GetExternalDecryptionKey(), AesCtrStorage::KeySize,
+            iv.data(), AesCtrStorage::IvSize);
+        R_UNLESS(aes_ctr_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+    } else {
+        // Create software decryption storage.
+        auto sw_storage = std::make_shared<AesCtrStorage>(
+            base_storage, m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesCtr),
+            AesCtrStorage::KeySize, iv.data(), AesCtrStorage::IvSize);
+        R_UNLESS(sw_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+        aes_ctr_storage = std::move(sw_storage);
+    }
+
+    // Create alignment matching storage.
+    auto aligned_storage = std::make_shared<AlignmentMatchingStorage<NcaHeader::CtrBlockSize, 1>>(
+        std::move(aes_ctr_storage));
+    R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+    // Set the out storage.
+    *out = std::move(aligned_storage);
+    R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateAesXtsStorage(VirtualFile* out, VirtualFile base_storage,
+                                                s64 offset) {
+    // Check pre-conditions.
+    ASSERT(out != nullptr);
+    ASSERT(base_storage != nullptr);
+
+    // Create the iv.
+    std::array<u8, AesXtsStorage::IvSize> iv{};
+    AesXtsStorage::MakeAesXtsIv(iv.data(), sizeof(iv), offset, NcaHeader::XtsBlockSize);
+
+    // Make the aes xts storage.
+    const auto* const key1 = m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesXts1);
+    const auto* const key2 = m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesXts2);
+    auto xts_storage =
+        std::make_shared<AesXtsStorage>(std::move(base_storage), key1, key2, AesXtsStorage::KeySize,
+                                        iv.data(), AesXtsStorage::IvSize, NcaHeader::XtsBlockSize);
+    R_UNLESS(xts_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+    // Create alignment matching storage.
+    auto aligned_storage = std::make_shared<AlignmentMatchingStorage<NcaHeader::XtsBlockSize, 1>>(
+        std::move(xts_storage));
+    R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+    // Set the out storage.
+    *out = std::move(aligned_storage);
+    R_SUCCEED();
+}
+
+Result NcaFileSystemDriver::CreateSparseStorageMetaStorage(VirtualFile* out,
+                                                           VirtualFile base_storage, s64 offset,
+                                                           const NcaAesCtrUpperIv& upper_iv,
+                                                           const NcaSparseInfo& sparse_info) {
+    // Validate preconditions.
+    ASSERT(out != nullptr);
+    ASSERT(base_storage != nullptr);
+
+    // Get the base storage size.
+    s64 base_size = base_storage->GetSize();
+
+    // Get the meta extents.
+    const auto meta_offset = sparse_info.bucket.offset;
+    const auto meta_size = sparse_info.bucket.size;
+    R_UNLESS(meta_offset + meta_size - offset <= base_size, ResultNcaBaseStorageOutOfRangeB);
+
+    // Create the encrypted storage.
+    auto enc_storage =
+        std::make_shared<OffsetVfsFile>(std::move(base_storage), meta_size, meta_offset);
+    R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+    // Create the decrypted storage.
+    VirtualFile decrypted_storage;
+    R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage),
+                                    offset + meta_offset, sparse_info.MakeAesCtrUpperIv(upper_iv),
+                                    AlignmentStorageRequirement::None));
+
+    // Create meta storage.
+    auto meta_storage = std::make_shared<OffsetVfsFile>(decrypted_storage, meta_size, 0);
+    R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared);
+
+    // Set the output.
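    // (Aside: note the three-layer wrap this helper applies to every sparse
    // meta storage: slice the raw body down to the bucket region, decrypt
    // that slice with a CTR storage keyed by
    // sparse_info.MakeAesCtrUpperIv(upper_iv), then re-slice to
    // [0, meta_size) so the bucket tree sees a zero-based storage.)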
+ *out = std::move(meta_storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreateSparseStorageCore(std::shared_ptr<SparseStorage>* out, + VirtualFile base_storage, s64 base_size, + VirtualFile meta_storage, + const NcaSparseInfo& sparse_info, + bool external_info) { + // Validate preconditions. + ASSERT(out != nullptr); + ASSERT(base_storage != nullptr); + ASSERT(meta_storage != nullptr); + + // Read and verify the bucket tree header. + BucketTree::Header header; + std::memcpy(std::addressof(header), sparse_info.bucket.header.data(), sizeof(header)); + R_TRY(header.Verify()); + + // Determine storage extents. + const auto node_offset = 0; + const auto node_size = SparseStorage::QueryNodeStorageSize(header.entry_count); + const auto entry_offset = node_offset + node_size; + const auto entry_size = SparseStorage::QueryEntryStorageSize(header.entry_count); + + // Create the sparse storage. + auto sparse_storage = std::make_shared<SparseStorage>(); + R_UNLESS(sparse_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Sanity check that we can be doing this. + ASSERT(header.entry_count != 0); + + // Initialize the sparse storage. + R_TRY(sparse_storage->Initialize( + std::make_shared<OffsetVfsFile>(meta_storage, node_size, node_offset), + std::make_shared<OffsetVfsFile>(meta_storage, entry_size, entry_offset), + header.entry_count)); + + // If not external, set the data storage. + if (!external_info) { + sparse_storage->SetDataStorage( + std::make_shared<OffsetVfsFile>(std::move(base_storage), base_size, 0)); + } + + // Set the output. + *out = std::move(sparse_storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreateSparseStorage(VirtualFile* out, s64* out_fs_data_offset, + std::shared_ptr<SparseStorage>* out_sparse_storage, + VirtualFile* out_meta_storage, s32 index, + const NcaAesCtrUpperIv& upper_iv, + const NcaSparseInfo& sparse_info) { + // Validate preconditions. + ASSERT(out != nullptr); + ASSERT(out_fs_data_offset != nullptr); + + // Check the sparse info generation. + R_UNLESS(sparse_info.generation != 0, ResultInvalidNcaHeader); + + // Read and verify the bucket tree header. + BucketTree::Header header; + std::memcpy(std::addressof(header), sparse_info.bucket.header.data(), sizeof(header)); + R_TRY(header.Verify()); + + // Determine the storage extents. + const auto fs_offset = GetFsOffset(*m_reader, index); + const auto fs_end_offset = GetFsEndOffset(*m_reader, index); + const auto fs_size = fs_end_offset - fs_offset; + + // Create the sparse storage. + std::shared_ptr<SparseStorage> sparse_storage; + if (header.entry_count != 0) { + // Create the body substorage. + VirtualFile body_substorage; + R_TRY(this->CreateBodySubStorage(std::addressof(body_substorage), + sparse_info.physical_offset, + sparse_info.GetPhysicalSize())); + + // Create the meta storage. + VirtualFile meta_storage; + R_TRY(this->CreateSparseStorageMetaStorage(std::addressof(meta_storage), body_substorage, + sparse_info.physical_offset, upper_iv, + sparse_info)); + + // Potentially set the output meta storage. + if (out_meta_storage != nullptr) { + *out_meta_storage = meta_storage; + } + + // Create the sparse storage. + R_TRY(this->CreateSparseStorageCore(std::addressof(sparse_storage), body_substorage, + sparse_info.GetPhysicalSize(), std::move(meta_storage), + sparse_info, false)); + } else { + // If there are no entries, there's nothing to actually do. 
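    // (Aside: a sparse table with zero entries means no region of the
    // section is physically present, so the Initialize(fs_size) call below
    // produces a storage of the correct virtual size whose reads come back
    // as zeros.)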
+ sparse_storage = std::make_shared<SparseStorage>(); + R_UNLESS(sparse_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + sparse_storage->Initialize(fs_size); + } + + // Potentially set the output sparse storage. + if (out_sparse_storage != nullptr) { + *out_sparse_storage = sparse_storage; + } + + // Set the output fs data offset. + *out_fs_data_offset = fs_offset; + + // Set the output storage. + *out = std::move(sparse_storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreateSparseStorageMetaStorageWithVerification( + VirtualFile* out, VirtualFile* out_layer_info_storage, VirtualFile base_storage, s64 offset, + const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info, + const NcaMetaDataHashDataInfo& meta_data_hash_data_info) { + // Validate preconditions. + ASSERT(out != nullptr); + ASSERT(base_storage != nullptr); + + // Get the base storage size. + s64 base_size = base_storage->GetSize(); + + // Get the meta extents. + const auto meta_offset = sparse_info.bucket.offset; + const auto meta_size = sparse_info.bucket.size; + R_UNLESS(meta_offset + meta_size - offset <= base_size, ResultNcaBaseStorageOutOfRangeB); + + // Get the meta data hash data extents. + const s64 meta_data_hash_data_offset = meta_data_hash_data_info.offset; + const s64 meta_data_hash_data_size = + Common::AlignUp<s64>(meta_data_hash_data_info.size, NcaHeader::CtrBlockSize); + R_UNLESS(meta_data_hash_data_offset + meta_data_hash_data_size <= base_size, + ResultNcaBaseStorageOutOfRangeB); + + // Check that the meta is before the hash data. + R_UNLESS(meta_offset + meta_size <= meta_data_hash_data_offset, + ResultRomNcaInvalidSparseMetaDataHashDataOffset); + + // Check that offsets are appropriately aligned. + R_UNLESS(Common::IsAligned<s64>(meta_data_hash_data_offset, NcaHeader::CtrBlockSize), + ResultRomNcaInvalidSparseMetaDataHashDataOffset); + R_UNLESS(Common::IsAligned<s64>(meta_offset, NcaHeader::CtrBlockSize), + ResultInvalidNcaFsHeader); + + // Create the meta storage. + auto enc_storage = std::make_shared<OffsetVfsFile>( + std::move(base_storage), + meta_data_hash_data_offset + meta_data_hash_data_size - meta_offset, meta_offset); + R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Create the decrypted storage. + VirtualFile decrypted_storage; + R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage), + offset + meta_offset, sparse_info.MakeAesCtrUpperIv(upper_iv), + AlignmentStorageRequirement::None)); + + // Create the verification storage. + VirtualFile integrity_storage; + Result rc = this->CreateIntegrityVerificationStorageForMeta( + std::addressof(integrity_storage), out_layer_info_storage, std::move(decrypted_storage), + meta_offset, meta_data_hash_data_info); + if (rc == ResultInvalidNcaMetaDataHashDataSize) { + R_THROW(ResultRomNcaInvalidSparseMetaDataHashDataSize); + } + if (rc == ResultInvalidNcaMetaDataHashDataHash) { + R_THROW(ResultRomNcaInvalidSparseMetaDataHashDataHash); + } + R_TRY(rc); + + // Create the meta storage. + auto meta_storage = std::make_shared<OffsetVfsFile>(std::move(integrity_storage), meta_size, 0); + R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Set the output. 
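+    // (As in the unverified path, the caller receives the table view over the
+    // decrypted, now integrity-verified meta region.)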
+ *out = std::move(meta_storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreateSparseStorageWithVerification( + VirtualFile* out, s64* out_fs_data_offset, std::shared_ptr<SparseStorage>* out_sparse_storage, + VirtualFile* out_meta_storage, VirtualFile* out_layer_info_storage, s32 index, + const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info, + const NcaMetaDataHashDataInfo& meta_data_hash_data_info, + NcaFsHeader::MetaDataHashType meta_data_hash_type) { + // Validate preconditions. + ASSERT(out != nullptr); + ASSERT(out_fs_data_offset != nullptr); + + // Check the sparse info generation. + R_UNLESS(sparse_info.generation != 0, ResultInvalidNcaHeader); + + // Read and verify the bucket tree header. + BucketTree::Header header; + std::memcpy(std::addressof(header), sparse_info.bucket.header.data(), sizeof(header)); + R_TRY(header.Verify()); + + // Determine the storage extents. + const auto fs_offset = GetFsOffset(*m_reader, index); + const auto fs_end_offset = GetFsEndOffset(*m_reader, index); + const auto fs_size = fs_end_offset - fs_offset; + + // Create the sparse storage. + std::shared_ptr<SparseStorage> sparse_storage; + if (header.entry_count != 0) { + // Create the body substorage. + VirtualFile body_substorage; + R_TRY(this->CreateBodySubStorage( + std::addressof(body_substorage), sparse_info.physical_offset, + Common::AlignUp<s64>(static_cast<s64>(meta_data_hash_data_info.offset) + + static_cast<s64>(meta_data_hash_data_info.size), + NcaHeader::CtrBlockSize))); + + // Check the meta data hash type. + R_UNLESS(meta_data_hash_type == NcaFsHeader::MetaDataHashType::HierarchicalIntegrity, + ResultRomNcaInvalidSparseMetaDataHashType); + + // Create the meta storage. + VirtualFile meta_storage; + R_TRY(this->CreateSparseStorageMetaStorageWithVerification( + std::addressof(meta_storage), out_layer_info_storage, body_substorage, + sparse_info.physical_offset, upper_iv, sparse_info, meta_data_hash_data_info)); + + // Potentially set the output meta storage. + if (out_meta_storage != nullptr) { + *out_meta_storage = meta_storage; + } + + // Create the sparse storage. + R_TRY(this->CreateSparseStorageCore(std::addressof(sparse_storage), body_substorage, + sparse_info.GetPhysicalSize(), std::move(meta_storage), + sparse_info, false)); + } else { + // If there are no entries, there's nothing to actually do. + sparse_storage = std::make_shared<SparseStorage>(); + R_UNLESS(sparse_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + sparse_storage->Initialize(fs_size); + } + + // Potentially set the output sparse storage. + if (out_sparse_storage != nullptr) { + *out_sparse_storage = sparse_storage; + } + + // Set the output fs data offset. + *out_fs_data_offset = fs_offset; + + // Set the output storage. + *out = std::move(sparse_storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreateAesCtrExStorageMetaStorage( + VirtualFile* out, VirtualFile base_storage, s64 offset, + NcaFsHeader::EncryptionType encryption_type, const NcaAesCtrUpperIv& upper_iv, + const NcaPatchInfo& patch_info) { + // Validate preconditions. + ASSERT(out != nullptr); + ASSERT(base_storage != nullptr); + ASSERT(encryption_type == NcaFsHeader::EncryptionType::None || + encryption_type == NcaFsHeader::EncryptionType::AesCtrEx || + encryption_type == NcaFsHeader::EncryptionType::AesCtrExSkipLayerHash); + ASSERT(patch_info.HasAesCtrExTable()); + + // Validate patch info extents. 
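+    // (The checks below encode the expected layout: the indirect table must precede
+    // the AesCtrEx table within the patch region.)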
+ R_UNLESS(patch_info.indirect_size > 0, ResultInvalidNcaPatchInfoIndirectSize); + R_UNLESS(patch_info.aes_ctr_ex_size > 0, ResultInvalidNcaPatchInfoAesCtrExSize); + R_UNLESS(patch_info.indirect_size + patch_info.indirect_offset <= patch_info.aes_ctr_ex_offset, + ResultInvalidNcaPatchInfoAesCtrExOffset); + + // Get the base storage size. + s64 base_size = base_storage->GetSize(); + + // Get and validate the meta extents. + const s64 meta_offset = patch_info.aes_ctr_ex_offset; + const s64 meta_size = + Common::AlignUp(static_cast<s64>(patch_info.aes_ctr_ex_size), NcaHeader::XtsBlockSize); + R_UNLESS(meta_offset + meta_size <= base_size, ResultNcaBaseStorageOutOfRangeB); + + // Create the encrypted storage. + auto enc_storage = + std::make_shared<OffsetVfsFile>(std::move(base_storage), meta_size, meta_offset); + R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Create the decrypted storage. + VirtualFile decrypted_storage; + if (encryption_type != NcaFsHeader::EncryptionType::None) { + R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage), + offset + meta_offset, upper_iv, + AlignmentStorageRequirement::None)); + } else { + // If encryption type is none, don't do any decryption. + decrypted_storage = std::move(enc_storage); + } + + // Create meta storage. + auto meta_storage = std::make_shared<OffsetVfsFile>(decrypted_storage, meta_size, 0); + R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Create an alignment-matching storage. + using AlignedStorage = AlignmentMatchingStorage<NcaHeader::CtrBlockSize, 1>; + auto aligned_storage = std::make_shared<AlignedStorage>(std::move(meta_storage)); + R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Set the output. + *out = std::move(aligned_storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreateAesCtrExStorage( + VirtualFile* out, std::shared_ptr<AesCtrCounterExtendedStorage>* out_ext, + VirtualFile base_storage, VirtualFile meta_storage, s64 counter_offset, + const NcaAesCtrUpperIv& upper_iv, const NcaPatchInfo& patch_info) { + // Validate pre-conditions. + ASSERT(out != nullptr); + ASSERT(base_storage != nullptr); + ASSERT(meta_storage != nullptr); + ASSERT(patch_info.HasAesCtrExTable()); + + // Read the bucket tree header. + BucketTree::Header header; + std::memcpy(std::addressof(header), patch_info.aes_ctr_ex_header.data(), sizeof(header)); + R_TRY(header.Verify()); + + // Determine the bucket extents. + const auto entry_count = header.entry_count; + const s64 data_offset = 0; + const s64 data_size = patch_info.aes_ctr_ex_offset; + const s64 node_offset = 0; + const s64 node_size = AesCtrCounterExtendedStorage::QueryNodeStorageSize(entry_count); + const s64 entry_offset = node_offset + node_size; + const s64 entry_size = AesCtrCounterExtendedStorage::QueryEntryStorageSize(entry_count); + + // Create bucket storages. + auto data_storage = + std::make_shared<OffsetVfsFile>(std::move(base_storage), data_size, data_offset); + auto node_storage = std::make_shared<OffsetVfsFile>(meta_storage, node_size, node_offset); + auto entry_storage = std::make_shared<OffsetVfsFile>(meta_storage, entry_size, entry_offset); + + // Get the secure value. + const auto secure_value = upper_iv.part.secure_value; + + // Create the aes ctr ex storage. + VirtualFile aes_ctr_ex_storage; + if (m_reader->HasExternalDecryptionKey()) { + // Create the decryptor. 
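+        // (Note that the external-key path still uses a software decryptor here.)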
+ std::unique_ptr<AesCtrCounterExtendedStorage::IDecryptor> decryptor; + R_TRY(AesCtrCounterExtendedStorage::CreateSoftwareDecryptor(std::addressof(decryptor))); + + // Create the aes ctr ex storage. + auto impl_storage = std::make_shared<AesCtrCounterExtendedStorage>(); + R_UNLESS(impl_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Initialize the aes ctr ex storage. + R_TRY(impl_storage->Initialize(m_reader->GetExternalDecryptionKey(), AesCtrStorage::KeySize, + secure_value, counter_offset, data_storage, node_storage, + entry_storage, entry_count, std::move(decryptor))); + + // Potentially set the output implementation storage. + if (out_ext != nullptr) { + *out_ext = impl_storage; + } + + // Set the implementation storage. + aes_ctr_ex_storage = std::move(impl_storage); + } else { + // Create the software decryptor. + std::unique_ptr<AesCtrCounterExtendedStorage::IDecryptor> sw_decryptor; + R_TRY(AesCtrCounterExtendedStorage::CreateSoftwareDecryptor(std::addressof(sw_decryptor))); + + // Make the software storage. + auto sw_storage = std::make_shared<AesCtrCounterExtendedStorage>(); + R_UNLESS(sw_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Initialize the software storage. + R_TRY(sw_storage->Initialize(m_reader->GetDecryptionKey(NcaHeader::DecryptionKey_AesCtr), + AesCtrStorage::KeySize, secure_value, counter_offset, + data_storage, node_storage, entry_storage, entry_count, + std::move(sw_decryptor))); + + // Potentially set the output implementation storage. + if (out_ext != nullptr) { + *out_ext = sw_storage; + } + + // Set the implementation storage. + aes_ctr_ex_storage = std::move(sw_storage); + } + + // Create an alignment-matching storage. + using AlignedStorage = AlignmentMatchingStorage<NcaHeader::CtrBlockSize, 1>; + auto aligned_storage = std::make_shared<AlignedStorage>(std::move(aes_ctr_ex_storage)); + R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Set the output. + *out = std::move(aligned_storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreateIndirectStorageMetaStorage(VirtualFile* out, + VirtualFile base_storage, + const NcaPatchInfo& patch_info) { + // Validate preconditions. + ASSERT(out != nullptr); + ASSERT(base_storage != nullptr); + ASSERT(patch_info.HasIndirectTable()); + + // Get the base storage size. + s64 base_size = base_storage->GetSize(); + + // Check that we're within range. + R_UNLESS(patch_info.indirect_offset + patch_info.indirect_size <= base_size, + ResultNcaBaseStorageOutOfRangeE); + + // Create the meta storage. + auto meta_storage = std::make_shared<OffsetVfsFile>(base_storage, patch_info.indirect_size, + patch_info.indirect_offset); + R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Set the output. + *out = std::move(meta_storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreateIndirectStorage( + VirtualFile* out, std::shared_ptr<IndirectStorage>* out_ind, VirtualFile base_storage, + VirtualFile original_data_storage, VirtualFile meta_storage, const NcaPatchInfo& patch_info) { + // Validate preconditions. + ASSERT(out != nullptr); + ASSERT(base_storage != nullptr); + ASSERT(meta_storage != nullptr); + ASSERT(patch_info.HasIndirectTable()); + + // Read the bucket tree header. + BucketTree::Header header; + std::memcpy(std::addressof(header), patch_info.indirect_header.data(), sizeof(header)); + R_TRY(header.Verify()); + + // Determine the storage sizes. 
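+    // (Node storage precedes entry storage within the indirect meta table; both must
+    // fit inside patch_info.indirect_size.)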
+ const auto node_size = IndirectStorage::QueryNodeStorageSize(header.entry_count); + const auto entry_size = IndirectStorage::QueryEntryStorageSize(header.entry_count); + R_UNLESS(node_size + entry_size <= patch_info.indirect_size, + ResultInvalidNcaIndirectStorageOutOfRange); + + // Get the indirect data size. + const s64 indirect_data_size = patch_info.indirect_offset; + ASSERT(Common::IsAligned(indirect_data_size, NcaHeader::XtsBlockSize)); + + // Create the indirect data storage. + auto indirect_data_storage = + std::make_shared<OffsetVfsFile>(base_storage, indirect_data_size, 0); + R_UNLESS(indirect_data_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Create the indirect storage. + auto indirect_storage = std::make_shared<IndirectStorage>(); + R_UNLESS(indirect_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Initialize the indirect storage. + R_TRY(indirect_storage->Initialize( + std::make_shared<OffsetVfsFile>(meta_storage, node_size, 0), + std::make_shared<OffsetVfsFile>(meta_storage, entry_size, node_size), header.entry_count)); + + // Get the original data size. + s64 original_data_size = original_data_storage->GetSize(); + + // Set the indirect storages. + indirect_storage->SetStorage( + 0, std::make_shared<OffsetVfsFile>(original_data_storage, original_data_size, 0)); + indirect_storage->SetStorage( + 1, std::make_shared<OffsetVfsFile>(indirect_data_storage, indirect_data_size, 0)); + + // If necessary, set the output indirect storage. + if (out_ind != nullptr) { + *out_ind = indirect_storage; + } + + // Set the output. + *out = std::move(indirect_storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreatePatchMetaStorage( + VirtualFile* out_aes_ctr_ex_meta, VirtualFile* out_indirect_meta, + VirtualFile* out_layer_info_storage, VirtualFile base_storage, s64 offset, + const NcaAesCtrUpperIv& upper_iv, const NcaPatchInfo& patch_info, + const NcaMetaDataHashDataInfo& meta_data_hash_data_info) { + // Validate preconditions. + ASSERT(out_aes_ctr_ex_meta != nullptr); + ASSERT(out_indirect_meta != nullptr); + ASSERT(base_storage != nullptr); + ASSERT(patch_info.HasAesCtrExTable()); + ASSERT(patch_info.HasIndirectTable()); + ASSERT(Common::IsAligned<s64>(patch_info.aes_ctr_ex_size, NcaHeader::XtsBlockSize)); + + // Validate patch info extents. + R_UNLESS(patch_info.indirect_size > 0, ResultInvalidNcaPatchInfoIndirectSize); + R_UNLESS(patch_info.aes_ctr_ex_size >= 0, ResultInvalidNcaPatchInfoAesCtrExSize); + R_UNLESS(patch_info.indirect_size + patch_info.indirect_offset <= patch_info.aes_ctr_ex_offset, + ResultInvalidNcaPatchInfoAesCtrExOffset); + R_UNLESS(patch_info.aes_ctr_ex_offset + patch_info.aes_ctr_ex_size <= + meta_data_hash_data_info.offset, + ResultRomNcaInvalidPatchMetaDataHashDataOffset); + + // Get the base storage size. + s64 base_size = base_storage->GetSize(); + + // Check that extents remain within range. + R_UNLESS(patch_info.indirect_offset + patch_info.indirect_size <= base_size, + ResultNcaBaseStorageOutOfRangeE); + R_UNLESS(patch_info.aes_ctr_ex_offset + patch_info.aes_ctr_ex_size <= base_size, + ResultNcaBaseStorageOutOfRangeB); + + // Check that metadata hash data extents remain within range. 
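+    // (The hash data size is aligned up to the AES-CTR block size before the bounds
+    // check, mirroring the sparse meta path above.)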
+ const s64 meta_data_hash_data_offset = meta_data_hash_data_info.offset; + const s64 meta_data_hash_data_size = + Common::AlignUp<s64>(meta_data_hash_data_info.size, NcaHeader::CtrBlockSize); + R_UNLESS(meta_data_hash_data_offset + meta_data_hash_data_size <= base_size, + ResultNcaBaseStorageOutOfRangeB); + + // Create the encrypted storage. + auto enc_storage = std::make_shared<OffsetVfsFile>( + std::move(base_storage), + meta_data_hash_data_offset + meta_data_hash_data_size - patch_info.indirect_offset, + patch_info.indirect_offset); + R_UNLESS(enc_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Create the decrypted storage. + VirtualFile decrypted_storage; + R_TRY(this->CreateAesCtrStorage(std::addressof(decrypted_storage), std::move(enc_storage), + offset + patch_info.indirect_offset, upper_iv, + AlignmentStorageRequirement::None)); + + // Create the verification storage. + VirtualFile integrity_storage; + Result rc = this->CreateIntegrityVerificationStorageForMeta( + std::addressof(integrity_storage), out_layer_info_storage, std::move(decrypted_storage), + patch_info.indirect_offset, meta_data_hash_data_info); + if (rc == ResultInvalidNcaMetaDataHashDataSize) { + R_THROW(ResultRomNcaInvalidPatchMetaDataHashDataSize); + } + if (rc == ResultInvalidNcaMetaDataHashDataHash) { + R_THROW(ResultRomNcaInvalidPatchMetaDataHashDataHash); + } + R_TRY(rc); + + // Create the indirect meta storage. + auto indirect_meta_storage = + std::make_shared<OffsetVfsFile>(integrity_storage, patch_info.indirect_size, + patch_info.indirect_offset - patch_info.indirect_offset); + R_UNLESS(indirect_meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Create the aes ctr ex meta storage. + auto aes_ctr_ex_meta_storage = + std::make_shared<OffsetVfsFile>(integrity_storage, patch_info.aes_ctr_ex_size, + patch_info.aes_ctr_ex_offset - patch_info.indirect_offset); + R_UNLESS(aes_ctr_ex_meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Set the output. + *out_aes_ctr_ex_meta = std::move(aes_ctr_ex_meta_storage); + *out_indirect_meta = std::move(indirect_meta_storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreateSha256Storage( + VirtualFile* out, VirtualFile base_storage, + const NcaFsHeader::HashData::HierarchicalSha256Data& hash_data) { + // Validate preconditions. + ASSERT(out != nullptr); + ASSERT(base_storage != nullptr); + + // Define storage types. + using VerificationStorage = HierarchicalSha256Storage; + using AlignedStorage = AlignmentMatchingStoragePooledBuffer<1>; + + // Validate the hash data. + R_UNLESS(Common::IsPowerOfTwo(hash_data.hash_block_size), + ResultInvalidHierarchicalSha256BlockSize); + R_UNLESS(hash_data.hash_layer_count == VerificationStorage::LayerCount - 1, + ResultInvalidHierarchicalSha256LayerCount); + + // Get the regions. + const auto& hash_region = hash_data.hash_layer_region[0]; + const auto& data_region = hash_data.hash_layer_region[1]; + + // Determine buffer sizes. + constexpr s32 CacheBlockCount = 2; + const auto hash_buffer_size = static_cast<size_t>(hash_region.size); + const auto cache_buffer_size = CacheBlockCount * hash_data.hash_block_size; + const auto total_buffer_size = hash_buffer_size + cache_buffer_size; + + // Make a buffer holder storage. 
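+    // (The holder keeps the hash and cache buffers alive for as long as the wrapped
+    // base storage is referenced.)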
+ auto buffer_hold_storage = std::make_shared<MemoryResourceBufferHoldStorage>( + std::move(base_storage), total_buffer_size); + R_UNLESS(buffer_hold_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + R_UNLESS(buffer_hold_storage->IsValid(), ResultAllocationMemoryFailedInNcaFileSystemDriverI); + + // Get storage size. + s64 base_size = buffer_hold_storage->GetSize(); + + // Check that we're within range. + R_UNLESS(hash_region.offset + hash_region.size <= base_size, ResultNcaBaseStorageOutOfRangeC); + R_UNLESS(data_region.offset + data_region.size <= base_size, ResultNcaBaseStorageOutOfRangeC); + + // Create the master hash storage. + auto master_hash_storage = + std::make_shared<ArrayVfsFile<sizeof(Hash)>>(hash_data.fs_data_master_hash.value); + + // Make the verification storage. + auto verification_storage = std::make_shared<VerificationStorage>(); + R_UNLESS(verification_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Make layer storages. + std::array<VirtualFile, VerificationStorage::LayerCount> layer_storages{ + std::make_shared<OffsetVfsFile>(master_hash_storage, sizeof(Hash), 0), + std::make_shared<OffsetVfsFile>(buffer_hold_storage, hash_region.size, hash_region.offset), + std::make_shared<OffsetVfsFile>(buffer_hold_storage, data_region.size, data_region.offset), + }; + + // Initialize the verification storage. + R_TRY(verification_storage->Initialize(layer_storages.data(), VerificationStorage::LayerCount, + hash_data.hash_block_size, + buffer_hold_storage->GetBuffer(), hash_buffer_size)); + + // Make the aligned storage. + auto aligned_storage = std::make_shared<AlignedStorage>(std::move(verification_storage), + hash_data.hash_block_size); + R_UNLESS(aligned_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Set the output. + *out = std::move(aligned_storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreateIntegrityVerificationStorage( + VirtualFile* out, VirtualFile base_storage, + const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info) { + R_RETURN(this->CreateIntegrityVerificationStorageImpl( + out, base_storage, meta_info, 0, IntegrityDataCacheCount, IntegrityHashCacheCount, + HierarchicalIntegrityVerificationStorage::GetDefaultDataCacheBufferLevel( + meta_info.level_hash_info.max_layers))); +} + +Result NcaFileSystemDriver::CreateIntegrityVerificationStorageForMeta( + VirtualFile* out, VirtualFile* out_layer_info_storage, VirtualFile base_storage, s64 offset, + const NcaMetaDataHashDataInfo& meta_data_hash_data_info) { + // Validate preconditions. + ASSERT(out != nullptr); + + // Check the meta data hash data size. + R_UNLESS(meta_data_hash_data_info.size == sizeof(NcaMetaDataHashData), + ResultInvalidNcaMetaDataHashDataSize); + + // Read the meta data hash data. + NcaMetaDataHashData meta_data_hash_data; + base_storage->ReadObject(std::addressof(meta_data_hash_data), + meta_data_hash_data_info.offset - offset); + + // Set the out layer info storage, if necessary. + if (out_layer_info_storage != nullptr) { + auto layer_info_storage = std::make_shared<OffsetVfsFile>( + base_storage, + meta_data_hash_data_info.offset + meta_data_hash_data_info.size - + meta_data_hash_data.layer_info_offset, + meta_data_hash_data.layer_info_offset - offset); + R_UNLESS(layer_info_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + *out_layer_info_storage = std::move(layer_info_storage); + } + + // Create the meta storage. 
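+    // (The meta view spans the NCA region up to the start of the hash data; the
+    // integrity layers inside it are resolved relative to layer_info_offset below.)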
+ auto meta_storage = std::make_shared<OffsetVfsFile>( + std::move(base_storage), meta_data_hash_data_info.offset - offset, 0); + R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Create the integrity verification storage. + R_RETURN(this->CreateIntegrityVerificationStorageImpl( + out, std::move(meta_storage), meta_data_hash_data.integrity_meta_info, + meta_data_hash_data.layer_info_offset - offset, IntegrityDataCacheCountForMeta, + IntegrityHashCacheCountForMeta, 0)); +} + +Result NcaFileSystemDriver::CreateIntegrityVerificationStorageImpl( + VirtualFile* out, VirtualFile base_storage, + const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info, s64 layer_info_offset, + int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level) { + // Validate preconditions. + ASSERT(out != nullptr); + ASSERT(base_storage != nullptr); + ASSERT(layer_info_offset >= 0); + + // Define storage types. + using VerificationStorage = HierarchicalIntegrityVerificationStorage; + using StorageInfo = VerificationStorage::HierarchicalStorageInformation; + + // Validate the meta info. + HierarchicalIntegrityVerificationInformation level_hash_info; + std::memcpy(std::addressof(level_hash_info), std::addressof(meta_info.level_hash_info), + sizeof(level_hash_info)); + + R_UNLESS(IntegrityMinLayerCount <= level_hash_info.max_layers, + ResultInvalidNcaHierarchicalIntegrityVerificationLayerCount); + R_UNLESS(level_hash_info.max_layers <= IntegrityMaxLayerCount, + ResultInvalidNcaHierarchicalIntegrityVerificationLayerCount); + + // Get the base storage size. + s64 base_storage_size = base_storage->GetSize(); + + // Create storage info. + StorageInfo storage_info; + for (s32 i = 0; i < static_cast<s32>(level_hash_info.max_layers - 2); ++i) { + const auto& layer_info = level_hash_info.info[i]; + R_UNLESS(layer_info_offset + layer_info.offset + layer_info.size <= base_storage_size, + ResultNcaBaseStorageOutOfRangeD); + + storage_info[i + 1] = std::make_shared<OffsetVfsFile>( + base_storage, layer_info.size, layer_info_offset + layer_info.offset); + } + + // Set the last layer info. + const auto& layer_info = level_hash_info.info[level_hash_info.max_layers - 2]; + const s64 last_layer_info_offset = layer_info_offset > 0 ? 0LL : layer_info.offset.Get(); + R_UNLESS(last_layer_info_offset + layer_info.size <= base_storage_size, + ResultNcaBaseStorageOutOfRangeD); + if (layer_info_offset > 0) { + R_UNLESS(last_layer_info_offset + layer_info.size <= layer_info_offset, + ResultRomNcaInvalidIntegrityLayerInfoOffset); + } + storage_info.SetDataStorage(std::make_shared<OffsetVfsFile>( + std::move(base_storage), layer_info.size, last_layer_info_offset)); + + // Make the integrity romfs storage. + auto integrity_storage = std::make_shared<IntegrityRomFsStorage>(); + R_UNLESS(integrity_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Initialize the integrity storage. + R_TRY(integrity_storage->Initialize(level_hash_info, meta_info.master_hash, storage_info, + max_data_cache_entries, max_hash_cache_entries, + buffer_level)); + + // Set the output. + *out = std::move(integrity_storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreateRegionSwitchStorage(VirtualFile* out, + const NcaFsHeaderReader* header_reader, + VirtualFile inside_storage, + VirtualFile outside_storage) { + // Check pre-conditions. + ASSERT(header_reader->GetHashType() == NcaFsHeader::HashType::HierarchicalIntegrityHash); + + // Create the region. 
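+    // (Reads inside [0, hash target offset) are presumably served by inside_storage,
+    // with everything past the region falling through to outside_storage.)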
+ RegionSwitchStorage::Region region = {}; + R_TRY(header_reader->GetHashTargetOffset(std::addressof(region.size))); + + // Create the region switch storage. + auto region_switch_storage = std::make_shared<RegionSwitchStorage>( + std::move(inside_storage), std::move(outside_storage), region); + R_UNLESS(region_switch_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Set the output. + *out = std::move(region_switch_storage); + R_SUCCEED(); +} + +Result NcaFileSystemDriver::CreateCompressedStorage(VirtualFile* out, + std::shared_ptr<CompressedStorage>* out_cmp, + VirtualFile* out_meta, VirtualFile base_storage, + const NcaCompressionInfo& compression_info) { + R_RETURN(this->CreateCompressedStorage(out, out_cmp, out_meta, std::move(base_storage), + compression_info, m_reader->GetDecompressor())); +} + +Result NcaFileSystemDriver::CreateCompressedStorage(VirtualFile* out, + std::shared_ptr<CompressedStorage>* out_cmp, + VirtualFile* out_meta, VirtualFile base_storage, + const NcaCompressionInfo& compression_info, + GetDecompressorFunction get_decompressor) { + // Check pre-conditions. + ASSERT(out != nullptr); + ASSERT(base_storage != nullptr); + ASSERT(get_decompressor != nullptr); + + // Read and verify the bucket tree header. + BucketTree::Header header; + std::memcpy(std::addressof(header), compression_info.bucket.header.data(), sizeof(header)); + R_TRY(header.Verify()); + + // Determine the storage extents. + const auto table_offset = compression_info.bucket.offset; + const auto table_size = compression_info.bucket.size; + const auto node_size = CompressedStorage::QueryNodeStorageSize(header.entry_count); + const auto entry_size = CompressedStorage::QueryEntryStorageSize(header.entry_count); + R_UNLESS(node_size + entry_size <= table_size, ResultInvalidCompressedStorageSize); + + // If we should, set the output meta storage. + if (out_meta != nullptr) { + auto meta_storage = std::make_shared<OffsetVfsFile>(base_storage, table_size, table_offset); + R_UNLESS(meta_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + *out_meta = std::move(meta_storage); + } + + // Allocate the compressed storage. + auto compressed_storage = std::make_shared<CompressedStorage>(); + R_UNLESS(compressed_storage != nullptr, ResultAllocationMemoryFailedAllocateShared); + + // Initialize the compressed storage. + R_TRY(compressed_storage->Initialize( + std::make_shared<OffsetVfsFile>(base_storage, table_offset, 0), + std::make_shared<OffsetVfsFile>(base_storage, node_size, table_offset), + std::make_shared<OffsetVfsFile>(base_storage, entry_size, table_offset + node_size), + header.entry_count, 64_KiB, 640_KiB, get_decompressor, 16_KiB, 16_KiB, 32)); + + // Potentially set the output compressed storage. + if (out_cmp) { + *out_cmp = compressed_storage; + } + + // Set the output. 
+ *out = std::move(compressed_storage); + R_SUCCEED(); +} + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h new file mode 100644 index 000000000..d317b35ac --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_nca_file_system_driver.h @@ -0,0 +1,360 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "core/file_sys/fssystem/fssystem_compression_common.h" +#include "core/file_sys/fssystem/fssystem_nca_header.h" +#include "core/file_sys/vfs.h" + +namespace FileSys { + +class CompressedStorage; +class AesCtrCounterExtendedStorage; +class IndirectStorage; +class SparseStorage; + +struct NcaCryptoConfiguration; + +using KeyGenerationFunction = void (*)(void* dst_key, size_t dst_key_size, const void* src_key, + size_t src_key_size, s32 key_type); +using VerifySign1Function = bool (*)(const void* sig, size_t sig_size, const void* data, + size_t data_size, u8 generation); + +struct NcaCryptoConfiguration { + static constexpr size_t Rsa2048KeyModulusSize = 2048 / 8; + static constexpr size_t Rsa2048KeyPublicExponentSize = 3; + static constexpr size_t Rsa2048KeyPrivateExponentSize = Rsa2048KeyModulusSize; + + static constexpr size_t Aes128KeySize = 128 / 8; + + static constexpr size_t Header1SignatureKeyGenerationMax = 1; + + static constexpr s32 KeyAreaEncryptionKeyIndexCount = 3; + static constexpr s32 HeaderEncryptionKeyCount = 2; + + static constexpr u8 KeyAreaEncryptionKeyIndexZeroKey = 0xFF; + + static constexpr size_t KeyGenerationMax = 32; + + const u8* header_1_sign_key_moduli[Header1SignatureKeyGenerationMax + 1]; + u8 header_1_sign_key_public_exponent[Rsa2048KeyPublicExponentSize]; + u8 key_area_encryption_key_source[KeyAreaEncryptionKeyIndexCount][Aes128KeySize]; + u8 header_encryption_key_source[Aes128KeySize]; + u8 header_encrypted_encryption_keys[HeaderEncryptionKeyCount][Aes128KeySize]; + KeyGenerationFunction generate_key; + VerifySign1Function verify_sign1; + bool is_plaintext_header_available; + bool is_available_sw_key; +}; +static_assert(std::is_trivial_v<NcaCryptoConfiguration>); + +struct NcaCompressionConfiguration { + GetDecompressorFunction get_decompressor; +}; +static_assert(std::is_trivial_v<NcaCompressionConfiguration>); + +constexpr inline s32 KeyAreaEncryptionKeyCount = + NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount * + NcaCryptoConfiguration::KeyGenerationMax; + +enum class KeyType : s32 { + ZeroKey = -2, + InvalidKey = -1, + NcaHeaderKey1 = KeyAreaEncryptionKeyCount + 0, + NcaHeaderKey2 = KeyAreaEncryptionKeyCount + 1, + NcaExternalKey = KeyAreaEncryptionKeyCount + 2, + SaveDataDeviceUniqueMac = KeyAreaEncryptionKeyCount + 3, + SaveDataSeedUniqueMac = KeyAreaEncryptionKeyCount + 4, + SaveDataTransferMac = KeyAreaEncryptionKeyCount + 5, +}; + +constexpr inline bool IsInvalidKeyTypeValue(s32 key_type) { + return key_type < 0; +} + +constexpr inline s32 GetKeyTypeValue(u8 key_index, u8 key_generation) { + if (key_index == NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexZeroKey) { + return static_cast<s32>(KeyType::ZeroKey); + } + + if (key_index >= NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount) { + return static_cast<s32>(KeyType::InvalidKey); + } + + return NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount * key_generation + key_index; +} + +class NcaReader { + YUZU_NON_COPYABLE(NcaReader); + YUZU_NON_MOVEABLE(NcaReader); + +private: + 
NcaHeader m_header; + u8 m_decryption_keys[NcaHeader::DecryptionKey_Count][NcaCryptoConfiguration::Aes128KeySize]; + VirtualFile m_body_storage; + VirtualFile m_header_storage; + u8 m_external_decryption_key[NcaCryptoConfiguration::Aes128KeySize]; + bool m_is_software_aes_prioritized; + bool m_is_available_sw_key; + NcaHeader::EncryptionType m_header_encryption_type; + bool m_is_header_sign1_signature_valid; + GetDecompressorFunction m_get_decompressor; + +public: + NcaReader(); + ~NcaReader(); + + Result Initialize(VirtualFile base_storage, const NcaCryptoConfiguration& crypto_cfg, + const NcaCompressionConfiguration& compression_cfg); + + VirtualFile GetSharedBodyStorage(); + u32 GetMagic() const; + NcaHeader::DistributionType GetDistributionType() const; + NcaHeader::ContentType GetContentType() const; + u8 GetHeaderSign1KeyGeneration() const; + u8 GetKeyGeneration() const; + u8 GetKeyIndex() const; + u64 GetContentSize() const; + u64 GetProgramId() const; + u32 GetContentIndex() const; + u32 GetSdkAddonVersion() const; + void GetRightsId(u8* dst, size_t dst_size) const; + bool HasFsInfo(s32 index) const; + s32 GetFsCount() const; + const Hash& GetFsHeaderHash(s32 index) const; + void GetFsHeaderHash(Hash* dst, s32 index) const; + void GetFsInfo(NcaHeader::FsInfo* dst, s32 index) const; + u64 GetFsOffset(s32 index) const; + u64 GetFsEndOffset(s32 index) const; + u64 GetFsSize(s32 index) const; + void GetEncryptedKey(void* dst, size_t size) const; + const void* GetDecryptionKey(s32 index) const; + bool HasValidInternalKey() const; + bool HasInternalDecryptionKeyForAesHw() const; + bool IsSoftwareAesPrioritized() const; + void PrioritizeSoftwareAes(); + bool IsAvailableSwKey() const; + bool HasExternalDecryptionKey() const; + const void* GetExternalDecryptionKey() const; + void SetExternalDecryptionKey(const void* src, size_t size); + void GetRawData(void* dst, size_t dst_size) const; + NcaHeader::EncryptionType GetEncryptionType() const; + Result ReadHeader(NcaFsHeader* dst, s32 index) const; + + GetDecompressorFunction GetDecompressor() const; + + bool GetHeaderSign1Valid() const; + + void GetHeaderSign2(void* dst, size_t size) const; +}; + +class NcaFsHeaderReader { + YUZU_NON_COPYABLE(NcaFsHeaderReader); + YUZU_NON_MOVEABLE(NcaFsHeaderReader); + +private: + NcaFsHeader m_data; + s32 m_fs_index; + +public: + NcaFsHeaderReader() : m_fs_index(-1) { + std::memset(std::addressof(m_data), 0, sizeof(m_data)); + } + + Result Initialize(const NcaReader& reader, s32 index); + bool IsInitialized() const { + return m_fs_index >= 0; + } + + void GetRawData(void* dst, size_t dst_size) const; + + NcaFsHeader::HashData& GetHashData(); + const NcaFsHeader::HashData& GetHashData() const; + u16 GetVersion() const; + s32 GetFsIndex() const; + NcaFsHeader::FsType GetFsType() const; + NcaFsHeader::HashType GetHashType() const; + NcaFsHeader::EncryptionType GetEncryptionType() const; + NcaPatchInfo& GetPatchInfo(); + const NcaPatchInfo& GetPatchInfo() const; + const NcaAesCtrUpperIv GetAesCtrUpperIv() const; + + bool IsSkipLayerHashEncryption() const; + Result GetHashTargetOffset(s64* out) const; + + bool ExistsSparseLayer() const; + NcaSparseInfo& GetSparseInfo(); + const NcaSparseInfo& GetSparseInfo() const; + + bool ExistsCompressionLayer() const; + NcaCompressionInfo& GetCompressionInfo(); + const NcaCompressionInfo& GetCompressionInfo() const; + + bool ExistsPatchMetaHashLayer() const; + NcaMetaDataHashDataInfo& GetPatchMetaDataHashDataInfo(); + const NcaMetaDataHashDataInfo& 
GetPatchMetaDataHashDataInfo() const; + NcaFsHeader::MetaDataHashType GetPatchMetaHashType() const; + + bool ExistsSparseMetaHashLayer() const; + NcaMetaDataHashDataInfo& GetSparseMetaDataHashDataInfo(); + const NcaMetaDataHashDataInfo& GetSparseMetaDataHashDataInfo() const; + NcaFsHeader::MetaDataHashType GetSparseMetaHashType() const; +}; + +class NcaFileSystemDriver { + YUZU_NON_COPYABLE(NcaFileSystemDriver); + YUZU_NON_MOVEABLE(NcaFileSystemDriver); + +public: + struct StorageContext { + bool open_raw_storage; + VirtualFile body_substorage; + std::shared_ptr<SparseStorage> current_sparse_storage; + VirtualFile sparse_storage_meta_storage; + std::shared_ptr<SparseStorage> original_sparse_storage; + void* external_current_sparse_storage; + void* external_original_sparse_storage; + VirtualFile aes_ctr_ex_storage_meta_storage; + VirtualFile aes_ctr_ex_storage_data_storage; + std::shared_ptr<AesCtrCounterExtendedStorage> aes_ctr_ex_storage; + VirtualFile indirect_storage_meta_storage; + std::shared_ptr<IndirectStorage> indirect_storage; + VirtualFile fs_data_storage; + VirtualFile compressed_storage_meta_storage; + std::shared_ptr<CompressedStorage> compressed_storage; + + VirtualFile patch_layer_info_storage; + VirtualFile sparse_layer_info_storage; + + VirtualFile external_original_storage; + }; + +private: + enum class AlignmentStorageRequirement { + CacheBlockSize = 0, + None = 1, + }; + +private: + std::shared_ptr<NcaReader> m_original_reader; + std::shared_ptr<NcaReader> m_reader; + +public: + static Result SetupFsHeaderReader(NcaFsHeaderReader* out, const NcaReader& reader, + s32 fs_index); + +public: + NcaFileSystemDriver(std::shared_ptr<NcaReader> reader) : m_original_reader(), m_reader(reader) { + ASSERT(m_reader != nullptr); + } + + NcaFileSystemDriver(std::shared_ptr<NcaReader> original_reader, + std::shared_ptr<NcaReader> reader) + : m_original_reader(original_reader), m_reader(reader) { + ASSERT(m_reader != nullptr); + } + + Result OpenStorageWithContext(VirtualFile* out, NcaFsHeaderReader* out_header_reader, + s32 fs_index, StorageContext* ctx); + + Result OpenStorage(VirtualFile* out, NcaFsHeaderReader* out_header_reader, s32 fs_index) { + // Create a storage context. + StorageContext ctx{}; + + // Open the storage. 
+ R_RETURN(OpenStorageWithContext(out, out_header_reader, fs_index, std::addressof(ctx))); + } + +public: + Result CreateStorageByRawStorage(VirtualFile* out, const NcaFsHeaderReader* header_reader, + VirtualFile raw_storage, StorageContext* ctx); + +private: + Result OpenStorageImpl(VirtualFile* out, NcaFsHeaderReader* out_header_reader, s32 fs_index, + StorageContext* ctx); + + Result OpenIndirectableStorageAsOriginal(VirtualFile* out, + const NcaFsHeaderReader* header_reader, + StorageContext* ctx); + + Result CreateBodySubStorage(VirtualFile* out, s64 offset, s64 size); + + Result CreateAesCtrStorage(VirtualFile* out, VirtualFile base_storage, s64 offset, + const NcaAesCtrUpperIv& upper_iv, + AlignmentStorageRequirement alignment_storage_requirement); + Result CreateAesXtsStorage(VirtualFile* out, VirtualFile base_storage, s64 offset); + + Result CreateSparseStorageMetaStorage(VirtualFile* out, VirtualFile base_storage, s64 offset, + const NcaAesCtrUpperIv& upper_iv, + const NcaSparseInfo& sparse_info); + Result CreateSparseStorageCore(std::shared_ptr<SparseStorage>* out, VirtualFile base_storage, + s64 base_size, VirtualFile meta_storage, + const NcaSparseInfo& sparse_info, bool external_info); + Result CreateSparseStorage(VirtualFile* out, s64* out_fs_data_offset, + std::shared_ptr<SparseStorage>* out_sparse_storage, + VirtualFile* out_meta_storage, s32 index, + const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info); + + Result CreateSparseStorageMetaStorageWithVerification( + VirtualFile* out, VirtualFile* out_verification, VirtualFile base_storage, s64 offset, + const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info, + const NcaMetaDataHashDataInfo& meta_data_hash_data_info); + Result CreateSparseStorageWithVerification( + VirtualFile* out, s64* out_fs_data_offset, + std::shared_ptr<SparseStorage>* out_sparse_storage, VirtualFile* out_meta_storage, + VirtualFile* out_verification, s32 index, const NcaAesCtrUpperIv& upper_iv, + const NcaSparseInfo& sparse_info, const NcaMetaDataHashDataInfo& meta_data_hash_data_info, + NcaFsHeader::MetaDataHashType meta_data_hash_type); + + Result CreateAesCtrExStorageMetaStorage(VirtualFile* out, VirtualFile base_storage, s64 offset, + NcaFsHeader::EncryptionType encryption_type, + const NcaAesCtrUpperIv& upper_iv, + const NcaPatchInfo& patch_info); + Result CreateAesCtrExStorage(VirtualFile* out, + std::shared_ptr<AesCtrCounterExtendedStorage>* out_ext, + VirtualFile base_storage, VirtualFile meta_storage, + s64 counter_offset, const NcaAesCtrUpperIv& upper_iv, + const NcaPatchInfo& patch_info); + + Result CreateIndirectStorageMetaStorage(VirtualFile* out, VirtualFile base_storage, + const NcaPatchInfo& patch_info); + Result CreateIndirectStorage(VirtualFile* out, std::shared_ptr<IndirectStorage>* out_ind, + VirtualFile base_storage, VirtualFile original_data_storage, + VirtualFile meta_storage, const NcaPatchInfo& patch_info); + + Result CreatePatchMetaStorage(VirtualFile* out_aes_ctr_ex_meta, VirtualFile* out_indirect_meta, + VirtualFile* out_verification, VirtualFile base_storage, + s64 offset, const NcaAesCtrUpperIv& upper_iv, + const NcaPatchInfo& patch_info, + const NcaMetaDataHashDataInfo& meta_data_hash_data_info); + + Result CreateSha256Storage(VirtualFile* out, VirtualFile base_storage, + const NcaFsHeader::HashData::HierarchicalSha256Data& sha256_data); + + Result CreateIntegrityVerificationStorage( + VirtualFile* out, VirtualFile base_storage, + const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info); + 
Result CreateIntegrityVerificationStorageForMeta( + VirtualFile* out, VirtualFile* out_verification, VirtualFile base_storage, s64 offset, + const NcaMetaDataHashDataInfo& meta_data_hash_data_info); + Result CreateIntegrityVerificationStorageImpl( + VirtualFile* out, VirtualFile base_storage, + const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info, s64 layer_info_offset, + int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level); + + Result CreateRegionSwitchStorage(VirtualFile* out, const NcaFsHeaderReader* header_reader, + VirtualFile inside_storage, VirtualFile outside_storage); + + Result CreateCompressedStorage(VirtualFile* out, std::shared_ptr<CompressedStorage>* out_cmp, + VirtualFile* out_meta, VirtualFile base_storage, + const NcaCompressionInfo& compression_info); + +public: + Result CreateCompressedStorage(VirtualFile* out, std::shared_ptr<CompressedStorage>* out_cmp, + VirtualFile* out_meta, VirtualFile base_storage, + const NcaCompressionInfo& compression_info, + GetDecompressorFunction get_decompressor); +}; + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_nca_header.cpp b/src/core/file_sys/fssystem/fssystem_nca_header.cpp new file mode 100644 index 000000000..bf5742d39 --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_nca_header.cpp @@ -0,0 +1,20 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#include "core/file_sys/fssystem/fssystem_nca_header.h" + +namespace FileSys { + +u8 NcaHeader::GetProperKeyGeneration() const { + return std::max(this->key_generation, this->key_generation_2); +} + +bool NcaPatchInfo::HasIndirectTable() const { + return this->indirect_size != 0; +} + +bool NcaPatchInfo::HasAesCtrExTable() const { + return this->aes_ctr_ex_size != 0; +} + +} // namespace FileSys diff --git a/src/core/file_sys/fssystem/fssystem_nca_header.h b/src/core/file_sys/fssystem/fssystem_nca_header.h new file mode 100644 index 000000000..a02c5d881 --- /dev/null +++ b/src/core/file_sys/fssystem/fssystem_nca_header.h @@ -0,0 +1,338 @@ +// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project +// SPDX-License-Identifier: GPL-2.0-or-later + +#pragma once + +#include "common/common_funcs.h" +#include "common/common_types.h" +#include "common/literals.h" + +#include "core/file_sys/errors.h" +#include "core/file_sys/fssystem/fs_types.h" + +namespace FileSys { + +using namespace Common::Literals; + +struct Hash { + static constexpr std::size_t Size = 256 / 8; + std::array<u8, Size> value; +}; +static_assert(sizeof(Hash) == Hash::Size); +static_assert(std::is_trivial_v<Hash>); + +using NcaDigest = Hash; + +struct NcaHeader { + enum class ContentType : u8 { + Program = 0, + Meta = 1, + Control = 2, + Manual = 3, + Data = 4, + PublicData = 5, + + Start = Program, + End = PublicData, + }; + + enum class DistributionType : u8 { + Download = 0, + GameCard = 1, + + Start = Download, + End = GameCard, + }; + + enum class EncryptionType : u8 { + Auto = 0, + None = 1, + }; + + enum DecryptionKey { + DecryptionKey_AesXts = 0, + DecryptionKey_AesXts1 = DecryptionKey_AesXts, + DecryptionKey_AesXts2 = 1, + DecryptionKey_AesCtr = 2, + DecryptionKey_AesCtrEx = 3, + DecryptionKey_AesCtrHw = 4, + DecryptionKey_Count, + }; + + struct FsInfo { + u32 start_sector; + u32 end_sector; + u32 hash_sectors; + u32 reserved; + }; + static_assert(sizeof(FsInfo) == 0x10); + static_assert(std::is_trivial_v<FsInfo>); + + static constexpr u32 Magic0 = Common::MakeMagic('N', 'C', 'A', '0'); + 
static constexpr u32 Magic1 = Common::MakeMagic('N', 'C', 'A', '1');
+    static constexpr u32 Magic2 = Common::MakeMagic('N', 'C', 'A', '2');
+    static constexpr u32 Magic3 = Common::MakeMagic('N', 'C', 'A', '3');
+
+    static constexpr u32 Magic = Magic3;
+
+    static constexpr std::size_t Size = 1_KiB;
+    static constexpr s32 FsCountMax = 4;
+    static constexpr std::size_t HeaderSignCount = 2;
+    static constexpr std::size_t HeaderSignSize = 0x100;
+    static constexpr std::size_t EncryptedKeyAreaSize = 0x100;
+    static constexpr std::size_t SectorSize = 0x200;
+    static constexpr std::size_t SectorShift = 9;
+    static constexpr std::size_t RightsIdSize = 0x10;
+    static constexpr std::size_t XtsBlockSize = 0x200;
+    static constexpr std::size_t CtrBlockSize = 0x10;
+
+    static_assert(SectorSize == (1 << SectorShift));
+
+    // Data members.
+    std::array<u8, HeaderSignSize> header_sign_1;
+    std::array<u8, HeaderSignSize> header_sign_2;
+    u32 magic;
+    DistributionType distribution_type;
+    ContentType content_type;
+    u8 key_generation;
+    u8 key_index;
+    u64 content_size;
+    u64 program_id;
+    u32 content_index;
+    u32 sdk_addon_version;
+    u8 key_generation_2;
+    u8 header1_signature_key_generation;
+    std::array<u8, 2> reserved_222;
+    std::array<u32, 3> reserved_224;
+    std::array<u8, RightsIdSize> rights_id;
+    std::array<FsInfo, FsCountMax> fs_info;
+    std::array<Hash, FsCountMax> fs_header_hash;
+    std::array<u8, EncryptedKeyAreaSize> encrypted_key_area;
+
+    static constexpr u64 SectorToByte(u32 sector) {
+        return static_cast<u64>(sector) << SectorShift;
+    }
+
+    static constexpr u32 ByteToSector(u64 byte) {
+        return static_cast<u32>(byte >> SectorShift);
+    }
+
+    u8 GetProperKeyGeneration() const;
+};
+static_assert(sizeof(NcaHeader) == NcaHeader::Size);
+static_assert(std::is_trivial_v<NcaHeader>);
+
+struct NcaBucketInfo {
+    static constexpr size_t HeaderSize = 0x10;
+    Int64 offset;
+    Int64 size;
+    std::array<u8, HeaderSize> header;
+};
+static_assert(std::is_trivial_v<NcaBucketInfo>);
+
+struct NcaPatchInfo {
+    static constexpr size_t Size = 0x40;
+    static constexpr size_t Offset = 0x100;
+
+    Int64 indirect_offset;
+    Int64 indirect_size;
+    std::array<u8, NcaBucketInfo::HeaderSize> indirect_header;
+    Int64 aes_ctr_ex_offset;
+    Int64 aes_ctr_ex_size;
+    std::array<u8, NcaBucketInfo::HeaderSize> aes_ctr_ex_header;
+
+    bool HasIndirectTable() const;
+    bool HasAesCtrExTable() const;
+};
+static_assert(std::is_trivial_v<NcaPatchInfo>);
+
+union NcaAesCtrUpperIv {
+    u64 value;
+    struct {
+        u32 generation;
+        u32 secure_value;
+    } part;
+};
+static_assert(std::is_trivial_v<NcaAesCtrUpperIv>);
+
+struct NcaSparseInfo {
+    NcaBucketInfo bucket;
+    Int64 physical_offset;
+    u16 generation;
+    std::array<u8, 6> reserved;
+
+    s64 GetPhysicalSize() const {
+        return this->bucket.offset + this->bucket.size;
+    }
+
+    u32 GetGeneration() const {
+        return static_cast<u32>(this->generation) << 16;
+    }
+
+    const NcaAesCtrUpperIv MakeAesCtrUpperIv(NcaAesCtrUpperIv upper_iv) const {
+        NcaAesCtrUpperIv sparse_upper_iv = upper_iv;
+        sparse_upper_iv.part.generation = this->GetGeneration();
+        return sparse_upper_iv;
+    }
+};
+static_assert(std::is_trivial_v<NcaSparseInfo>);
+
+struct NcaCompressionInfo {
+    NcaBucketInfo bucket;
+    std::array<u8, 8> reserved;
+};
+static_assert(std::is_trivial_v<NcaCompressionInfo>);
+
+struct NcaMetaDataHashDataInfo {
+    Int64 offset;
+    Int64 size;
+    Hash hash;
+};
+static_assert(std::is_trivial_v<NcaMetaDataHashDataInfo>);
+
+struct NcaFsHeader {
+    static constexpr size_t Size = 0x200;
+    static constexpr size_t
HashDataOffset = 0x8; + + struct Region { + Int64 offset; + Int64 size; + }; + static_assert(std::is_trivial_v<Region>); + + enum class FsType : u8 { + RomFs = 0, + PartitionFs = 1, + }; + + enum class EncryptionType : u8 { + Auto = 0, + None = 1, + AesXts = 2, + AesCtr = 3, + AesCtrEx = 4, + AesCtrSkipLayerHash = 5, + AesCtrExSkipLayerHash = 6, + }; + + enum class HashType : u8 { + Auto = 0, + None = 1, + HierarchicalSha256Hash = 2, + HierarchicalIntegrityHash = 3, + AutoSha3 = 4, + HierarchicalSha3256Hash = 5, + HierarchicalIntegritySha3Hash = 6, + }; + + enum class MetaDataHashType : u8 { + None = 0, + HierarchicalIntegrity = 1, + }; + + union HashData { + struct HierarchicalSha256Data { + static constexpr size_t HashLayerCountMax = 5; + static const size_t MasterHashOffset; + + Hash fs_data_master_hash; + s32 hash_block_size; + s32 hash_layer_count; + std::array<Region, HashLayerCountMax> hash_layer_region; + } hierarchical_sha256_data; + static_assert(std::is_trivial_v<HierarchicalSha256Data>); + + struct IntegrityMetaInfo { + static const size_t MasterHashOffset; + + u32 magic; + u32 version; + u32 master_hash_size; + + struct LevelHashInfo { + u32 max_layers; + + struct HierarchicalIntegrityVerificationLevelInformation { + static constexpr size_t IntegrityMaxLayerCount = 7; + Int64 offset; + Int64 size; + s32 block_order; + std::array<u8, 4> reserved; + }; + std::array< + HierarchicalIntegrityVerificationLevelInformation, + HierarchicalIntegrityVerificationLevelInformation::IntegrityMaxLayerCount - 1> + info; + + struct SignatureSalt { + static constexpr size_t Size = 0x20; + std::array<u8, Size> value; + }; + SignatureSalt seed; + } level_hash_info; + + Hash master_hash; + } integrity_meta_info; + static_assert(std::is_trivial_v<IntegrityMetaInfo>); + + std::array<u8, NcaPatchInfo::Offset - HashDataOffset> padding; + }; + + u16 version; + FsType fs_type; + HashType hash_type; + EncryptionType encryption_type; + MetaDataHashType meta_data_hash_type; + std::array<u8, 2> reserved; + HashData hash_data; + NcaPatchInfo patch_info; + NcaAesCtrUpperIv aes_ctr_upper_iv; + NcaSparseInfo sparse_info; + NcaCompressionInfo compression_info; + NcaMetaDataHashDataInfo meta_data_hash_data_info; + std::array<u8, 0x30> pad; + + bool IsSkipLayerHashEncryption() const { + return this->encryption_type == EncryptionType::AesCtrSkipLayerHash || + this->encryption_type == EncryptionType::AesCtrExSkipLayerHash; + } + + Result GetHashTargetOffset(s64* out) const { + switch (this->hash_type) { + case HashType::HierarchicalIntegrityHash: + case HashType::HierarchicalIntegritySha3Hash: + *out = this->hash_data.integrity_meta_info.level_hash_info + .info[this->hash_data.integrity_meta_info.level_hash_info.max_layers - 2] + .offset; + R_SUCCEED(); + case HashType::HierarchicalSha256Hash: + case HashType::HierarchicalSha3256Hash: + *out = + this->hash_data.hierarchical_sha256_data + .hash_layer_region[this->hash_data.hierarchical_sha256_data.hash_layer_count - + 1] + .offset; + R_SUCCEED(); + default: + R_THROW(ResultInvalidNcaFsHeader); + } + } +}; +static_assert(sizeof(NcaFsHeader) == NcaFsHeader::Size); +static_assert(std::is_trivial_v<NcaFsHeader>); +static_assert(offsetof(NcaFsHeader, patch_info) == NcaPatchInfo::Offset); + +inline constexpr const size_t NcaFsHeader::HashData::HierarchicalSha256Data::MasterHashOffset = + offsetof(NcaFsHeader, hash_data.hierarchical_sha256_data.fs_data_master_hash); +inline constexpr const size_t NcaFsHeader::HashData::IntegrityMetaInfo::MasterHashOffset = + 
offsetof(NcaFsHeader, hash_data.integrity_meta_info.master_hash);
+
+struct NcaMetaDataHashData {
+    s64 layer_info_offset;
+    NcaFsHeader::HashData::IntegrityMetaInfo integrity_meta_info;
+};
+static_assert(sizeof(NcaMetaDataHashData) ==
+              sizeof(NcaFsHeader::HashData::IntegrityMetaInfo) + sizeof(s64));
+static_assert(std::is_trivial_v<NcaMetaDataHashData>);
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_nca_reader.cpp b/src/core/file_sys/fssystem/fssystem_nca_reader.cpp
new file mode 100644
index 000000000..cd4c49069
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_nca_reader.cpp
@@ -0,0 +1,542 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"
+#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
+#include "core/file_sys/vfs_offset.h"
+
+namespace FileSys {
+
+namespace {
+
+constexpr inline u32 SdkAddonVersionMin = 0x000B0000;
+constexpr inline size_t Aes128KeySize = 0x10;
+constexpr const std::array<u8, Aes128KeySize> ZeroKey{};
+
+constexpr Result CheckNcaMagic(u32 magic) {
+    // Verify the magic is not a deprecated one.
+    R_UNLESS(magic != NcaHeader::Magic0, ResultUnsupportedSdkVersion);
+    R_UNLESS(magic != NcaHeader::Magic1, ResultUnsupportedSdkVersion);
+    R_UNLESS(magic != NcaHeader::Magic2, ResultUnsupportedSdkVersion);
+
+    // Verify the magic is the current one.
+    R_UNLESS(magic == NcaHeader::Magic3, ResultInvalidNcaSignature);
+
+    R_SUCCEED();
+}
+
+} // namespace
+
+NcaReader::NcaReader()
+    : m_body_storage(), m_header_storage(), m_is_software_aes_prioritized(false),
+      m_is_available_sw_key(false), m_header_encryption_type(NcaHeader::EncryptionType::Auto),
+      m_get_decompressor() {
+    std::memset(std::addressof(m_header), 0, sizeof(m_header));
+    std::memset(std::addressof(m_decryption_keys), 0, sizeof(m_decryption_keys));
+    std::memset(std::addressof(m_external_decryption_key), 0, sizeof(m_external_decryption_key));
+}
+
+NcaReader::~NcaReader() {}
+
+Result NcaReader::Initialize(VirtualFile base_storage, const NcaCryptoConfiguration& crypto_cfg,
+                             const NcaCompressionConfiguration& compression_cfg) {
+    // Validate preconditions.
+    ASSERT(base_storage != nullptr);
+    ASSERT(m_body_storage == nullptr);
+
+    // Create the work header storage.
+    VirtualFile work_header_storage;
+
+    // We need to be able to generate keys.
+    R_UNLESS(crypto_cfg.generate_key != nullptr, ResultInvalidArgument);
+
+    // Generate keys for header.
+    using AesXtsStorageForNcaHeader = AesXtsStorage;
+
+    constexpr const s32 HeaderKeyTypeValues[NcaCryptoConfiguration::HeaderEncryptionKeyCount] = {
+        static_cast<s32>(KeyType::NcaHeaderKey1),
+        static_cast<s32>(KeyType::NcaHeaderKey2),
+    };
+
+    u8 header_decryption_keys[NcaCryptoConfiguration::HeaderEncryptionKeyCount]
+                             [NcaCryptoConfiguration::Aes128KeySize];
+    for (size_t i = 0; i < NcaCryptoConfiguration::HeaderEncryptionKeyCount; i++) {
+        crypto_cfg.generate_key(header_decryption_keys[i], AesXtsStorageForNcaHeader::KeySize,
+                                crypto_cfg.header_encrypted_encryption_keys[i],
+                                AesXtsStorageForNcaHeader::KeySize, HeaderKeyTypeValues[i]);
+    }
+
+    // Create the header storage.
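+    // (The NCA header is AES-XTS encrypted with an all-zero IV and the header XTS
+    // block size.)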
+ const u8 header_iv[AesXtsStorageForNcaHeader::IvSize] = {}; + work_header_storage = std::make_unique<AesXtsStorageForNcaHeader>( + base_storage, header_decryption_keys[0], header_decryption_keys[1], + AesXtsStorageForNcaHeader::KeySize, header_iv, AesXtsStorageForNcaHeader::IvSize, + NcaHeader::XtsBlockSize); + + // Check that we successfully created the storage. + R_UNLESS(work_header_storage != nullptr, ResultAllocationMemoryFailedInNcaReaderA); + + // Read the header. + work_header_storage->ReadObject(std::addressof(m_header), 0); + + // Validate the magic. + if (const Result magic_result = CheckNcaMagic(m_header.magic); R_FAILED(magic_result)) { + // Try to use a plaintext header. + base_storage->ReadObject(std::addressof(m_header), 0); + R_UNLESS(R_SUCCEEDED(CheckNcaMagic(m_header.magic)), magic_result); + + // Configure to use the plaintext header. + auto base_storage_size = base_storage->GetSize(); + work_header_storage = std::make_shared<OffsetVfsFile>(base_storage, base_storage_size, 0); + R_UNLESS(work_header_storage != nullptr, ResultAllocationMemoryFailedInNcaReaderA); + + // Set encryption type as plaintext. + m_header_encryption_type = NcaHeader::EncryptionType::None; + } + + // Validate the fixed key signature. + if (m_header.header1_signature_key_generation > + NcaCryptoConfiguration::Header1SignatureKeyGenerationMax) { + LOG_CRITICAL(Frontend, + "NcaCryptoConfiguration::Header1SignatureKeyGenerationMax = {}, " + "m_header.header1_signature_key_generation = {}", + NcaCryptoConfiguration::Header1SignatureKeyGenerationMax, + m_header.header1_signature_key_generation); + } + + R_UNLESS(m_header.header1_signature_key_generation <= + NcaCryptoConfiguration::Header1SignatureKeyGenerationMax, + ResultInvalidNcaHeader1SignatureKeyGeneration); + + // Verify the header sign1. + if (crypto_cfg.verify_sign1 != nullptr) { + const u8* sig = m_header.header_sign_1.data(); + const size_t sig_size = NcaHeader::HeaderSignSize; + const u8* msg = + static_cast<const u8*>(static_cast<const void*>(std::addressof(m_header.magic))); + const size_t msg_size = + NcaHeader::Size - NcaHeader::HeaderSignSize * NcaHeader::HeaderSignCount; + + m_is_header_sign1_signature_valid = crypto_cfg.verify_sign1( + sig, sig_size, msg, msg_size, m_header.header1_signature_key_generation); + + if (!m_is_header_sign1_signature_valid) { + LOG_WARNING(Common_Filesystem, "Invalid NCA header sign1"); + } + } + + // Validate the sdk version. + R_UNLESS(m_header.sdk_addon_version >= SdkAddonVersionMin, ResultUnsupportedSdkVersion); + + // Validate the key index. + R_UNLESS(m_header.key_index < NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount || + m_header.key_index == NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexZeroKey, + ResultInvalidNcaKeyIndex); + + // Check if we have a rights id. + constexpr const std::array<u8, NcaHeader::RightsIdSize> ZeroRightsId{}; + if (std::memcmp(ZeroRightsId.data(), m_header.rights_id.data(), NcaHeader::RightsIdSize) == 0) { + // If we don't, then we don't have an external key, so we need to generate decryption keys. 
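+        // (Each key-area entry is unwrapped with the key-area encryption key selected
+        // by key_index and the proper key generation.)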
+        crypto_cfg.generate_key(
+            m_decryption_keys[NcaHeader::DecryptionKey_AesCtr], Aes128KeySize,
+            m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesCtr * Aes128KeySize,
+            Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
+        crypto_cfg.generate_key(
+            m_decryption_keys[NcaHeader::DecryptionKey_AesXts1], Aes128KeySize,
+            m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesXts1 * Aes128KeySize,
+            Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
+        crypto_cfg.generate_key(
+            m_decryption_keys[NcaHeader::DecryptionKey_AesXts2], Aes128KeySize,
+            m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesXts2 * Aes128KeySize,
+            Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
+        crypto_cfg.generate_key(
+            m_decryption_keys[NcaHeader::DecryptionKey_AesCtrEx], Aes128KeySize,
+            m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesCtrEx * Aes128KeySize,
+            Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
+
+        // Copy the hardware speed emulation key.
+        std::memcpy(m_decryption_keys[NcaHeader::DecryptionKey_AesCtrHw],
+                    m_header.encrypted_key_area.data() +
+                        NcaHeader::DecryptionKey_AesCtrHw * Aes128KeySize,
+                    Aes128KeySize);
+    }
+
+    // Clear the external decryption key.
+    std::memset(m_external_decryption_key, 0, sizeof(m_external_decryption_key));
+
+    // Set software key availability.
+    m_is_available_sw_key = crypto_cfg.is_available_sw_key;
+
+    // Set our decompressor function getter.
+    m_get_decompressor = compression_cfg.get_decompressor;
+
+    // Set our storages.
+    m_header_storage = std::move(work_header_storage);
+    m_body_storage = std::move(base_storage);
+
+    R_SUCCEED();
+}
+
+VirtualFile NcaReader::GetSharedBodyStorage() {
+    ASSERT(m_body_storage != nullptr);
+    return m_body_storage;
+}
+
+u32 NcaReader::GetMagic() const {
+    ASSERT(m_body_storage != nullptr);
+    return m_header.magic;
+}
+
+NcaHeader::DistributionType NcaReader::GetDistributionType() const {
+    ASSERT(m_body_storage != nullptr);
+    return m_header.distribution_type;
+}
+
+NcaHeader::ContentType NcaReader::GetContentType() const {
+    ASSERT(m_body_storage != nullptr);
+    return m_header.content_type;
+}
+
+u8 NcaReader::GetHeaderSign1KeyGeneration() const {
+    ASSERT(m_body_storage != nullptr);
+    return m_header.header1_signature_key_generation;
+}
+
+u8 NcaReader::GetKeyGeneration() const {
+    ASSERT(m_body_storage != nullptr);
+    return m_header.GetProperKeyGeneration();
+}
+
+u8 NcaReader::GetKeyIndex() const {
+    ASSERT(m_body_storage != nullptr);
+    return m_header.key_index;
+}
+
+u64 NcaReader::GetContentSize() const {
+    ASSERT(m_body_storage != nullptr);
+    return m_header.content_size;
+}
+
+u64 NcaReader::GetProgramId() const {
+    ASSERT(m_body_storage != nullptr);
+    return m_header.program_id;
+}
+
+u32 NcaReader::GetContentIndex() const {
+    ASSERT(m_body_storage != nullptr);
+    return m_header.content_index;
+}
+
+u32 NcaReader::GetSdkAddonVersion() const {
+    ASSERT(m_body_storage != nullptr);
+    return m_header.sdk_addon_version;
+}
+
+void NcaReader::GetRightsId(u8* dst, size_t dst_size) const {
+    ASSERT(dst != nullptr);
+    ASSERT(dst_size >= NcaHeader::RightsIdSize);
+
+    std::memcpy(dst, m_header.rights_id.data(), NcaHeader::RightsIdSize);
+}
+
+bool NcaReader::HasFsInfo(s32 index) const {
+    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+    return m_header.fs_info[index].start_sector != 0 || m_header.fs_info[index].end_sector != 0;
+}
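+// (Editorial note, not part of the original patch: fs_info addresses the NCA body
+// in media sectors. NcaHeader::SectorToByte is assumed to scale by the 0x200-byte
+// media unit, so the offset/size getters below return sector_count * 0x200.)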
+
+s32 NcaReader::GetFsCount() const {
+    ASSERT(m_body_storage != nullptr);
+    for (s32 i = 0; i < NcaHeader::FsCountMax; i++) {
+        if (!this->HasFsInfo(i)) {
+            return i;
+        }
+    }
+    return NcaHeader::FsCountMax;
+}
+
+const Hash& NcaReader::GetFsHeaderHash(s32 index) const {
+    ASSERT(m_body_storage != nullptr);
+    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+    return m_header.fs_header_hash[index];
+}
+
+void NcaReader::GetFsHeaderHash(Hash* dst, s32 index) const {
+    ASSERT(m_body_storage != nullptr);
+    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+    ASSERT(dst != nullptr);
+    std::memcpy(dst, std::addressof(m_header.fs_header_hash[index]), sizeof(*dst));
+}
+
+void NcaReader::GetFsInfo(NcaHeader::FsInfo* dst, s32 index) const {
+    ASSERT(m_body_storage != nullptr);
+    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+    ASSERT(dst != nullptr);
+    std::memcpy(dst, std::addressof(m_header.fs_info[index]), sizeof(*dst));
+}
+
+u64 NcaReader::GetFsOffset(s32 index) const {
+    ASSERT(m_body_storage != nullptr);
+    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+    return NcaHeader::SectorToByte(m_header.fs_info[index].start_sector);
+}
+
+u64 NcaReader::GetFsEndOffset(s32 index) const {
+    ASSERT(m_body_storage != nullptr);
+    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+    return NcaHeader::SectorToByte(m_header.fs_info[index].end_sector);
+}
+
+u64 NcaReader::GetFsSize(s32 index) const {
+    ASSERT(m_body_storage != nullptr);
+    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+    return NcaHeader::SectorToByte(m_header.fs_info[index].end_sector -
+                                   m_header.fs_info[index].start_sector);
+}
+
+void NcaReader::GetEncryptedKey(void* dst, size_t size) const {
+    ASSERT(m_body_storage != nullptr);
+    ASSERT(dst != nullptr);
+    ASSERT(size >= NcaHeader::EncryptedKeyAreaSize);
+
+    std::memcpy(dst, m_header.encrypted_key_area.data(), NcaHeader::EncryptedKeyAreaSize);
+}
+
+const void* NcaReader::GetDecryptionKey(s32 index) const {
+    ASSERT(m_body_storage != nullptr);
+    ASSERT(0 <= index && index < NcaHeader::DecryptionKey_Count);
+    return m_decryption_keys[index];
+}
+
+bool NcaReader::HasValidInternalKey() const {
+    for (s32 i = 0; i < NcaHeader::DecryptionKey_Count; i++) {
+        if (std::memcmp(ZeroKey.data(), m_header.encrypted_key_area.data() + i * Aes128KeySize,
+                        Aes128KeySize) != 0) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool NcaReader::HasInternalDecryptionKeyForAesHw() const {
+    return std::memcmp(ZeroKey.data(), this->GetDecryptionKey(NcaHeader::DecryptionKey_AesCtrHw),
+                       Aes128KeySize) != 0;
+}
+
+bool NcaReader::IsSoftwareAesPrioritized() const {
+    return m_is_software_aes_prioritized;
+}
+
+void NcaReader::PrioritizeSoftwareAes() {
+    m_is_software_aes_prioritized = true;
+}
+
+bool NcaReader::IsAvailableSwKey() const {
+    return m_is_available_sw_key;
+}
+
+bool NcaReader::HasExternalDecryptionKey() const {
+    return std::memcmp(ZeroKey.data(), this->GetExternalDecryptionKey(), Aes128KeySize) != 0;
+}
+
+const void* NcaReader::GetExternalDecryptionKey() const {
+    return m_external_decryption_key;
+}
+
+void NcaReader::SetExternalDecryptionKey(const void* src, size_t size) {
+    ASSERT(src != nullptr);
+    ASSERT(size == sizeof(m_external_decryption_key));
+
+    std::memcpy(m_external_decryption_key, src, sizeof(m_external_decryption_key));
+}
+
+void NcaReader::GetRawData(void* dst, size_t dst_size) const {
+    ASSERT(m_body_storage != nullptr);
+    ASSERT(dst != nullptr);
+    ASSERT(dst_size >= sizeof(NcaHeader));
+
+    std::memcpy(dst, std::addressof(m_header), sizeof(NcaHeader));
+}
+
+GetDecompressorFunction NcaReader::GetDecompressor() const {
+    ASSERT(m_get_decompressor != nullptr);
+    return m_get_decompressor;
+}
+
+NcaHeader::EncryptionType NcaReader::GetEncryptionType() const {
+    return m_header_encryption_type;
+}
+
+Result NcaReader::ReadHeader(NcaFsHeader* dst, s32 index) const {
+    ASSERT(dst != nullptr);
+    ASSERT(0 <= index && index < NcaHeader::FsCountMax);
+
+    const s64 offset = sizeof(NcaHeader) + sizeof(NcaFsHeader) * index;
+    m_header_storage->ReadObject(dst, offset);
+
+    R_SUCCEED();
+}
+
+bool NcaReader::GetHeaderSign1Valid() const {
+    return m_is_header_sign1_signature_valid;
+}
+
+void NcaReader::GetHeaderSign2(void* dst, size_t size) const {
+    ASSERT(dst != nullptr);
+    ASSERT(size == NcaHeader::HeaderSignSize);
+
+    std::memcpy(dst, m_header.header_sign_2.data(), size);
+}
+
+Result NcaFsHeaderReader::Initialize(const NcaReader& reader, s32 index) {
+    // Reset ourselves to uninitialized.
+    m_fs_index = -1;
+
+    // Read the header.
+    R_TRY(reader.ReadHeader(std::addressof(m_data), index));
+
+    // Set our index.
+    m_fs_index = index;
+    R_SUCCEED();
+}
+
+void NcaFsHeaderReader::GetRawData(void* dst, size_t dst_size) const {
+    ASSERT(this->IsInitialized());
+    ASSERT(dst != nullptr);
+    ASSERT(dst_size >= sizeof(NcaFsHeader));
+
+    std::memcpy(dst, std::addressof(m_data), sizeof(NcaFsHeader));
+}
+
+NcaFsHeader::HashData& NcaFsHeaderReader::GetHashData() {
+    ASSERT(this->IsInitialized());
+    return m_data.hash_data;
+}
+
+const NcaFsHeader::HashData& NcaFsHeaderReader::GetHashData() const {
+    ASSERT(this->IsInitialized());
+    return m_data.hash_data;
+}
+
+u16 NcaFsHeaderReader::GetVersion() const {
+    ASSERT(this->IsInitialized());
+    return m_data.version;
+}
+
+s32 NcaFsHeaderReader::GetFsIndex() const {
+    ASSERT(this->IsInitialized());
+    return m_fs_index;
+}
+
+NcaFsHeader::FsType NcaFsHeaderReader::GetFsType() const {
+    ASSERT(this->IsInitialized());
+    return m_data.fs_type;
+}
+
+NcaFsHeader::HashType NcaFsHeaderReader::GetHashType() const {
+    ASSERT(this->IsInitialized());
+    return m_data.hash_type;
+}
+
+NcaFsHeader::EncryptionType NcaFsHeaderReader::GetEncryptionType() const {
+    ASSERT(this->IsInitialized());
+    return m_data.encryption_type;
+}
+
+NcaPatchInfo& NcaFsHeaderReader::GetPatchInfo() {
+    ASSERT(this->IsInitialized());
+    return m_data.patch_info;
+}
+
+const NcaPatchInfo& NcaFsHeaderReader::GetPatchInfo() const {
+    ASSERT(this->IsInitialized());
+    return m_data.patch_info;
+}
+
+const NcaAesCtrUpperIv NcaFsHeaderReader::GetAesCtrUpperIv() const {
+    ASSERT(this->IsInitialized());
+    return m_data.aes_ctr_upper_iv;
+}
+
+bool NcaFsHeaderReader::IsSkipLayerHashEncryption() const {
+    ASSERT(this->IsInitialized());
+    return m_data.IsSkipLayerHashEncryption();
+}
+
+Result NcaFsHeaderReader::GetHashTargetOffset(s64* out) const {
+    ASSERT(out != nullptr);
+    ASSERT(this->IsInitialized());
+
+    R_RETURN(m_data.GetHashTargetOffset(out));
+}
+
+bool NcaFsHeaderReader::ExistsSparseLayer() const {
+    ASSERT(this->IsInitialized());
+    return m_data.sparse_info.generation != 0;
+}
+
+NcaSparseInfo& NcaFsHeaderReader::GetSparseInfo() {
+    ASSERT(this->IsInitialized());
+    return m_data.sparse_info;
+}
+
+const NcaSparseInfo& NcaFsHeaderReader::GetSparseInfo() const {
+    ASSERT(this->IsInitialized());
+    return m_data.sparse_info;
+}
+
+bool NcaFsHeaderReader::ExistsCompressionLayer() const {
+    ASSERT(this->IsInitialized());
+    return m_data.compression_info.bucket.offset != 0 && m_data.compression_info.bucket.size != 0;
+}
+
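+// (Editorial usage sketch, not part of the original patch: a driver is expected to
+// initialize a header reader for one filesystem entry and branch on the layer
+// queries above, along the lines of:
+//   NcaFsHeaderReader header_reader;
+//   R_TRY(header_reader.Initialize(reader, fs_index));
+//   if (header_reader.ExistsSparseLayer())      { /* back unmapped ranges with zeros */ }
+//   if (header_reader.ExistsCompressionLayer()) { /* wrap reads in a decompressor */ }
+// where `reader` is an initialized NcaReader and `fs_index` is hypothetical.)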
+NcaCompressionInfo& NcaFsHeaderReader::GetCompressionInfo() {
+    ASSERT(this->IsInitialized());
+    return m_data.compression_info;
+}
+
+const NcaCompressionInfo& NcaFsHeaderReader::GetCompressionInfo() const {
+    ASSERT(this->IsInitialized());
+    return m_data.compression_info;
+}
+
+bool NcaFsHeaderReader::ExistsPatchMetaHashLayer() const {
+    ASSERT(this->IsInitialized());
+    return m_data.meta_data_hash_data_info.size != 0 && this->GetPatchInfo().HasIndirectTable();
+}
+
+NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetPatchMetaDataHashDataInfo() {
+    ASSERT(this->IsInitialized());
+    return m_data.meta_data_hash_data_info;
+}
+
+const NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetPatchMetaDataHashDataInfo() const {
+    ASSERT(this->IsInitialized());
+    return m_data.meta_data_hash_data_info;
+}
+
+NcaFsHeader::MetaDataHashType NcaFsHeaderReader::GetPatchMetaHashType() const {
+    ASSERT(this->IsInitialized());
+    return m_data.meta_data_hash_type;
+}
+
+bool NcaFsHeaderReader::ExistsSparseMetaHashLayer() const {
+    ASSERT(this->IsInitialized());
+    return m_data.meta_data_hash_data_info.size != 0 && this->ExistsSparseLayer();
+}
+
+NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetSparseMetaDataHashDataInfo() {
+    ASSERT(this->IsInitialized());
+    return m_data.meta_data_hash_data_info;
+}
+
+const NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetSparseMetaDataHashDataInfo() const {
+    ASSERT(this->IsInitialized());
+    return m_data.meta_data_hash_data_info;
+}
+
+NcaFsHeader::MetaDataHashType NcaFsHeaderReader::GetSparseMetaHashType() const {
+    ASSERT(this->IsInitialized());
+    return m_data.meta_data_hash_type;
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp b/src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp
new file mode 100644
index 000000000..bbfaab255
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_pooled_buffer.cpp
@@ -0,0 +1,61 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/alignment.h"
+#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
+
+namespace FileSys {
+
+namespace {
+
+constexpr size_t HeapBlockSize = BufferPoolAlignment;
+static_assert(HeapBlockSize == 4_KiB);
+
+// A heap block is 4KiB, and an order is a power-of-two multiplier on it. With
+// HeapOrderMax = 7 the largest normal allocation is 4KiB << 7 = 512KiB; particularly
+// large allocations extend this by three orders to 4KiB << 10 = 4MiB.
+constexpr s32 HeapOrderMax = 7;
+constexpr s32 HeapOrderMaxForLarge = HeapOrderMax + 3;
+
+constexpr size_t HeapAllocatableSizeMax = HeapBlockSize * (static_cast<size_t>(1) << HeapOrderMax);
+constexpr size_t HeapAllocatableSizeMaxForLarge =
+    HeapBlockSize * (static_cast<size_t>(1) << HeapOrderMaxForLarge);
+
+} // namespace
+
+size_t PooledBuffer::GetAllocatableSizeMaxCore(bool large) {
+    return large ? HeapAllocatableSizeMaxForLarge : HeapAllocatableSizeMax;
+}
+
+void PooledBuffer::AllocateCore(size_t ideal_size, size_t required_size, bool large) {
+    // Ensure preconditions.
+    ASSERT(m_buffer == nullptr);
+
+    // Check that we can allocate this size.
+    ASSERT(required_size <= GetAllocatableSizeMaxCore(large));
+
+    const size_t target_size =
+        std::min(std::max(ideal_size, required_size), GetAllocatableSizeMaxCore(large));
+
+    // Dummy implementation for allocate: fall back to aligned global new in place of a real pool.
+    if (target_size > 0) {
+        m_buffer =
+            reinterpret_cast<char*>(::operator new(target_size, std::align_val_t{HeapBlockSize}));
+        m_size = target_size;
+
+        // Ensure postconditions.
+        ASSERT(m_buffer != nullptr);
+    }
+}
+
+void PooledBuffer::Shrink(size_t ideal_size) {
+    ASSERT(ideal_size <= GetAllocatableSizeMaxCore(true));
+
+    // Shrinking to zero means that we have no buffer.
+    if (ideal_size == 0) {
+        ::operator delete(m_buffer, std::align_val_t{HeapBlockSize});
+        m_buffer = nullptr;
+        m_size = ideal_size;
+    }
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_pooled_buffer.h b/src/core/file_sys/fssystem/fssystem_pooled_buffer.h
new file mode 100644
index 000000000..1df3153a1
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_pooled_buffer.h
@@ -0,0 +1,96 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "common/literals.h"
+#include "core/hle/result.h"
+
+namespace FileSys {
+
+using namespace Common::Literals;
+
+constexpr inline size_t BufferPoolAlignment = 4_KiB;
+constexpr inline size_t BufferPoolWorkSize = 320;
+
+class PooledBuffer {
+    YUZU_NON_COPYABLE(PooledBuffer);
+
+private:
+    char* m_buffer;
+    size_t m_size;
+
+private:
+    static size_t GetAllocatableSizeMaxCore(bool large);
+
+public:
+    static size_t GetAllocatableSizeMax() {
+        return GetAllocatableSizeMaxCore(false);
+    }
+    static size_t GetAllocatableParticularlyLargeSizeMax() {
+        return GetAllocatableSizeMaxCore(true);
+    }
+
+private:
+    void Swap(PooledBuffer& rhs) {
+        std::swap(m_buffer, rhs.m_buffer);
+        std::swap(m_size, rhs.m_size);
+    }
+
+public:
+    // Constructor/Destructor.
+    constexpr PooledBuffer() : m_buffer(), m_size() {}
+
+    PooledBuffer(size_t ideal_size, size_t required_size) : m_buffer(), m_size() {
+        this->Allocate(ideal_size, required_size);
+    }
+
+    ~PooledBuffer() {
+        this->Deallocate();
+    }
+
+    // Move and assignment.
+    explicit PooledBuffer(PooledBuffer&& rhs) : m_buffer(rhs.m_buffer), m_size(rhs.m_size) {
+        rhs.m_buffer = nullptr;
+        rhs.m_size = 0;
+    }
+
+    PooledBuffer& operator=(PooledBuffer&& rhs) {
+        PooledBuffer(std::move(rhs)).Swap(*this);
+        return *this;
+    }
+
+    // Allocation API.
+    void Allocate(size_t ideal_size, size_t required_size) {
+        return this->AllocateCore(ideal_size, required_size, false);
+    }
+
+    void AllocateParticularlyLarge(size_t ideal_size, size_t required_size) {
+        return this->AllocateCore(ideal_size, required_size, true);
+    }
+
+    void Shrink(size_t ideal_size);
+
+    void Deallocate() {
+        // Shrink the buffer to empty.
+        this->Shrink(0);
+        ASSERT(m_buffer == nullptr);
+    }
+
+    char* GetBuffer() const {
+        ASSERT(m_buffer != nullptr);
+        return m_buffer;
+    }
+
+    size_t GetSize() const {
+        ASSERT(m_buffer != nullptr);
+        return m_size;
+    }
+
+private:
+    void AllocateCore(size_t ideal_size, size_t required_size, bool large);
+};
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_sparse_storage.cpp b/src/core/file_sys/fssystem/fssystem_sparse_storage.cpp
new file mode 100644
index 000000000..05e8820f7
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_sparse_storage.cpp
@@ -0,0 +1,40 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/fssystem/fssystem_sparse_storage.h"
+
+namespace FileSys {
+
+size_t SparseStorage::Read(u8* buffer, size_t size, size_t offset) const {
+    // Validate preconditions.
+    ASSERT(offset >= 0);
+    ASSERT(this->IsInitialized());
+    ASSERT(buffer != nullptr);
+
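+    // (Editorial note, not part of the original patch: a sparse storage only backs
+    // the extents that actually contain data. Ranges resolved through the entry
+    // table are forwarded to the data storage; everything else falls through to
+    // ZeroStorage, so reading a wholly unmapped range behaves like
+    // std::memset(buffer, 0, size).)
+
+    // Allow zero size.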
+    if (size == 0) {
+        return size;
+    }
+
+    SparseStorage* self = const_cast<SparseStorage*>(this);
+
+    if (self->GetEntryTable().IsEmpty()) {
+        BucketTree::Offsets table_offsets;
+        ASSERT(R_SUCCEEDED(self->GetEntryTable().GetOffsets(std::addressof(table_offsets))));
+        ASSERT(table_offsets.IsInclude(offset, size));
+
+        std::memset(buffer, 0, size);
+    } else {
+        self->OperatePerEntry<false, true>(
+            offset, size,
+            [=](VirtualFile storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
+                storage->Read(reinterpret_cast<u8*>(buffer) + (cur_offset - offset),
+                              static_cast<size_t>(cur_size), data_offset);
+                R_SUCCEED();
+            });
+    }
+
+    return size;
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_sparse_storage.h b/src/core/file_sys/fssystem/fssystem_sparse_storage.h
new file mode 100644
index 000000000..c1ade7195
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_sparse_storage.h
@@ -0,0 +1,73 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/fssystem/fssystem_indirect_storage.h"
+
+namespace FileSys {
+
+class SparseStorage : public IndirectStorage {
+    YUZU_NON_COPYABLE(SparseStorage);
+    YUZU_NON_MOVEABLE(SparseStorage);
+
+private:
+    class ZeroStorage : public IReadOnlyStorage {
+    public:
+        ZeroStorage() {}
+        virtual ~ZeroStorage() {}
+
+        virtual size_t GetSize() const override {
+            return std::numeric_limits<size_t>::max();
+        }
+
+        virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
+            ASSERT(offset >= 0);
+            ASSERT(buffer != nullptr || size == 0);
+
+            if (size > 0) {
+                std::memset(buffer, 0, size);
+            }
+
+            return size;
+        }
+    };
+
+private:
+    VirtualFile m_zero_storage;
+
+public:
+    SparseStorage() : IndirectStorage(), m_zero_storage(std::make_shared<ZeroStorage>()) {}
+    virtual ~SparseStorage() {}
+
+    using IndirectStorage::Initialize;
+
+    void Initialize(s64 end_offset) {
+        this->GetEntryTable().Initialize(NodeSize, end_offset);
+        this->SetZeroStorage();
+    }
+
+    void SetDataStorage(VirtualFile storage) {
+        ASSERT(this->IsInitialized());
+
+        this->SetStorage(0, storage);
+        this->SetZeroStorage();
+    }
+
+    template <typename T>
+    void SetDataStorage(T storage, s64 offset, s64 size) {
+        ASSERT(this->IsInitialized());
+
+        this->SetStorage(0, storage, offset, size);
+        this->SetZeroStorage();
+    }
+
+    virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
+
+private:
+    void SetZeroStorage() {
+        return this->SetStorage(1, m_zero_storage, 0, std::numeric_limits<s64>::max());
+    }
+};
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_switch_storage.h b/src/core/file_sys/fssystem/fssystem_switch_storage.h
new file mode 100644
index 000000000..140f21ab7
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_switch_storage.h
@@ -0,0 +1,80 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "core/file_sys/fssystem/fs_i_storage.h"
+
+namespace FileSys {
+
+class RegionSwitchStorage : public IReadOnlyStorage {
+    YUZU_NON_COPYABLE(RegionSwitchStorage);
+    YUZU_NON_MOVEABLE(RegionSwitchStorage);
+
+public:
+    struct Region {
+        s64 offset;
+        s64 size;
+    };
+
+private:
+    VirtualFile m_inside_region_storage;
+    VirtualFile m_outside_region_storage;
+    Region m_region;
+
+public:
+    RegionSwitchStorage(VirtualFile&& i, VirtualFile&& o, Region r)
+        : m_inside_region_storage(std::move(i)), m_outside_region_storage(std::move(o)),
+          m_region(r) {}
+
+    virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
+        // Process until we're done.
+        size_t processed = 0;
+        while (processed < size) {
+            // Process on the appropriate storage.
+            s64 cur_size = 0;
+            if (this->CheckRegions(std::addressof(cur_size), offset + processed,
+                                   size - processed)) {
+                m_inside_region_storage->Read(buffer + processed, cur_size, offset + processed);
+            } else {
+                m_outside_region_storage->Read(buffer + processed, cur_size, offset + processed);
+            }
+
+            // Advance.
+            processed += cur_size;
+        }
+
+        return size;
+    }
+
+    virtual size_t GetSize() const override {
+        return m_inside_region_storage->GetSize();
+    }
+
+private:
+    bool CheckRegions(s64* out_current_size, s64 offset, s64 size) const {
+        // Check if our region contains the access.
+        if (m_region.offset <= offset) {
+            if (offset < m_region.offset + m_region.size) {
+                if (m_region.offset + m_region.size <= offset + size) {
+                    *out_current_size = m_region.offset + m_region.size - offset;
+                } else {
+                    *out_current_size = size;
+                }
+                return true;
+            } else {
+                *out_current_size = size;
+                return false;
+            }
+        } else {
+            if (m_region.offset <= offset + size) {
+                *out_current_size = m_region.offset - offset;
+            } else {
+                *out_current_size = size;
+            }
+            return false;
+        }
+    }
+};
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_utility.cpp b/src/core/file_sys/fssystem/fssystem_utility.cpp
new file mode 100644
index 000000000..4dddfd75a
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_utility.cpp
@@ -0,0 +1,24 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "core/file_sys/fssystem/fssystem_utility.h"
+
+namespace FileSys {
+
+void AddCounter(void* counter_, size_t counter_size, u64 value) {
+    u8* counter = static_cast<u8*>(counter_);
+    u64 remaining = value;
+    u8 carry = 0;
+
+    for (size_t i = 0; i < counter_size; i++) {
+        auto sum = counter[counter_size - 1 - i] + (remaining & 0xFF) + carry;
+        carry = static_cast<u8>(sum >> (sizeof(u8) * 8));
+        auto sum8 = static_cast<u8>(sum & 0xFF);
+
+        counter[counter_size - 1 - i] = sum8;
+
+        remaining >>= (sizeof(u8) * 8);
+        if (carry == 0 && remaining == 0) {
+            break;
+        }
+    }
+}
+
+} // namespace FileSys
diff --git a/src/core/file_sys/fssystem/fssystem_utility.h b/src/core/file_sys/fssystem/fssystem_utility.h
new file mode 100644
index 000000000..284b8b811
--- /dev/null
+++ b/src/core/file_sys/fssystem/fssystem_utility.h
@@ -0,0 +1,12 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include "common/common_funcs.h"
+
+namespace FileSys {
+
+void AddCounter(void* counter, size_t counter_size, u64 value);
+
+} // namespace FileSys
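Editorial addendum (not part of the commit): AddCounter treats the buffer as one
big-endian integer and adds `value` with byte-wise carry, which is how AES-CTR
counters are advanced between blocks. A minimal usage sketch, assuming yuzu's `u8`
alias from common_types and hypothetical values:

    // 16-byte CTR counter whose two low bytes are all ones: ... 00 FF FF.
    std::array<u8, 16> ctr{};
    ctr[14] = 0xFF;
    ctr[15] = 0xFF;

    // Adding 1 carries through both low bytes, yielding ... 01 00 00.
    FileSys::AddCounter(ctr.data(), ctr.size(), 1);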