| Field | Value |
|---|---|
| author | David Marcec <dmarcecguzman@gmail.com> (2018-04-26 23:28:54 +0200) |
| committer | David Marcec <dmarcecguzman@gmail.com> (2018-04-26 23:28:54 +0200) |
| commit | 7391741a204d6f25a06132eda214b2199b60a084 (patch) |
| tree | aeeb723744c4563ad608361b82dd938b062a3e09 /src/video_core |
| parent | Added PREPO to logging backend, Removed comments from SaveReportWithUser (diff) |
| parent | Merge pull request #403 from lioncash/common (diff) |
Diffstat (limited to 'src/video_core')
20 files changed, 740 insertions, 294 deletions
```diff
diff --git a/src/video_core/command_processor.cpp b/src/video_core/command_processor.cpp
index d4cdb4ab2..2eaece298 100644
--- a/src/video_core/command_processor.cpp
+++ b/src/video_core/command_processor.cpp
@@ -24,41 +24,18 @@ namespace Tegra {
 
 enum class BufferMethods {
     BindObject = 0,
-    SetGraphMacroCode = 0x45,
-    SetGraphMacroCodeArg = 0x46,
-    SetGraphMacroEntry = 0x47,
-    CountBufferMethods = 0x100,
+    CountBufferMethods = 0x40,
 };
 
 void GPU::WriteReg(u32 method, u32 subchannel, u32 value, u32 remaining_params) {
-    LOG_WARNING(HW_GPU, "Processing method %08X on subchannel %u value %08X remaining params %u",
-                method, subchannel, value, remaining_params);
-
-    if (method == static_cast<u32>(BufferMethods::SetGraphMacroEntry)) {
-        // Prepare to upload a new macro, reset the upload counter.
-        LOG_DEBUG(HW_GPU, "Uploading GPU macro %08X", value);
-        current_macro_entry = value;
-        current_macro_code.clear();
-        return;
-    }
-
-    if (method == static_cast<u32>(BufferMethods::SetGraphMacroCodeArg)) {
-        // Append a new code word to the current macro.
-        current_macro_code.push_back(value);
-
-        // There are no more params remaining, submit the code to the 3D engine.
-        if (remaining_params == 0) {
-            maxwell_3d->SubmitMacroCode(current_macro_entry, std::move(current_macro_code));
-            current_macro_entry = InvalidGraphMacroEntry;
-            current_macro_code.clear();
-        }
-
-        return;
-    }
+    NGLOG_WARNING(HW_GPU,
+                  "Processing method {:08X} on subchannel {} value "
+                  "{:08X} remaining params {}",
+                  method, subchannel, value, remaining_params);
 
     if (method == static_cast<u32>(BufferMethods::BindObject)) {
         // Bind the current subchannel to the desired engine id.
-        LOG_DEBUG(HW_GPU, "Binding subchannel %u to engine %u", subchannel, value);
+        NGLOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", subchannel, value);
         ASSERT(bound_engines.find(subchannel) == bound_engines.end());
         bound_engines[subchannel] = static_cast<EngineID>(value);
         return;
@@ -66,7 +43,7 @@ void GPU::WriteReg(u32 method, u32 subchannel, u32 value, u32 remaining_params)
 
     if (method < static_cast<u32>(BufferMethods::CountBufferMethods)) {
         // TODO(Subv): Research and implement these methods.
-        LOG_ERROR(HW_GPU, "Special buffer methods other than Bind are not implemented");
+        NGLOG_ERROR(HW_GPU, "Special buffer methods other than Bind are not implemented");
         return;
     }
@@ -90,11 +67,9 @@ void GPU::WriteReg(u32 method, u32 subchannel, u32 value, u32 remaining_params)
 }
 
 void GPU::ProcessCommandList(GPUVAddr address, u32 size) {
-    // TODO(Subv): PhysicalToVirtualAddress is a misnomer, it converts a GPU VAddr into an
-    // application VAddr.
-    const VAddr head_address = memory_manager->PhysicalToVirtualAddress(address);
-    VAddr current_addr = head_address;
-    while (current_addr < head_address + size * sizeof(CommandHeader)) {
+    const boost::optional<VAddr> head_address = memory_manager->GpuToCpuAddress(address);
+    VAddr current_addr = *head_address;
+    while (current_addr < *head_address + size * sizeof(CommandHeader)) {
         const CommandHeader header = {Memory::Read32(current_addr)};
         current_addr += sizeof(u32);
```
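The substantive change above, beyond the logging migration, is that GPU-to-CPU address translation can now fail: `GpuToCpuAddress` returns `boost::optional<VAddr>` where `PhysicalToVirtualAddress` returned a bare `VAddr` (note that `ProcessCommandList` still dereferences the optional unconditionally). A minimal sketch of the pattern, using `std::optional` and a toy page map in place of the real `MemoryManager`:

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <unordered_map>

using GPUVAddr = std::uint64_t;
using VAddr = std::uint64_t;

// Toy page map standing in for MemoryManager (hypothetical): GPU page ->
// CPU base address; absent entries model unmapped or allocated-only pages.
static std::unordered_map<GPUVAddr, VAddr> page_map = {{0x1000, 0x8000}};

std::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr) {
    const auto it = page_map.find(gpu_addr & ~0xFFFull); // 4 KiB toy pages
    if (it == page_map.end())
        return std::nullopt; // translation can fail now
    return it->second + (gpu_addr & 0xFFFull);
}

int main() {
    if (const auto cpu = GpuToCpuAddress(0x1234))
        std::cout << "mapped to CPU address 0x" << std::hex << *cpu << '\n';
    else
        std::cout << "unmapped GPU address\n";
}
```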
```diff
diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp
index 7aab163dc..9019f2504 100644
--- a/src/video_core/engines/fermi_2d.cpp
+++ b/src/video_core/engines/fermi_2d.cpp
@@ -2,12 +2,71 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include "core/memory.h"
 #include "video_core/engines/fermi_2d.h"
+#include "video_core/textures/decoders.h"
 
 namespace Tegra {
 namespace Engines {
 
-void Fermi2D::WriteReg(u32 method, u32 value) {}
+Fermi2D::Fermi2D(MemoryManager& memory_manager) : memory_manager(memory_manager) {}
+
+void Fermi2D::WriteReg(u32 method, u32 value) {
+    ASSERT_MSG(method < Regs::NUM_REGS,
+               "Invalid Fermi2D register, increase the size of the Regs structure");
+
+    regs.reg_array[method] = value;
+
+    switch (method) {
+    case FERMI2D_REG_INDEX(trigger): {
+        HandleSurfaceCopy();
+        break;
+    }
+    }
+}
+
+void Fermi2D::HandleSurfaceCopy() {
+    NGLOG_WARNING(HW_GPU, "Requested a surface copy with operation {}",
+                  static_cast<u32>(regs.operation));
+
+    const GPUVAddr source = regs.src.Address();
+    const GPUVAddr dest = regs.dst.Address();
+
+    // TODO(Subv): Only same-format and same-size copies are allowed for now.
+    ASSERT(regs.src.format == regs.dst.format);
+    ASSERT(regs.src.width * regs.src.height == regs.dst.width * regs.dst.height);
+
+    // TODO(Subv): Only raw copies are implemented.
+    ASSERT(regs.operation == Regs::Operation::SrcCopy);
+
+    const VAddr source_cpu = *memory_manager.GpuToCpuAddress(source);
+    const VAddr dest_cpu = *memory_manager.GpuToCpuAddress(dest);
+
+    u32 src_bytes_per_pixel = RenderTargetBytesPerPixel(regs.src.format);
+    u32 dst_bytes_per_pixel = RenderTargetBytesPerPixel(regs.dst.format);
+
+    if (regs.src.linear == regs.dst.linear) {
+        // If the input layout and the output layout are the same, just perform a raw copy.
+        Memory::CopyBlock(dest_cpu, source_cpu,
+                          src_bytes_per_pixel * regs.dst.width * regs.dst.height);
+        return;
+    }
+
+    u8* src_buffer = Memory::GetPointer(source_cpu);
+    u8* dst_buffer = Memory::GetPointer(dest_cpu);
+
+    if (!regs.src.linear && regs.dst.linear) {
+        // If the input is tiled and the output is linear, deswizzle the input and copy it over.
+        Texture::CopySwizzledData(regs.src.width, regs.src.height, src_bytes_per_pixel,
+                                  dst_bytes_per_pixel, src_buffer, dst_buffer, true,
+                                  regs.src.block_height);
+    } else {
+        // If the input is linear and the output is tiled, swizzle the input and copy it over.
+        Texture::CopySwizzledData(regs.src.width, regs.src.height, src_bytes_per_pixel,
+                                  dst_bytes_per_pixel, dst_buffer, src_buffer, false,
+                                  regs.dst.block_height);
+    }
+}
 
 } // namespace Engines
 } // namespace Tegra
```
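`HandleSurfaceCopy` reduces to exactly three cases once the ASSERTs have pinned format, size, and operation: same layout means a raw block copy, otherwise the tiled side decides the swizzle direction. A compact sketch of that decision (names invented):

```cpp
#include <cstdio>

// The three copy paths HandleSurfaceCopy distinguishes, keyed only on the
// src/dst `linear` bits; everything else is assumed equal, as the ASSERTs
// in the diff above require.
enum class CopyPath { Raw, Deswizzle, Swizzle };

CopyPath SelectCopyPath(bool src_linear, bool dst_linear) {
    if (src_linear == dst_linear)
        return CopyPath::Raw;       // same layout: plain block copy
    if (!src_linear && dst_linear)
        return CopyPath::Deswizzle; // tiled -> linear
    return CopyPath::Swizzle;       // linear -> tiled
}

int main() {
    std::printf("%d\n", static_cast<int>(SelectCopyPath(false, true))); // 1 = Deswizzle
}
```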
```diff
diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h
index 8967ddede..0c5b413cc 100644
--- a/src/video_core/engines/fermi_2d.h
+++ b/src/video_core/engines/fermi_2d.h
@@ -4,19 +4,106 @@
 
 #pragma once
 
+#include <array>
+#include "common/assert.h"
+#include "common/bit_field.h"
+#include "common/common_funcs.h"
 #include "common/common_types.h"
+#include "video_core/gpu.h"
+#include "video_core/memory_manager.h"
 
 namespace Tegra {
 namespace Engines {
 
+#define FERMI2D_REG_INDEX(field_name)                                                              \
+    (offsetof(Tegra::Engines::Fermi2D::Regs, field_name) / sizeof(u32))
+
 class Fermi2D final {
 public:
-    Fermi2D() = default;
+    explicit Fermi2D(MemoryManager& memory_manager);
     ~Fermi2D() = default;
 
     /// Write the value to the register identified by method.
     void WriteReg(u32 method, u32 value);
+
+    struct Regs {
+        static constexpr size_t NUM_REGS = 0x258;
+
+        struct Surface {
+            RenderTargetFormat format;
+            BitField<0, 1, u32> linear;
+            union {
+                BitField<0, 4, u32> block_depth;
+                BitField<4, 4, u32> block_height;
+                BitField<8, 4, u32> block_width;
+            };
+            u32 depth;
+            u32 layer;
+            u32 pitch;
+            u32 width;
+            u32 height;
+            u32 address_high;
+            u32 address_low;
+
+            GPUVAddr Address() const {
+                return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
+                                             address_low);
+            }
+        };
+        static_assert(sizeof(Surface) == 0x28, "Surface has incorrect size");
+
+        enum class Operation : u32 {
+            SrcCopyAnd = 0,
+            ROPAnd = 1,
+            Blend = 2,
+            SrcCopy = 3,
+            ROP = 4,
+            SrcCopyPremult = 5,
+            BlendPremult = 6,
+        };
+
+        union {
+            struct {
+                INSERT_PADDING_WORDS(0x80);
+
+                Surface dst;
+
+                INSERT_PADDING_WORDS(2);
+
+                Surface src;
+
+                INSERT_PADDING_WORDS(0x15);
+
+                Operation operation;
+
+                INSERT_PADDING_WORDS(0x9);
+
+                // TODO(Subv): This is only a guess.
+                u32 trigger;
+
+                INSERT_PADDING_WORDS(0x1A3);
+            };
+            std::array<u32, NUM_REGS> reg_array;
+        };
+    } regs{};
+
+    MemoryManager& memory_manager;
+
+private:
+    /// Performs the copy from the source surface to the destination surface as configured in the
+    /// registers.
+    void HandleSurfaceCopy();
 };
 
+#define ASSERT_REG_POSITION(field_name, position)                                                  \
+    static_assert(offsetof(Fermi2D::Regs, field_name) == position * 4,                             \
+                  "Field " #field_name " has invalid position")
+
+ASSERT_REG_POSITION(dst, 0x80);
+ASSERT_REG_POSITION(src, 0x8C);
+ASSERT_REG_POSITION(operation, 0xAB);
+ASSERT_REG_POSITION(trigger, 0xB5);
+#undef ASSERT_REG_POSITION
+
 } // namespace Engines
 } // namespace Tegra
```
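The new header uses the same register-file idiom as Maxwell3D: a named struct overlaid on a flat `u32` array, with `offsetof` turning a field name into the method index that `WriteReg` dispatches on (`FERMI2D_REG_INDEX`). A reduced, compilable sketch of the idiom — field names invented, and like the real headers it relies on the anonymous-struct extension that GCC/Clang/MSVC accept:

```cpp
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct Regs {
    static constexpr std::size_t NUM_REGS = 8;
    union {
        struct {
            std::uint32_t pad0[3];
            std::uint32_t trigger; // lives at word index 3
            std::uint32_t pad1[4];
        };
        std::array<std::uint32_t, NUM_REGS> reg_array;
    };
};

#define REG_INDEX(field) (offsetof(Regs, field) / sizeof(std::uint32_t))

void WriteReg(Regs& regs, std::uint32_t method, std::uint32_t value) {
    regs.reg_array[method] = value; // every write lands in the array...
    switch (method) {
    case REG_INDEX(trigger): // ...and some indices have side effects
        std::printf("trigger fired with value %u\n", value);
        break;
    }
}

int main() {
    Regs regs{};
    WriteReg(regs, REG_INDEX(trigger), 1);
}
```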
```diff
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 2a3ff234a..4306b894f 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -22,10 +22,6 @@ constexpr u32 MacroRegistersStart = 0xE00;
 Maxwell3D::Maxwell3D(MemoryManager& memory_manager)
     : memory_manager(memory_manager), macro_interpreter(*this) {}
 
-void Maxwell3D::SubmitMacroCode(u32 entry, std::vector<u32> code) {
-    uploaded_macros[entry * 2 + MacroRegistersStart] = std::move(code);
-}
-
 void Maxwell3D::CallMacroMethod(u32 method, std::vector<u32> parameters) {
     auto macro_code = uploaded_macros.find(method);
     // The requested macro must have been uploaded already.
@@ -37,9 +33,6 @@ void Maxwell3D::CallMacroMethod(u32 method, std::vector<u32> parameters) {
 }
 
 void Maxwell3D::WriteReg(u32 method, u32 value, u32 remaining_params) {
-    ASSERT_MSG(method < Regs::NUM_REGS,
-               "Invalid Maxwell3D register, increase the size of the Regs structure");
-
     auto debug_context = Core::System::GetInstance().GetGPUDebugContext();
 
     // It is an error to write to a register other than the current macro's ARG register before it
@@ -68,6 +61,9 @@ void Maxwell3D::WriteReg(u32 method, u32 value, u32 remaining_params) {
         return;
     }
 
+    ASSERT_MSG(method < Regs::NUM_REGS,
+               "Invalid Maxwell3D register, increase the size of the Regs structure");
+
     if (debug_context) {
         debug_context->OnEvent(Tegra::DebugContext::Event::MaxwellCommandLoaded, nullptr);
     }
@@ -75,6 +71,10 @@ void Maxwell3D::WriteReg(u32 method, u32 value, u32 remaining_params) {
     regs.reg_array[method] = value;
 
     switch (method) {
+    case MAXWELL3D_REG_INDEX(macros.data): {
+        ProcessMacroUpload(value);
+        break;
+    }
     case MAXWELL3D_REG_INDEX(code_address.code_address_high):
     case MAXWELL3D_REG_INDEX(code_address.code_address_low): {
         // Note: For some reason games (like Puyo Puyo Tetris) seem to write 0 to the CODE_ADDRESS
@@ -141,17 +141,48 @@ void Maxwell3D::WriteReg(u32 method, u32 value, u32 remaining_params) {
     }
 }
 
+void Maxwell3D::ProcessMacroUpload(u32 data) {
+    // Store the uploaded macro code to interpret them when they're called.
+    auto& macro = uploaded_macros[regs.macros.entry * 2 + MacroRegistersStart];
+    macro.push_back(data);
+}
+
 void Maxwell3D::ProcessQueryGet() {
     GPUVAddr sequence_address = regs.query.QueryAddress();
     // Since the sequence address is given as a GPU VAddr, we have to convert it to an application
     // VAddr before writing.
-    VAddr address = memory_manager.PhysicalToVirtualAddress(sequence_address);
+    boost::optional<VAddr> address = memory_manager.GpuToCpuAddress(sequence_address);
+
+    // TODO(Subv): Support the other query units.
+    ASSERT_MSG(regs.query.query_get.unit == Regs::QueryUnit::Crop,
+               "Units other than CROP are unimplemented");
+    ASSERT_MSG(regs.query.query_get.short_query,
+               "Writing the entire query result structure is unimplemented");
+
+    u32 value = Memory::Read32(*address);
+    u32 result = 0;
+
+    // TODO(Subv): Support the other query variables
+    switch (regs.query.query_get.select) {
+    case Regs::QuerySelect::Zero:
+        result = 0;
+        break;
+    default:
+        UNIMPLEMENTED_MSG("Unimplemented query select type %u",
+                          static_cast<u32>(regs.query.query_get.select.Value()));
+    }
+
+    // TODO(Subv): Research and implement how query sync conditions work.
 
     switch (regs.query.query_get.mode) {
-    case Regs::QueryMode::Write: {
+    case Regs::QueryMode::Write:
+    case Regs::QueryMode::Write2: {
         // Write the current query sequence to the sequence address.
         u32 sequence = regs.query.query_sequence;
-        Memory::Write32(address, sequence);
+        Memory::Write32(*address, sequence);
+
+        // TODO(Subv): Write the proper query response structure to the address when not using short
+        // mode.
         break;
     }
     default:
@@ -161,8 +192,8 @@ void Maxwell3D::ProcessQueryGet() {
 }
 
 void Maxwell3D::DrawArrays() {
-    LOG_DEBUG(HW_GPU, "called, topology=%d, count=%d", regs.draw.topology.Value(),
-              regs.vertex_buffer.count);
+    NGLOG_DEBUG(HW_GPU, "called, topology={}, count={}",
+                static_cast<u32>(regs.draw.topology.Value()), regs.vertex_buffer.count);
     ASSERT_MSG(!(regs.index_array.count && regs.vertex_buffer.count), "Both indexed and direct?");
 
     auto debug_context = Core::System::GetInstance().GetGPUDebugContext();
@@ -200,10 +231,10 @@ void Maxwell3D::ProcessCBData(u32 value) {
     // Don't allow writing past the end of the buffer.
     ASSERT(regs.const_buffer.cb_pos + sizeof(u32) <= regs.const_buffer.cb_size);
 
-    VAddr address =
-        memory_manager.PhysicalToVirtualAddress(buffer_address + regs.const_buffer.cb_pos);
+    boost::optional<VAddr> address =
+        memory_manager.GpuToCpuAddress(buffer_address + regs.const_buffer.cb_pos);
 
-    Memory::Write32(address, value);
+    Memory::Write32(*address, value);
 
     // Increment the current buffer position.
     regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4;
@@ -213,10 +244,10 @@ Texture::TICEntry Maxwell3D::GetTICEntry(u32 tic_index) const {
     GPUVAddr tic_base_address = regs.tic.TICAddress();
 
     GPUVAddr tic_address_gpu = tic_base_address + tic_index * sizeof(Texture::TICEntry);
-    VAddr tic_address_cpu = memory_manager.PhysicalToVirtualAddress(tic_address_gpu);
+    boost::optional<VAddr> tic_address_cpu = memory_manager.GpuToCpuAddress(tic_address_gpu);
 
     Texture::TICEntry tic_entry;
-    Memory::ReadBlock(tic_address_cpu, &tic_entry, sizeof(Texture::TICEntry));
+    Memory::ReadBlock(*tic_address_cpu, &tic_entry, sizeof(Texture::TICEntry));
 
     ASSERT_MSG(tic_entry.header_version == Texture::TICHeaderVersion::BlockLinear ||
                    tic_entry.header_version == Texture::TICHeaderVersion::Pitch,
@@ -243,10 +274,10 @@ Texture::TSCEntry Maxwell3D::GetTSCEntry(u32 tsc_index) const {
     GPUVAddr tsc_base_address = regs.tsc.TSCAddress();
 
     GPUVAddr tsc_address_gpu = tsc_base_address + tsc_index * sizeof(Texture::TSCEntry);
-    VAddr tsc_address_cpu = memory_manager.PhysicalToVirtualAddress(tsc_address_gpu);
+    boost::optional<VAddr> tsc_address_cpu = memory_manager.GpuToCpuAddress(tsc_address_gpu);
 
     Texture::TSCEntry tsc_entry;
-    Memory::ReadBlock(tsc_address_cpu, &tsc_entry, sizeof(Texture::TSCEntry));
+    Memory::ReadBlock(*tsc_address_cpu, &tsc_entry, sizeof(Texture::TSCEntry));
 
     return tsc_entry;
 }
@@ -268,7 +299,7 @@ std::vector<Texture::FullTextureInfo> Maxwell3D::GetStageTextures(Regs::ShaderSt
          current_texture < tex_info_buffer_end;
          current_texture += sizeof(Texture::TextureHandle)) {
         Texture::TextureHandle tex_handle{
-            Memory::Read32(memory_manager.PhysicalToVirtualAddress(current_texture))};
+            Memory::Read32(*memory_manager.GpuToCpuAddress(current_texture))};
 
         Texture::FullTextureInfo tex_info{};
         // TODO(Subv): Use the shader to determine which textures are actually accessed.
```
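With `SubmitMacroCode` gone, macro code now accumulates one word at a time as the `macros.data` register is written, keyed by the currently selected `macros.entry`. A minimal sketch of that bookkeeping, with the `entry * 2 + MacroRegistersStart` key layout taken from the diff and everything else simplified:

```cpp
#include <cstdint>
#include <unordered_map>
#include <vector>

constexpr std::uint32_t MacroRegistersStart = 0xE00; // same constant as Maxwell3D

struct MacroStore {
    std::uint32_t entry = 0; // mirrors regs.macros.entry
    std::unordered_map<std::uint32_t, std::vector<std::uint32_t>> uploaded;

    // Each write to macros.data appends to the macro selected by macros.entry.
    void ProcessMacroUpload(std::uint32_t data) {
        uploaded[entry * 2 + MacroRegistersStart].push_back(data);
    }
};

int main() {
    MacroStore store;
    store.entry = 3;
    store.ProcessMacroUpload(0xDEADBEEF); // first code word of macro 3
    // Macro 3 is now callable via method 0xE06 (3 * 2 + 0xE00).
    return store.uploaded.count(0xE06) ? 0 : 1;
}
```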
```diff
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index d4fcedace..5cf62fb01 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -31,7 +31,7 @@ public:
     /// Register structure of the Maxwell3D engine.
     /// TODO(Subv): This structure will need to be made bigger as more registers are discovered.
     struct Regs {
-        static constexpr size_t NUM_REGS = 0xE36;
+        static constexpr size_t NUM_REGS = 0xE00;
 
         static constexpr size_t NumRenderTargets = 8;
         static constexpr size_t NumViewports = 16;
@@ -46,6 +46,29 @@ public:
         enum class QueryMode : u32 {
             Write = 0,
             Sync = 1,
+            // TODO(Subv): It is currently unknown what the difference between method 2 and method 0
+            // is.
+            Write2 = 2,
+        };
+
+        enum class QueryUnit : u32 {
+            VFetch = 1,
+            VP = 2,
+            Rast = 4,
+            StrmOut = 5,
+            GP = 6,
+            ZCull = 7,
+            Prop = 10,
+            Crop = 15,
+        };
+
+        enum class QuerySelect : u32 {
+            Zero = 0,
+        };
+
+        enum class QuerySyncCondition : u32 {
+            NotEqual = 0,
+            GreaterThan = 1,
         };
 
         enum class ShaderProgram : u32 {
@@ -299,7 +322,15 @@ public:
 
         union {
             struct {
-                INSERT_PADDING_WORDS(0x200);
+                INSERT_PADDING_WORDS(0x45);
+
+                struct {
+                    INSERT_PADDING_WORDS(1);
+                    u32 data;
+                    u32 entry;
+                } macros;
+
+                INSERT_PADDING_WORDS(0x1B8);
 
                 struct {
                     u32 address_high;
@@ -476,7 +507,10 @@ public:
                         u32 raw;
                         BitField<0, 2, QueryMode> mode;
                         BitField<4, 1, u32> fence;
-                        BitField<12, 4, u32> unit;
+                        BitField<12, 4, QueryUnit> unit;
+                        BitField<16, 1, QuerySyncCondition> sync_cond;
+                        BitField<23, 5, QuerySelect> select;
+                        BitField<28, 1, u32> short_query;
                     } query_get;
 
                     GPUVAddr QueryAddress() const {
@@ -500,6 +534,11 @@ public:
                         return static_cast<GPUVAddr>((static_cast<GPUVAddr>(start_high) << 32) |
                                                      start_low);
                     }
+
+                    bool IsEnabled() const {
+                        return enable != 0 && StartAddress() != 0;
+                    }
+
                 } vertex_array[NumVertexArrays];
 
                 Blend blend;
@@ -574,7 +613,7 @@ public:
                     u32 size[MaxShaderStage];
                 } tex_info_buffers;
 
-                INSERT_PADDING_WORDS(0x102);
+                INSERT_PADDING_WORDS(0xCC);
             };
             std::array<u32, NUM_REGS> reg_array;
         };
@@ -606,9 +645,6 @@ public:
     /// Write the value to the register identified by method.
     void WriteReg(u32 method, u32 value, u32 remaining_params);
 
-    /// Uploads the code for a GPU macro program associated with the specified entry.
-    void SubmitMacroCode(u32 entry, std::vector<u32> code);
-
     /// Returns a list of enabled textures for the specified shader stage.
     std::vector<Texture::FullTextureInfo> GetStageTextures(Regs::ShaderStage stage) const;
 
@@ -639,6 +675,9 @@ private:
      */
    void CallMacroMethod(u32 method, std::vector<u32> parameters);
 
+    /// Handles writes to the macro uploading registers.
+    void ProcessMacroUpload(u32 data);
+
    /// Handles a write to the QUERY_GET register.
    void ProcessQueryGet();
 
@@ -656,6 +695,7 @@ private:
     static_assert(offsetof(Maxwell3D::Regs, field_name) == position * 4,                          \
                   "Field " #field_name " has invalid position")
 
+ASSERT_REG_POSITION(macros, 0x45);
 ASSERT_REG_POSITION(rt, 0x200);
 ASSERT_REG_POSITION(viewport_transform[0], 0x280);
 ASSERT_REG_POSITION(viewport, 0x300);
```
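Carving the `macros` block out of what used to be a single `INSERT_PADDING_WORDS(0x200)` is exactly the kind of edit the `ASSERT_REG_POSITION` static_asserts exist to police: if the re-counted pads don't add back up, compilation fails instead of every later register silently shifting. A reduced, compilable illustration of the check, with plain arrays standing in for `INSERT_PADDING_WORDS` and invented field names:

```cpp
#include <cstddef>
#include <cstdint>

struct Regs {
    std::uint32_t pad0[0x45];
    struct {
        std::uint32_t pad;
        std::uint32_t data;
        std::uint32_t entry;
    } macros;                  // 3 words, starting at word 0x45
    std::uint32_t pad1[0x1B8];
    std::uint32_t rt;          // stands in for the render-target block
};

#define ASSERT_REG_POSITION(field, position)                                   \
    static_assert(offsetof(Regs, field) == (position) * 4,                     \
                  "Field " #field " has invalid position")

ASSERT_REG_POSITION(macros, 0x45);
ASSERT_REG_POSITION(rt, 0x200); // 0x45 + 3 + 0x1B8 = 0x200 words, as in the diff
#undef ASSERT_REG_POSITION

int main() {}
```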
```diff
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index 5a006aee5..f4d11fa5d 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -214,6 +214,20 @@ union Instruction {
         BitField<56, 1, u64> neg_b;
     } fsetp;
 
+    union {
+        BitField<39, 3, u64> pred39;
+        BitField<42, 1, u64> neg_pred;
+        BitField<43, 1, u64> neg_a;
+        BitField<44, 1, u64> abs_b;
+        BitField<45, 2, PredOperation> op;
+        BitField<48, 4, PredCondition> cond;
+        BitField<53, 1, u64> neg_b;
+        BitField<54, 1, u64> abs_a;
+        BitField<52, 1, u64> bf;
+        BitField<55, 1, u64> ftz;
+        BitField<56, 1, u64> neg_imm;
+    } fset;
+
     BitField<61, 1, u64> is_b_imm;
     BitField<60, 1, u64> is_b_gpr;
     BitField<59, 1, u64> is_c_gpr;
@@ -261,6 +275,9 @@ public:
         I2F_C,
         I2F_R,
         I2F_IMM,
+        I2I_C,
+        I2I_R,
+        I2I_IMM,
         LOP32I,
         MOV_C,
         MOV_R,
@@ -272,6 +289,9 @@ public:
         FSETP_C, // Set Predicate
         FSETP_R,
         FSETP_IMM,
+        FSET_C,
+        FSET_R,
+        FSET_IMM,
         ISETP_C,
         ISETP_IMM,
         ISETP_R,
@@ -283,8 +303,9 @@ public:
         Ffma,
         Flow,
         Memory,
-        FloatPredicate,
-        IntegerPredicate,
+        FloatSet,
+        FloatSetPredicate,
+        IntegerSetPredicate,
         Unknown,
     };
@@ -409,6 +430,9 @@ private:
             INST("0100110010111---", Id::I2F_C, Type::Arithmetic, "I2F_C"),
             INST("0101110010111---", Id::I2F_R, Type::Arithmetic, "I2F_R"),
             INST("0011100-10111---", Id::I2F_IMM, Type::Arithmetic, "I2F_IMM"),
+            INST("0100110011100---", Id::I2I_C, Type::Arithmetic, "I2I_C"),
+            INST("0101110011100---", Id::I2I_R, Type::Arithmetic, "I2I_R"),
+            INST("01110001-1000---", Id::I2I_IMM, Type::Arithmetic, "I2I_IMM"),
             INST("000001----------", Id::LOP32I, Type::Arithmetic, "LOP32I"),
             INST("0100110010011---", Id::MOV_C, Type::Arithmetic, "MOV_C"),
             INST("0101110010011---", Id::MOV_R, Type::Arithmetic, "MOV_R"),
@@ -417,12 +441,15 @@ private:
             INST("0100110000101---", Id::SHR_C, Type::Arithmetic, "SHR_C"),
             INST("0101110000101---", Id::SHR_R, Type::Arithmetic, "SHR_R"),
             INST("0011100-00101---", Id::SHR_IMM, Type::Arithmetic, "SHR_IMM"),
-            INST("010010111011----", Id::FSETP_C, Type::FloatPredicate, "FSETP_C"),
-            INST("010110111011----", Id::FSETP_R, Type::FloatPredicate, "FSETP_R"),
-            INST("0011011-1011----", Id::FSETP_IMM, Type::FloatPredicate, "FSETP_IMM"),
-            INST("010010110110----", Id::ISETP_C, Type::IntegerPredicate, "ISETP_C"),
-            INST("010110110110----", Id::ISETP_R, Type::IntegerPredicate, "ISETP_R"),
-            INST("0011011-0110----", Id::ISETP_IMM, Type::IntegerPredicate, "ISETP_IMM"),
+            INST("01011000--------", Id::FSET_R, Type::FloatSet, "FSET_R"),
+            INST("0100100---------", Id::FSET_C, Type::FloatSet, "FSET_C"),
+            INST("0011000---------", Id::FSET_IMM, Type::FloatSet, "FSET_IMM"),
+            INST("010010111011----", Id::FSETP_C, Type::FloatSetPredicate, "FSETP_C"),
+            INST("010110111011----", Id::FSETP_R, Type::FloatSetPredicate, "FSETP_R"),
+            INST("0011011-1011----", Id::FSETP_IMM, Type::FloatSetPredicate, "FSETP_IMM"),
+            INST("010010110110----", Id::ISETP_C, Type::IntegerSetPredicate, "ISETP_C"),
+            INST("010110110110----", Id::ISETP_R, Type::IntegerSetPredicate, "ISETP_R"),
+            INST("0011011-0110----", Id::ISETP_IMM, Type::IntegerSetPredicate, "ISETP_IMM"),
         };
 #undef INST
         std::stable_sort(table.begin(), table.end(), [](const auto& a, const auto& b) {
```
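The `INST` pattern strings ("0101110010111---" and friends) describe the top 16 bits of the 64-bit opcode, with `-` acting as a wildcard. A sketch of how such patterns are commonly compiled into mask/expected pairs for matching — helper names are invented, and the real decoder's internals may differ:

```cpp
#include <cstdint>
#include <cstdio>
#include <string>

struct Matcher {
    std::uint16_t mask;     // 1 bits must match exactly
    std::uint16_t expected; // required values for those bits

    bool Matches(std::uint64_t instruction) const {
        const auto top = static_cast<std::uint16_t>(instruction >> 48);
        return (top & mask) == expected;
    }
};

// '-' clears the bit in the mask (wildcard); '0'/'1' set it and fix the value.
Matcher FromPattern(const std::string& bits) {
    Matcher m{0, 0};
    for (char c : bits) {
        m.mask <<= 1;
        m.expected <<= 1;
        if (c != '-') {
            m.mask |= 1;
            m.expected |= (c == '1');
        }
    }
    return m;
}

int main() {
    const Matcher i2f_r = FromPattern("0101110010111---");
    // 0x5CB8... has top bits 0101110010111000, so this prints 1.
    std::printf("%d\n", i2f_r.Matches(0x5CB8000000000000ULL));
}
```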
```diff
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 9463cd5d6..9eb143918 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -12,7 +12,7 @@ namespace Tegra {
 GPU::GPU() {
     memory_manager = std::make_unique<MemoryManager>();
     maxwell_3d = std::make_unique<Engines::Maxwell3D>(*memory_manager);
-    fermi_2d = std::make_unique<Engines::Fermi2D>();
+    fermi_2d = std::make_unique<Engines::Fermi2D>(*memory_manager);
     maxwell_compute = std::make_unique<Engines::MaxwellCompute>();
 }
 
@@ -22,4 +22,16 @@ const Tegra::Engines::Maxwell3D& GPU::Get3DEngine() const {
     return *maxwell_3d;
 }
 
+u32 RenderTargetBytesPerPixel(RenderTargetFormat format) {
+    ASSERT(format != RenderTargetFormat::NONE);
+
+    switch (format) {
+    case RenderTargetFormat::RGBA8_UNORM:
+    case RenderTargetFormat::RGB10_A2_UNORM:
+        return 4;
+    default:
+        UNIMPLEMENTED_MSG("Unimplemented render target format %u", static_cast<u32>(format));
+    }
+}
+
 } // namespace Tegra
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index 2888daedc..f168a5171 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -21,6 +21,9 @@ enum class RenderTargetFormat : u32 {
     RGBA8_SRGB = 0xD6,
 };
 
+/// Returns the number of bytes per pixel of each rendertarget format.
+u32 RenderTargetBytesPerPixel(RenderTargetFormat format);
+
 class DebugContext;
 
 /**
@@ -86,8 +89,6 @@ public:
     }
 
 private:
-    static constexpr u32 InvalidGraphMacroEntry = 0xFFFFFFFF;
-
     /// Writes a single register in the engine bound to the specified subchannel
     void WriteReg(u32 method, u32 subchannel, u32 value, u32 remaining_params);
 
@@ -100,11 +101,6 @@ private:
     std::unique_ptr<Engines::Fermi2D> fermi_2d;
     /// Compute engine
     std::unique_ptr<Engines::MaxwellCompute> maxwell_compute;
-
-    /// Entry of the macro that is currently being uploaded
-    u32 current_macro_entry = InvalidGraphMacroEntry;
-    /// Code being uploaded for the current macro
-    std::vector<u32> current_macro_code;
 };
 
 } // namespace Tegra
```
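One wrinkle worth noting in the new `RenderTargetBytesPerPixel`: if `UNIMPLEMENTED_MSG` ever returns, control falls off the end of a non-void function, which is undefined behavior and typically earns a `-Wreturn-type` warning. A defensive variant of the same mapping — the `RGB10_A2_UNORM` enum value and the abort fallback here are assumptions, not taken from the diff:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstdlib>

enum class RenderTargetFormat : std::uint32_t {
    NONE = 0,
    RGBA8_UNORM = 0xD5,
    RGB10_A2_UNORM = 0xDF, // value assumed for illustration
};

// Same mapping as the diff, but the unimplemented path terminates explicitly
// so the function can never fall off the end without a return value.
std::uint32_t RenderTargetBytesPerPixel(RenderTargetFormat format) {
    switch (format) {
    case RenderTargetFormat::RGBA8_UNORM:
    case RenderTargetFormat::RGB10_A2_UNORM:
        return 4;
    default:
        std::fprintf(stderr, "Unimplemented render target format %u\n",
                     static_cast<unsigned>(format));
        std::abort(); // stand-in for UNIMPLEMENTED_MSG
    }
}

int main() {
    std::printf("%u\n", RenderTargetBytesPerPixel(RenderTargetFormat::RGBA8_UNORM));
}
```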
```diff
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index 2789a4ca1..25984439d 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -2,109 +2,118 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include "common/alignment.h"
 #include "common/assert.h"
 #include "video_core/memory_manager.h"
 
 namespace Tegra {
 
-PAddr MemoryManager::AllocateSpace(u64 size, u64 align) {
-    boost::optional<PAddr> paddr = FindFreeBlock(size, align);
-    ASSERT(paddr);
+GPUVAddr MemoryManager::AllocateSpace(u64 size, u64 align) {
+    boost::optional<GPUVAddr> gpu_addr = FindFreeBlock(size, align);
+    ASSERT(gpu_addr);
 
-    for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
-        PageSlot(*paddr + offset) = static_cast<u64>(PageStatus::Allocated);
+    for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
+        ASSERT(PageSlot(*gpu_addr + offset) == static_cast<u64>(PageStatus::Unmapped));
+        PageSlot(*gpu_addr + offset) = static_cast<u64>(PageStatus::Allocated);
     }
 
-    return *paddr;
+    return *gpu_addr;
 }
 
-PAddr MemoryManager::AllocateSpace(PAddr paddr, u64 size, u64 align) {
-    for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
-        if (IsPageMapped(paddr + offset)) {
-            return AllocateSpace(size, align);
-        }
-    }
-
-    for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
-        PageSlot(paddr + offset) = static_cast<u64>(PageStatus::Allocated);
+GPUVAddr MemoryManager::AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align) {
+    for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
+        ASSERT(PageSlot(gpu_addr + offset) == static_cast<u64>(PageStatus::Unmapped));
+        PageSlot(gpu_addr + offset) = static_cast<u64>(PageStatus::Allocated);
     }
 
-    return paddr;
+    return gpu_addr;
 }
 
-PAddr MemoryManager::MapBufferEx(VAddr vaddr, u64 size) {
-    vaddr &= ~Memory::PAGE_MASK;
-
-    boost::optional<PAddr> paddr = FindFreeBlock(size);
-    ASSERT(paddr);
+GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
+    boost::optional<GPUVAddr> gpu_addr = FindFreeBlock(size, PAGE_SIZE);
+    ASSERT(gpu_addr);
 
-    for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
-        PageSlot(*paddr + offset) = vaddr + offset;
+    for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
+        ASSERT(PageSlot(*gpu_addr + offset) == static_cast<u64>(PageStatus::Unmapped));
+        PageSlot(*gpu_addr + offset) = cpu_addr + offset;
     }
 
-    return *paddr;
+    MappedRegion region{cpu_addr, *gpu_addr, size};
+    mapped_regions.push_back(region);
+
+    return *gpu_addr;
 }
 
-PAddr MemoryManager::MapBufferEx(VAddr vaddr, PAddr paddr, u64 size) {
-    vaddr &= ~Memory::PAGE_MASK;
-    paddr &= ~Memory::PAGE_MASK;
+GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size) {
+    ASSERT((gpu_addr & PAGE_MASK) == 0);
 
-    for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
-        if (PageSlot(paddr + offset) != static_cast<u64>(PageStatus::Allocated)) {
-            return MapBufferEx(vaddr, size);
-        }
+    for (u64 offset = 0; offset < size; offset += PAGE_SIZE) {
+        ASSERT(PageSlot(gpu_addr + offset) == static_cast<u64>(PageStatus::Allocated));
+        PageSlot(gpu_addr + offset) = cpu_addr + offset;
     }
 
-    for (u64 offset = 0; offset < size; offset += Memory::PAGE_SIZE) {
-        PageSlot(paddr + offset) = vaddr + offset;
-    }
+    MappedRegion region{cpu_addr, gpu_addr, size};
+    mapped_regions.push_back(region);
 
-    return paddr;
+    return gpu_addr;
 }
 
-boost::optional<PAddr> MemoryManager::FindFreeBlock(u64 size, u64 align) {
-    PAddr paddr{};
-    u64 free_space{};
-    align = (align + Memory::PAGE_MASK) & ~Memory::PAGE_MASK;
+boost::optional<GPUVAddr> MemoryManager::FindFreeBlock(u64 size, u64 align) {
+    GPUVAddr gpu_addr = 0;
+    u64 free_space = 0;
+    align = (align + PAGE_MASK) & ~PAGE_MASK;
 
-    while (paddr + free_space < MAX_ADDRESS) {
-        if (!IsPageMapped(paddr + free_space)) {
-            free_space += Memory::PAGE_SIZE;
+    while (gpu_addr + free_space < MAX_ADDRESS) {
+        if (!IsPageMapped(gpu_addr + free_space)) {
+            free_space += PAGE_SIZE;
             if (free_space >= size) {
-                return paddr;
+                return gpu_addr;
             }
         } else {
-            paddr += free_space + Memory::PAGE_SIZE;
+            gpu_addr += free_space + PAGE_SIZE;
             free_space = 0;
-            const u64 remainder{paddr % align};
-            if (!remainder) {
-                paddr = (paddr - remainder) + align;
-            }
+            gpu_addr = Common::AlignUp(gpu_addr, align);
         }
     }
 
     return {};
 }
 
-VAddr MemoryManager::PhysicalToVirtualAddress(PAddr paddr) {
-    VAddr base_addr = PageSlot(paddr);
+boost::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) {
+    VAddr base_addr = PageSlot(gpu_addr);
     ASSERT(base_addr != static_cast<u64>(PageStatus::Unmapped));
-    return base_addr + (paddr & Memory::PAGE_MASK);
+
+    if (base_addr == static_cast<u64>(PageStatus::Allocated)) {
+        return {};
+    }
+
+    return base_addr + (gpu_addr & PAGE_MASK);
+}
+
+std::vector<GPUVAddr> MemoryManager::CpuToGpuAddress(VAddr cpu_addr) const {
+    std::vector<GPUVAddr> results;
+    for (const auto& region : mapped_regions) {
+        if (cpu_addr >= region.cpu_addr && cpu_addr < (region.cpu_addr + region.size)) {
+            u64 offset = cpu_addr - region.cpu_addr;
+            results.push_back(region.gpu_addr + offset);
+        }
+    }
+    return results;
 }
 
-bool MemoryManager::IsPageMapped(PAddr paddr) {
-    return PageSlot(paddr) != static_cast<u64>(PageStatus::Unmapped);
+bool MemoryManager::IsPageMapped(GPUVAddr gpu_addr) {
+    return PageSlot(gpu_addr) != static_cast<u64>(PageStatus::Unmapped);
 }
 
-VAddr& MemoryManager::PageSlot(PAddr paddr) {
-    auto& block = page_table[(paddr >> (Memory::PAGE_BITS + PAGE_TABLE_BITS)) & PAGE_TABLE_MASK];
+VAddr& MemoryManager::PageSlot(GPUVAddr gpu_addr) {
+    auto& block = page_table[(gpu_addr >> (PAGE_BITS + PAGE_TABLE_BITS)) & PAGE_TABLE_MASK];
     if (!block) {
         block = std::make_unique<PageBlock>();
         for (unsigned index = 0; index < PAGE_BLOCK_SIZE; index++) {
             (*block)[index] = static_cast<u64>(PageStatus::Unmapped);
         }
     }
-    return (*block)[(paddr >> Memory::PAGE_BITS) & PAGE_BLOCK_MASK];
+    return (*block)[(gpu_addr >> PAGE_BITS) & PAGE_BLOCK_MASK];
 }
 
 } // namespace Tegra
```
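The page-table constants also changed shape: with `PAGE_BITS` now 16 (64 KiB pages) and `PAGE_TABLE_BITS` shrunk from 14 to 10 (see the header below), the three fields sum to exactly the 40-bit `MAX_ADDRESS` (10 + 14 + 16 bits). A self-contained sketch of the two-level walk `PageSlot` performs, using the commit's constants:

```cpp
#include <array>
#include <cstdint>
#include <memory>

using VAddr = std::uint64_t;
using GPUVAddr = std::uint64_t;

constexpr std::uint64_t PAGE_BITS = 16;       // 64 KiB pages
constexpr std::uint64_t PAGE_TABLE_BITS = 10; // top-level table entries
constexpr std::uint64_t PAGE_TABLE_SIZE = 1ULL << PAGE_TABLE_BITS;
constexpr std::uint64_t PAGE_TABLE_MASK = PAGE_TABLE_SIZE - 1;
constexpr std::uint64_t PAGE_BLOCK_BITS = 14; // slots per second-level block
constexpr std::uint64_t PAGE_BLOCK_SIZE = 1ULL << PAGE_BLOCK_BITS;
constexpr std::uint64_t PAGE_BLOCK_MASK = PAGE_BLOCK_SIZE - 1;

using PageBlock = std::array<VAddr, PAGE_BLOCK_SIZE>;
std::array<std::unique_ptr<PageBlock>, PAGE_TABLE_SIZE> page_table;

VAddr& PageSlot(GPUVAddr gpu_addr) {
    // The top PAGE_TABLE_BITS of the page number pick the block, which is
    // allocated lazily on first touch...
    auto& block = page_table[(gpu_addr >> (PAGE_BITS + PAGE_TABLE_BITS)) & PAGE_TABLE_MASK];
    if (!block) {
        block = std::make_unique<PageBlock>();
        block->fill(~0ULL); // PageStatus::Unmapped
    }
    // ...and the low PAGE_BLOCK_BITS of the page number pick the slot.
    return (*block)[(gpu_addr >> PAGE_BITS) & PAGE_BLOCK_MASK];
}

int main() {
    PageSlot(0x12340000) = 0x80000000; // map one 64 KiB page
    return PageSlot(0x12340000) == 0x80000000 ? 0 : 1;
}
```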
```diff
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 47da7acd6..08140c83a 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -6,8 +6,11 @@
 
 #include <array>
 #include <memory>
+#include <vector>
+
+#include <boost/optional.hpp>
+
 #include "common/common_types.h"
-#include "core/memory.h"
 
 namespace Tegra {
 
@@ -18,16 +21,21 @@ class MemoryManager final {
 public:
     MemoryManager() = default;
 
-    PAddr AllocateSpace(u64 size, u64 align);
-    PAddr AllocateSpace(PAddr paddr, u64 size, u64 align);
-    PAddr MapBufferEx(VAddr vaddr, u64 size);
-    PAddr MapBufferEx(VAddr vaddr, PAddr paddr, u64 size);
-    VAddr PhysicalToVirtualAddress(PAddr paddr);
+    GPUVAddr AllocateSpace(u64 size, u64 align);
+    GPUVAddr AllocateSpace(GPUVAddr gpu_addr, u64 size, u64 align);
+    GPUVAddr MapBufferEx(VAddr cpu_addr, u64 size);
+    GPUVAddr MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size);
+    boost::optional<VAddr> GpuToCpuAddress(GPUVAddr gpu_addr);
+    std::vector<GPUVAddr> CpuToGpuAddress(VAddr cpu_addr) const;
+
+    static constexpr u64 PAGE_BITS = 16;
+    static constexpr u64 PAGE_SIZE = 1 << PAGE_BITS;
+    static constexpr u64 PAGE_MASK = PAGE_SIZE - 1;
 
 private:
-    boost::optional<PAddr> FindFreeBlock(u64 size, u64 align = 1);
-    bool IsPageMapped(PAddr paddr);
-    VAddr& PageSlot(PAddr paddr);
+    boost::optional<GPUVAddr> FindFreeBlock(u64 size, u64 align = 1);
+    bool IsPageMapped(GPUVAddr gpu_addr);
+    VAddr& PageSlot(GPUVAddr gpu_addr);
 
     enum class PageStatus : u64 {
         Unmapped = 0xFFFFFFFFFFFFFFFFULL,
@@ -35,7 +43,7 @@ private:
     };
 
     static constexpr u64 MAX_ADDRESS{0x10000000000ULL};
-    static constexpr u64 PAGE_TABLE_BITS{14};
+    static constexpr u64 PAGE_TABLE_BITS{10};
     static constexpr u64 PAGE_TABLE_SIZE{1 << PAGE_TABLE_BITS};
     static constexpr u64 PAGE_TABLE_MASK{PAGE_TABLE_SIZE - 1};
     static constexpr u64 PAGE_BLOCK_BITS{14};
@@ -44,6 +52,14 @@ private:
 
     using PageBlock = std::array<VAddr, PAGE_BLOCK_SIZE>;
     std::array<std::unique_ptr<PageBlock>, PAGE_TABLE_SIZE> page_table{};
+
+    struct MappedRegion {
+        VAddr cpu_addr;
+        GPUVAddr gpu_addr;
+        u64 size;
+    };
+
+    std::vector<MappedRegion> mapped_regions;
 };
 
 } // namespace Tegra
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index 36629dd11..f0e48a802 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -6,6 +6,7 @@
 
 #include "common/common_types.h"
 #include "video_core/gpu.h"
+#include "video_core/memory_manager.h"
 
 struct ScreenInfo;
 
@@ -25,14 +26,14 @@ public:
     virtual void FlushAll() = 0;
 
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
-    virtual void FlushRegion(VAddr addr, u64 size) = 0;
+    virtual void FlushRegion(Tegra::GPUVAddr addr, u64 size) = 0;
 
     /// Notify rasterizer that any caches of the specified region should be invalidated
-    virtual void InvalidateRegion(VAddr addr, u64 size) = 0;
+    virtual void InvalidateRegion(Tegra::GPUVAddr addr, u64 size) = 0;
 
     /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
     /// and invalidated
-    virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
+    virtual void FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) = 0;
 
     /// Attempt to use a faster method to perform a display transfer with is_texture_copy = 0
     virtual bool AccelerateDisplayTransfer(const void* config) {
```
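The new `mapped_regions` list is what makes the reverse `CpuToGpuAddress` query possible, and because the same CPU buffer may be mapped at several GPU addresses, the lookup returns every match rather than a single value. A standalone sketch of those semantics:

```cpp
#include <cstdint>
#include <vector>

using VAddr = std::uint64_t;
using GPUVAddr = std::uint64_t;

struct MappedRegion {
    VAddr cpu_addr;
    GPUVAddr gpu_addr;
    std::uint64_t size;
};

// Linear scan over all recorded mappings; one CPU address can resolve to
// multiple GPU addresses, so all hits are collected.
std::vector<GPUVAddr> CpuToGpuAddress(VAddr cpu_addr,
                                      const std::vector<MappedRegion>& mapped_regions) {
    std::vector<GPUVAddr> results;
    for (const auto& region : mapped_regions) {
        if (cpu_addr >= region.cpu_addr && cpu_addr < region.cpu_addr + region.size) {
            results.push_back(region.gpu_addr + (cpu_addr - region.cpu_addr));
        }
    }
    return results;
}

int main() {
    // One CPU buffer mapped twice into the GPU address space:
    const std::vector<MappedRegion> regions = {
        {0x80000, 0x10000, 0x1000},
        {0x80000, 0x30000, 0x1000},
    };
    return CpuToGpuAddress(0x80010, regions).size() == 2 ? 0 : 1;
}
```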
```diff
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 2d4a0d6db..9b3542e10 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -116,7 +116,7 @@ RasterizerOpenGL::RasterizerOpenGL() {
 
     glEnable(GL_BLEND);
 
-    LOG_CRITICAL(Render_OpenGL, "Sync fixed function OpenGL state here!");
+    NGLOG_CRITICAL(Render_OpenGL, "Sync fixed function OpenGL state here!");
 }
 
 RasterizerOpenGL::~RasterizerOpenGL() {
@@ -127,7 +127,8 @@ RasterizerOpenGL::~RasterizerOpenGL() {
     }
 }
 
-void RasterizerOpenGL::SetupVertexArray(u8* array_ptr, GLintptr buffer_offset) {
+std::pair<u8*, GLintptr> RasterizerOpenGL::SetupVertexArrays(u8* array_ptr,
+                                                             GLintptr buffer_offset) {
     MICROPROFILE_SCOPE(OpenGL_VAO);
     const auto& regs = Core::System().GetInstance().GPU().Maxwell3D().regs;
     const auto& memory_manager = Core::System().GetInstance().GPU().memory_manager;
@@ -136,43 +137,58 @@ void RasterizerOpenGL::SetupVertexArray(u8* array_ptr, GLintptr buffer_offset) {
     state.draw.vertex_buffer = stream_buffer->GetHandle();
     state.Apply();
 
-    // TODO(bunnei): Add support for 1+ vertex arrays
-    const auto& vertex_array{regs.vertex_array[0]};
-    const auto& vertex_array_limit{regs.vertex_array_limit[0]};
-    ASSERT_MSG(vertex_array.enable, "vertex array 0 is disabled?");
-    ASSERT_MSG(!vertex_array.divisor, "vertex array 0 divisor is unimplemented!");
-    for (unsigned index = 1; index < Maxwell::NumVertexArrays; ++index) {
-        ASSERT_MSG(!regs.vertex_array[index].enable, "vertex array %d is unimplemented!", index);
+    // Upload all guest vertex arrays sequentially to our buffer
+    for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) {
+        const auto& vertex_array = regs.vertex_array[index];
+        if (!vertex_array.IsEnabled())
+            continue;
+
+        const Tegra::GPUVAddr start = vertex_array.StartAddress();
+        const Tegra::GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
+
+        ASSERT(end > start);
+        u64 size = end - start + 1;
+
+        // Copy vertex array data
+        res_cache.FlushRegion(start, size, nullptr);
+        Memory::ReadBlock(*memory_manager->GpuToCpuAddress(start), array_ptr, size);
+
+        // Bind the vertex array to the buffer at the current offset.
+        glBindVertexBuffer(index, stream_buffer->GetHandle(), buffer_offset, vertex_array.stride);
+
+        ASSERT_MSG(vertex_array.divisor == 0, "Vertex buffer divisor unimplemented");
+
+        array_ptr += size;
+        buffer_offset += size;
     }
 
     // Use the vertex array as-is, assumes that the data is formatted correctly for OpenGL.
     // Enables the first 16 vertex attributes always, as we don't know which ones are actually used
-    // until shader time. Note, Tegra technically supports 32, but we're cappinig this to 16 for now
+    // until shader time. Note, Tegra technically supports 32, but we're capping this to 16 for now
     // to avoid OpenGL errors.
+    // TODO(Subv): Analyze the shader to identify which attributes are actually used and don't
+    // assume every shader uses them all.
     for (unsigned index = 0; index < 16; ++index) {
         auto& attrib = regs.vertex_attrib_format[index];
         NGLOG_DEBUG(HW_GPU, "vertex attrib {}, count={}, size={}, type={}, offset={}, normalize={}",
                     index, attrib.ComponentCount(), attrib.SizeString(), attrib.TypeString(),
                     attrib.offset.Value(), attrib.IsNormalized());
 
-        glVertexAttribPointer(index, attrib.ComponentCount(), MaxwellToGL::VertexType(attrib),
-                              attrib.IsNormalized() ? GL_TRUE : GL_FALSE, vertex_array.stride,
-                              reinterpret_cast<GLvoid*>(buffer_offset + attrib.offset));
+        auto& buffer = regs.vertex_array[attrib.buffer];
+        ASSERT(buffer.IsEnabled());
+
+        glEnableVertexAttribArray(index);
+        glVertexAttribFormat(index, attrib.ComponentCount(), MaxwellToGL::VertexType(attrib),
+                             attrib.IsNormalized() ? GL_TRUE : GL_FALSE, attrib.offset);
+        glVertexAttribBinding(index, attrib.buffer);
+
+        hw_vao_enabled_attributes[index] = true;
     }
 
-    // Copy vertex array data
-    const u64 data_size{vertex_array_limit.LimitAddress() - vertex_array.StartAddress() + 1};
-    const VAddr data_addr{memory_manager->PhysicalToVirtualAddress(vertex_array.StartAddress())};
-    res_cache.FlushRegion(data_addr, data_size, nullptr);
-    Memory::ReadBlock(data_addr, array_ptr, data_size);
-
-    array_ptr += data_size;
-    buffer_offset += data_size;
+    return {array_ptr, buffer_offset};
 }
 
-void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset, size_t ptr_pos) {
+void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset) {
     // Helper function for uploading uniform data
     const auto copy_buffer = [&](GLuint handle, GLintptr offset, GLsizeiptr size) {
         if (has_ARB_direct_state_access) {
@@ -190,8 +206,6 @@ void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset, size
     u32 current_constbuffer_bindpoint = 0;
 
     for (unsigned index = 1; index < Maxwell::MaxShaderProgram; ++index) {
-        ptr_pos += sizeof(GLShader::MaxwellUniformData);
-
         auto& shader_config = gpu.regs.shader_config[index];
         const Maxwell::ShaderProgram program{static_cast<Maxwell::ShaderProgram>(index)};
 
@@ -205,18 +219,21 @@ void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset, size
         }
 
         // Upload uniform data as one UBO per stage
-        const GLintptr ubo_offset = buffer_offset + static_cast<GLintptr>(ptr_pos);
+        const GLintptr ubo_offset = buffer_offset;
         copy_buffer(uniform_buffers[stage].handle, ubo_offset,
                     sizeof(GLShader::MaxwellUniformData));
         GLShader::MaxwellUniformData* ub_ptr =
-            reinterpret_cast<GLShader::MaxwellUniformData*>(&buffer_ptr[ptr_pos]);
+            reinterpret_cast<GLShader::MaxwellUniformData*>(buffer_ptr);
         ub_ptr->SetFromRegs(gpu.state.shader_stages[stage]);
 
+        buffer_ptr += sizeof(GLShader::MaxwellUniformData);
+        buffer_offset += sizeof(GLShader::MaxwellUniformData);
+
         // Fetch program code from memory
         GLShader::ProgramCode program_code;
         const u64 gpu_address{gpu.regs.code_address.CodeAddress() + shader_config.offset};
-        const VAddr cpu_address{gpu.memory_manager.PhysicalToVirtualAddress(gpu_address)};
-        Memory::ReadBlock(cpu_address, program_code.data(), program_code.size() * sizeof(u64));
+        const boost::optional<VAddr> cpu_address{gpu.memory_manager.GpuToCpuAddress(gpu_address)};
+        Memory::ReadBlock(*cpu_address, program_code.data(), program_code.size() * sizeof(u64));
         GLShader::ShaderSetup setup{std::move(program_code)};
         GLShader::ShaderEntries shader_resources;
 
@@ -235,8 +252,8 @@ void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset, size
             break;
         }
         default:
-            LOG_CRITICAL(HW_GPU, "Unimplemented shader index=%d, enable=%d, offset=0x%08X", index,
-                         shader_config.enable.Value(), shader_config.offset);
+            NGLOG_CRITICAL(HW_GPU, "Unimplemented shader index={}, enable={}, offset={:#010X}",
+                           index, shader_config.enable.Value(), shader_config.offset);
             UNREACHABLE();
         }
 
@@ -252,6 +269,24 @@ void RasterizerOpenGL::SetupShaders(u8* buffer_ptr, GLintptr buffer_offset, size
     shader_program_manager->UseTrivialGeometryShader();
 }
 
+size_t RasterizerOpenGL::CalculateVertexArraysSize() const {
+    const auto& regs = Core::System().GetInstance().GPU().Maxwell3D().regs;
+
+    size_t size = 0;
+    for (u32 index = 0; index < Maxwell::NumVertexArrays; ++index) {
+        if (!regs.vertex_array[index].IsEnabled())
+            continue;
+
+        const Tegra::GPUVAddr start = regs.vertex_array[index].StartAddress();
+        const Tegra::GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
+
+        ASSERT(end > start);
+        size += end - start + 1;
+    }
+
+    return size;
+}
+
 bool RasterizerOpenGL::AccelerateDrawBatch(bool is_indexed) {
     accelerate_draw = is_indexed ? AccelDraw::Indexed : AccelDraw::Arrays;
     DrawArrays();
@@ -329,44 +364,49 @@ void RasterizerOpenGL::DrawArrays() {
     const u64 index_buffer_size{regs.index_array.count * regs.index_array.FormatSizeInBytes()};
     const unsigned vertex_num{is_indexed ? regs.index_array.count : regs.vertex_buffer.count};
 
-    // TODO(bunnei): Add support for 1+ vertex arrays
-    vs_input_size = vertex_num * regs.vertex_array[0].stride;
-
     state.draw.vertex_buffer = stream_buffer->GetHandle();
     state.Apply();
 
-    size_t buffer_size = static_cast<size_t>(vs_input_size);
+    size_t buffer_size = CalculateVertexArraysSize();
+
     if (is_indexed) {
-        buffer_size = Common::AlignUp(buffer_size, 4) + index_buffer_size;
+        buffer_size = Common::AlignUp<size_t>(buffer_size, 4) + index_buffer_size;
     }
 
     // Uniform space for the 5 shader stages
-    buffer_size += sizeof(GLShader::MaxwellUniformData) * Maxwell::MaxShaderStage;
+    buffer_size = Common::AlignUp<size_t>(buffer_size, 4) +
+                  sizeof(GLShader::MaxwellUniformData) * Maxwell::MaxShaderStage;
 
-    size_t ptr_pos = 0;
     u8* buffer_ptr;
     GLintptr buffer_offset;
     std::tie(buffer_ptr, buffer_offset) =
         stream_buffer->Map(static_cast<GLsizeiptr>(buffer_size), 4);
 
-    SetupVertexArray(buffer_ptr, buffer_offset);
-    ptr_pos += vs_input_size;
+    u8* offseted_buffer;
+    std::tie(offseted_buffer, buffer_offset) = SetupVertexArrays(buffer_ptr, buffer_offset);
+
+    offseted_buffer =
+        reinterpret_cast<u8*>(Common::AlignUp(reinterpret_cast<size_t>(offseted_buffer), 4));
+    buffer_offset = Common::AlignUp<size_t>(buffer_offset, 4);
 
     // If indexed mode, copy the index buffer
     GLintptr index_buffer_offset = 0;
     if (is_indexed) {
-        ptr_pos = Common::AlignUp(ptr_pos, 4);
-
         const auto& memory_manager = Core::System().GetInstance().GPU().memory_manager;
-        const VAddr index_data_addr{
-            memory_manager->PhysicalToVirtualAddress(regs.index_array.StartAddress())};
-        Memory::ReadBlock(index_data_addr, &buffer_ptr[ptr_pos], index_buffer_size);
+        const boost::optional<VAddr> index_data_addr{
+            memory_manager->GpuToCpuAddress(regs.index_array.StartAddress())};
+        Memory::ReadBlock(*index_data_addr, offseted_buffer, index_buffer_size);
 
-        index_buffer_offset = buffer_offset + static_cast<GLintptr>(ptr_pos);
-        ptr_pos += index_buffer_size;
+        index_buffer_offset = buffer_offset;
+        offseted_buffer += index_buffer_size;
+        buffer_offset += index_buffer_size;
     }
 
-    SetupShaders(buffer_ptr, buffer_offset, ptr_pos);
+    offseted_buffer =
+        reinterpret_cast<u8*>(Common::AlignUp(reinterpret_cast<size_t>(offseted_buffer), 4));
+    buffer_offset = Common::AlignUp<size_t>(buffer_offset, 4);
+
+    SetupShaders(offseted_buffer, buffer_offset);
 
     stream_buffer->Unmap();
 
@@ -478,17 +518,17 @@ void RasterizerOpenGL::FlushAll() {
     res_cache.FlushAll();
 }
 
-void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
+void RasterizerOpenGL::FlushRegion(Tegra::GPUVAddr addr, u64 size) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
     res_cache.FlushRegion(addr, size);
 }
 
-void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
+void RasterizerOpenGL::InvalidateRegion(Tegra::GPUVAddr addr, u64 size) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
     res_cache.InvalidateRegion(addr, size, nullptr);
 }
 
-void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
+void RasterizerOpenGL::FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) {
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
     res_cache.FlushRegion(addr, size);
     res_cache.InvalidateRegion(addr, size, nullptr);
@@ -519,7 +559,8 @@ bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& framebu
     MICROPROFILE_SCOPE(OpenGL_CacheManagement);
 
     SurfaceParams src_params;
-    src_params.addr = framebuffer_addr;
+    src_params.cpu_addr = framebuffer_addr;
+    src_params.addr = res_cache.TryFindFramebufferGpuAddress(framebuffer_addr).get_value_or(0);
     src_params.width = std::min(framebuffer.width, pixel_stride);
     src_params.height = framebuffer.height;
     src_params.stride = pixel_stride;
@@ -618,9 +659,9 @@ u32 RasterizerOpenGL::SetupConstBuffers(Maxwell::ShaderStage stage, GLuint progr
         buffer_draw_state.enabled = true;
         buffer_draw_state.bindpoint = current_bindpoint + bindpoint;
 
-        VAddr addr = gpu.memory_manager->PhysicalToVirtualAddress(buffer.address);
+        boost::optional<VAddr> addr = gpu.memory_manager->GpuToCpuAddress(buffer.address);
         std::vector<u8> data(used_buffer.GetSize() * sizeof(float));
-        Memory::ReadBlock(addr, data.data(), data.size());
+        Memory::ReadBlock(*addr, data.data(), data.size());
 
         glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer_draw_state.ssbo);
         glBufferData(GL_SHADER_STORAGE_BUFFER, data.size(), data.data(), GL_DYNAMIC_DRAW);
```
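The vertex-path rewrite above swaps the classic `glVertexAttribPointer` model, where an attribute's format and its source buffer are fused, for the separated attribute-format model (OpenGL 4.3 / ARB_vertex_attrib_binding), which is what lets each guest vertex array get its own binding slot. A sketch of the pattern; it assumes a current GL context, and the parameters stand in for values the rasterizer computes:

```cpp
#include <glad/glad.h>

// Separated attribute format: declare the layout once per attribute, route it
// to a binding slot, then attach buffer regions to slots independently.
void ConfigureAttribute(GLuint attrib_index, GLuint binding_slot, GLuint vbo,
                        GLintptr offset, GLsizei stride) {
    // 1. Declare the attribute's layout (here: vec4 of floats, relative offset 0)...
    glEnableVertexAttribArray(attrib_index);
    glVertexAttribFormat(attrib_index, 4, GL_FLOAT, GL_FALSE, 0);

    // 2. ...route the attribute to a binding slot...
    glVertexAttribBinding(attrib_index, binding_slot);

    // 3. ...and attach a buffer region to that slot. Only this step has to be
    // repeated when the source buffer or offset changes, which is what lets
    // SetupVertexArrays bind one guest vertex array per slot.
    glBindVertexBuffer(binding_slot, vbo, offset, stride);
}
```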
```diff
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 03e02b52a..9709e595e 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -11,6 +11,7 @@
 #include <glad/glad.h>
 #include "common/common_types.h"
 #include "video_core/engines/maxwell_3d.h"
+#include "video_core/memory_manager.h"
 #include "video_core/rasterizer_interface.h"
 #include "video_core/renderer_opengl/gl_rasterizer_cache.h"
 #include "video_core/renderer_opengl/gl_resource_manager.h"
@@ -29,9 +30,9 @@ public:
     void DrawArrays() override;
     void NotifyMaxwellRegisterChanged(u32 method) override;
     void FlushAll() override;
-    void FlushRegion(VAddr addr, u64 size) override;
-    void InvalidateRegion(VAddr addr, u64 size) override;
-    void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
+    void FlushRegion(Tegra::GPUVAddr addr, u64 size) override;
+    void InvalidateRegion(Tegra::GPUVAddr addr, u64 size) override;
+    void FlushAndInvalidateRegion(Tegra::GPUVAddr addr, u64 size) override;
     bool AccelerateDisplayTransfer(const void* config) override;
     bool AccelerateTextureCopy(const void* config) override;
     bool AccelerateFill(const void* config) override;
@@ -148,13 +149,13 @@ private:
     static constexpr size_t STREAM_BUFFER_SIZE = 4 * 1024 * 1024;
     std::unique_ptr<OGLStreamBuffer> stream_buffer;
 
-    GLsizeiptr vs_input_size;
+    size_t CalculateVertexArraysSize() const;
 
-    void SetupVertexArray(u8* array_ptr, GLintptr buffer_offset);
+    std::pair<u8*, GLintptr> SetupVertexArrays(u8* array_ptr, GLintptr buffer_offset);
 
     std::array<OGLBuffer, Tegra::Engines::Maxwell3D::Regs::MaxShaderStage> uniform_buffers;
 
-    void SetupShaders(u8* buffer_ptr, GLintptr buffer_offset, size_t ptr_pos);
+    void SetupShaders(u8* buffer_ptr, GLintptr buffer_offset);
 
     enum class AccelDraw { Disabled, Arrays, Indexed };
     AccelDraw accelerate_draw;
```
type; bool compressed; - // How many pixels in the original texture are equivalent to one pixel in the compressed - // texture. - u32 compression_factor; }; static constexpr std::array<FormatTuple, SurfaceParams::MaxPixelFormat> tex_format_tuples = {{ - {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, false, 1}, // ABGR8 - {GL_RGB, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, false, 1}, // B5G6R5 - {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, false, 1}, // A2B10G10R10 - {GL_COMPRESSED_RGB_S3TC_DXT1_EXT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, true, 16}, // DXT1 - {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true, 16}, // DXT23 - {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true, 16}, // DXT45 + {GL_RGBA8, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, false}, // ABGR8 + {GL_RGB, GL_RGB, GL_UNSIGNED_SHORT_5_6_5_REV, false}, // B5G6R5 + {GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, false}, // A2B10G10R10 + {GL_COMPRESSED_RGB_S3TC_DXT1_EXT, GL_RGB, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT1 + {GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT23 + {GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, true}, // DXT45 }}; static const FormatTuple& GetFormatTuple(PixelFormat pixel_format, ComponentType component_type) { @@ -83,26 +80,30 @@ static u16 GetResolutionScaleFactor() { } template <bool morton_to_gl, PixelFormat format> -void MortonCopy(u32 stride, u32 block_height, u32 height, u8* gl_buffer, VAddr base, VAddr start, - VAddr end) { - constexpr u32 bytes_per_pixel = SurfaceParams::GetFormatBpp(format) / 8; +void MortonCopy(u32 stride, u32 block_height, u32 height, u8* gl_buffer, Tegra::GPUVAddr base, + Tegra::GPUVAddr start, Tegra::GPUVAddr end) { + constexpr u32 bytes_per_pixel = SurfaceParams::GetFormatBpp(format) / CHAR_BIT; constexpr u32 gl_bytes_per_pixel = CachedSurface::GetGLBytesPerPixel(format); + const auto& gpu = Core::System::GetInstance().GPU(); if (morton_to_gl) { auto data = Tegra::Texture::UnswizzleTexture( - base, SurfaceParams::TextureFormatFromPixelFormat(format), stride, height, - block_height); + *gpu.memory_manager->GpuToCpuAddress(base), + SurfaceParams::TextureFormatFromPixelFormat(format), stride, height, block_height); std::memcpy(gl_buffer, data.data(), data.size()); } else { // TODO(bunnei): Assumes the default rendering GOB size of 16 (128 lines). 
We should check // the configuration for this and perform more generic un/swizzle - LOG_WARNING(Render_OpenGL, "need to use correct swizzle/GOB parameters!"); - VideoCore::MortonCopyPixels128(stride, height, bytes_per_pixel, gl_bytes_per_pixel, - Memory::GetPointer(base), gl_buffer, morton_to_gl); + NGLOG_WARNING(Render_OpenGL, "need to use correct swizzle/GOB parameters!"); + VideoCore::MortonCopyPixels128( + stride, height, bytes_per_pixel, gl_bytes_per_pixel, + Memory::GetPointer(*gpu.memory_manager->GpuToCpuAddress(base)), gl_buffer, + morton_to_gl); } } -static constexpr std::array<void (*)(u32, u32, u32, u8*, VAddr, VAddr, VAddr), +static constexpr std::array<void (*)(u32, u32, u32, u8*, Tegra::GPUVAddr, Tegra::GPUVAddr, + Tegra::GPUVAddr), SurfaceParams::MaxPixelFormat> morton_to_gl_fns = { MortonCopy<true, PixelFormat::ABGR8>, MortonCopy<true, PixelFormat::B5G6R5>, @@ -110,7 +111,8 @@ static constexpr std::array<void (*)(u32, u32, u32, u8*, VAddr, VAddr, VAddr), MortonCopy<true, PixelFormat::DXT23>, MortonCopy<true, PixelFormat::DXT45>, }; -static constexpr std::array<void (*)(u32, u32, u32, u8*, VAddr, VAddr, VAddr), +static constexpr std::array<void (*)(u32, u32, u32, u8*, Tegra::GPUVAddr, Tegra::GPUVAddr, + Tegra::GPUVAddr), SurfaceParams::MaxPixelFormat> gl_to_morton_fns = { MortonCopy<false, PixelFormat::ABGR8>, @@ -219,9 +221,9 @@ SurfaceParams SurfaceParams::FromInterval(SurfaceInterval interval) const { SurfaceParams params = *this; const u32 tiled_size = is_tiled ? 8 : 1; const u64 stride_tiled_bytes = BytesInPixels(stride * tiled_size); - VAddr aligned_start = + Tegra::GPUVAddr aligned_start = addr + Common::AlignDown(boost::icl::first(interval) - addr, stride_tiled_bytes); - VAddr aligned_end = + Tegra::GPUVAddr aligned_end = addr + Common::AlignUp(boost::icl::last_next(interval) - addr, stride_tiled_bytes); if (aligned_end - aligned_start > stride_tiled_bytes) { @@ -342,6 +344,13 @@ bool SurfaceParams::CanTexCopy(const SurfaceParams& texcopy_params) const { return FromInterval(texcopy_params.GetInterval()).GetInterval() == texcopy_params.GetInterval(); } +VAddr SurfaceParams::GetCpuAddr() const { + // When this function is used, only cpu_addr or (GPU) addr should be set, not both + ASSERT(!(cpu_addr && addr)); + const auto& gpu = Core::System::GetInstance().GPU(); + return cpu_addr.get_value_or(*gpu.memory_manager->GpuToCpuAddress(addr)); +} + bool CachedSurface::CanFill(const SurfaceParams& dest_surface, SurfaceInterval fill_interval) const { if (type == SurfaceType::Fill && IsRegionValid(fill_interval) && @@ -349,9 +358,9 @@ bool CachedSurface::CanFill(const SurfaceParams& dest_surface, boost::icl::last_next(fill_interval) <= end && // dest_surface is within our fill range dest_surface.FromInterval(fill_interval).GetInterval() == fill_interval) { // make sure interval is a rectangle in dest surface - if (fill_size * 8 != dest_surface.GetFormatBpp()) { + if (fill_size * CHAR_BIT != dest_surface.GetFormatBpp()) { // Check if bits repeat for our fill_size - const u32 dest_bytes_per_pixel = std::max(dest_surface.GetFormatBpp() / 8, 1u); + const u32 dest_bytes_per_pixel = std::max(dest_surface.GetFormatBpp() / CHAR_BIT, 1u); std::vector<u8> fill_test(fill_size * dest_bytes_per_pixel); for (u32 i = 0; i < dest_bytes_per_pixel; ++i) @@ -456,15 +465,15 @@ void RasterizerCacheOpenGL::CopySurface(const Surface& src_surface, const Surfac } MICROPROFILE_DEFINE(OpenGL_SurfaceLoad, "OpenGL", "Surface Load", MP_RGB(128, 64, 192)); -void CachedSurface::LoadGLBuffer(VAddr load_start, 
VAddr load_end) { +void CachedSurface::LoadGLBuffer(Tegra::GPUVAddr load_start, Tegra::GPUVAddr load_end) { ASSERT(type != SurfaceType::Fill); - u8* const texture_src_data = Memory::GetPointer(addr); + u8* const texture_src_data = Memory::GetPointer(GetCpuAddr()); if (texture_src_data == nullptr) return; if (gl_buffer == nullptr) { - gl_buffer_size = width * height * GetGLBytesPerPixel(pixel_format); + gl_buffer_size = GetActualWidth() * GetActualHeight() * GetGLBytesPerPixel(pixel_format); gl_buffer.reset(new u8[gl_buffer_size]); } @@ -479,14 +488,15 @@ void CachedSurface::LoadGLBuffer(VAddr load_start, VAddr load_end) { std::memcpy(&gl_buffer[start_offset], texture_src_data + start_offset, bytes_per_pixel * width * height); } else { - morton_to_gl_fns[static_cast<size_t>(pixel_format)]( - stride, block_height, height, &gl_buffer[0], addr, load_start, load_end); + morton_to_gl_fns[static_cast<size_t>(pixel_format)](GetActualWidth(), block_height, + GetActualHeight(), &gl_buffer[0], addr, + load_start, load_end); } } MICROPROFILE_DEFINE(OpenGL_SurfaceFlush, "OpenGL", "Surface Flush", MP_RGB(128, 192, 64)); -void CachedSurface::FlushGLBuffer(VAddr flush_start, VAddr flush_end) { - u8* const dst_buffer = Memory::GetPointer(addr); +void CachedSurface::FlushGLBuffer(Tegra::GPUVAddr flush_start, Tegra::GPUVAddr flush_end) { + u8* const dst_buffer = Memory::GetPointer(GetCpuAddr()); if (dst_buffer == nullptr) return; @@ -536,7 +546,8 @@ void CachedSurface::UploadGLTexture(const MathUtil::Rectangle<u32>& rect, GLuint MICROPROFILE_SCOPE(OpenGL_TextureUL); - ASSERT(gl_buffer_size == width * height * GetGLBytesPerPixel(pixel_format)); + ASSERT(gl_buffer_size == + GetActualWidth() * GetActualHeight() * GetGLBytesPerPixel(pixel_format)); // Load data from memory to the surface GLint x0 = static_cast<GLint>(rect.left); @@ -571,11 +582,9 @@ void CachedSurface::UploadGLTexture(const MathUtil::Rectangle<u32>& rect, GLuint glActiveTexture(GL_TEXTURE0); if (tuple.compressed) { glCompressedTexImage2D(GL_TEXTURE_2D, 0, tuple.internal_format, - static_cast<GLsizei>(rect.GetWidth()), - static_cast<GLsizei>(rect.GetHeight()), 0, - rect.GetWidth() * rect.GetHeight() * - GetGLBytesPerPixel(pixel_format) / tuple.compression_factor, - &gl_buffer[buffer_offset]); + static_cast<GLsizei>(rect.GetWidth() * GetCompresssionFactor()), + static_cast<GLsizei>(rect.GetHeight() * GetCompresssionFactor()), 0, + size, &gl_buffer[buffer_offset]); } else { glTexSubImage2D(GL_TEXTURE_2D, 0, x0, y0, static_cast<GLsizei>(rect.GetWidth()), static_cast<GLsizei>(rect.GetHeight()), tuple.format, tuple.type, @@ -945,6 +954,33 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, ScaleMatc return surface; } +boost::optional<Tegra::GPUVAddr> RasterizerCacheOpenGL::TryFindFramebufferGpuAddress( + VAddr cpu_addr) const { + // Tries to find the GPU address of a framebuffer based on the CPU address. This is because + // final output framebuffers are specified by CPU address, but internally our GPU cache uses GPU + // addresses. We iterate through all cached framebuffers, and compare their starting CPU address + // to the one provided. This is obviously not great, and won't work if the framebuffer overlaps + // surfaces. 
@@ -945,6 +954,33 @@ Surface RasterizerCacheOpenGL::GetSurface(const SurfaceParams& params, ScaleMatc
     return surface;
 }
 
+boost::optional<Tegra::GPUVAddr> RasterizerCacheOpenGL::TryFindFramebufferGpuAddress(
+    VAddr cpu_addr) const {
+    // Tries to find the GPU address of a framebuffer based on the CPU address. This is because
+    // final output framebuffers are specified by CPU address, but internally our GPU cache uses
+    // GPU addresses. We iterate through all cached framebuffers, and compare their starting CPU
+    // address to the one provided. This is obviously not great, and won't work if the framebuffer
+    // overlaps surfaces.
+
+    std::vector<Tegra::GPUVAddr> gpu_addresses;
+    for (const auto& pair : surface_cache) {
+        for (const auto& surface : pair.second) {
+            const VAddr surface_cpu_addr = surface->GetCpuAddr();
+            if (cpu_addr >= surface_cpu_addr && cpu_addr < (surface_cpu_addr + surface->size)) {
+                ASSERT_MSG(cpu_addr == surface_cpu_addr, "overlapping surfaces are unsupported");
+                gpu_addresses.push_back(surface->addr);
+            }
+        }
+    }
+
+    if (gpu_addresses.empty()) {
+        return {};
+    }
+
+    ASSERT_MSG(gpu_addresses.size() == 1, ">1 surface is unsupported");
+    return gpu_addresses[0];
+}
+
 SurfaceRect_Tuple RasterizerCacheOpenGL::GetSurfaceSubRect(const SurfaceParams& params,
                                                            ScaleMatch match_res_scale,
                                                            bool load_if_create) {
@@ -1028,11 +1064,11 @@ Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextu
     auto& gpu = Core::System::GetInstance().GPU();
 
     SurfaceParams params;
-    params.addr = gpu.memory_manager->PhysicalToVirtualAddress(config.tic.Address());
-    params.width = config.tic.Width();
-    params.height = config.tic.Height();
+    params.addr = config.tic.Address();
     params.is_tiled = config.tic.IsTiled();
     params.pixel_format = SurfaceParams::PixelFormatFromTextureFormat(config.tic.format);
+    params.width = config.tic.Width() / params.GetCompresssionFactor();
+    params.height = config.tic.Height() / params.GetCompresssionFactor();
 
     // TODO(Subv): Different types per component are not supported.
     ASSERT(config.tic.r_type.Value() == config.tic.g_type.Value() &&
@@ -1045,7 +1081,7 @@ Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextu
         params.block_height = config.tic.BlockHeight();
     } else {
         // Use the texture-provided stride value if the texture isn't tiled.
-        params.stride = params.PixelsInBytes(config.tic.Pitch());
+        params.stride = static_cast<u32>(params.PixelsInBytes(config.tic.Pitch()));
     }
 
     params.UpdateParams();
@@ -1073,11 +1109,10 @@ Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextu
 SurfaceSurfaceRect_Tuple RasterizerCacheOpenGL::GetFramebufferSurfaces(
     bool using_color_fb, bool using_depth_fb, const MathUtil::Rectangle<s32>& viewport) {
     const auto& regs = Core::System::GetInstance().GPU().Maxwell3D().regs;
-    const auto& memory_manager = Core::System::GetInstance().GPU().memory_manager;
     const auto& config = regs.rt[0];
 
     // TODO(bunnei): This is hard coded to use just the first render buffer
-    LOG_WARNING(Render_OpenGL, "hard-coded for render target 0!");
+    NGLOG_WARNING(Render_OpenGL, "hard-coded for render target 0!");
 
     // update resolution_scale_factor and reset cache if changed
     // TODO (bunnei): This code was ported as-is from Citra, and is technically not thread-safe. We
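For linear (non-tiled) textures the stride now comes from the TIC pitch via PixelsInBytes. A small self-contained sketch, assuming PixelsInBytes is the inverse of the BytesInPixels helper shown later in this diff, i.e. bytes * CHAR_BIT / bpp:

#include <climits>
#include <cstdint>

constexpr std::uint32_t kBpp = 32;                   // ABGR8, from the bpp table in this diff
constexpr std::uint64_t PixelsInBytes(std::uint64_t bytes) { // assumed semantics
    return bytes * CHAR_BIT / kBpp;
}
static_assert(PixelsInBytes(1280 * 4) == 1280, "a 5120-byte pitch is a 1280-pixel stride");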
@@ -1106,7 +1141,7 @@ SurfaceSurfaceRect_Tuple RasterizerCacheOpenGL::GetFramebufferSurfaces(
     color_params.block_height = Tegra::Texture::TICEntry::DefaultBlockHeight;
     SurfaceParams depth_params = color_params;
 
-    color_params.addr = memory_manager->PhysicalToVirtualAddress(config.Address());
+    color_params.addr = config.Address();
     color_params.pixel_format = SurfaceParams::PixelFormatFromRenderTargetFormat(config.format);
     color_params.component_type = SurfaceParams::ComponentTypeFromRenderTarget(config.format);
     color_params.UpdateParams();
@@ -1122,8 +1157,8 @@ SurfaceSurfaceRect_Tuple RasterizerCacheOpenGL::GetFramebufferSurfaces(
     // Make sure that framebuffers don't overlap if both color and depth are being used
     if (using_color_fb && using_depth_fb &&
         boost::icl::length(color_vp_interval & depth_vp_interval)) {
-        LOG_CRITICAL(Render_OpenGL, "Color and depth framebuffer memory regions overlap; "
-                                    "overlapping framebuffers not supported!");
+        NGLOG_CRITICAL(Render_OpenGL, "Color and depth framebuffer memory regions overlap; "
+                                      "overlapping framebuffers not supported!");
         using_depth_fb = false;
     }
@@ -1222,7 +1257,8 @@ void RasterizerCacheOpenGL::DuplicateSurface(const Surface& src_surface,
     }
 }
 
-void RasterizerCacheOpenGL::ValidateSurface(const Surface& surface, VAddr addr, u64 size) {
+void RasterizerCacheOpenGL::ValidateSurface(const Surface& surface, Tegra::GPUVAddr addr,
+                                            u64 size) {
     if (size == 0)
         return;
 
@@ -1261,7 +1297,7 @@ void RasterizerCacheOpenGL::ValidateSurface(const Surface& surface, VAddr addr,
     }
 }
 
-void RasterizerCacheOpenGL::FlushRegion(VAddr addr, u64 size, Surface flush_surface) {
+void RasterizerCacheOpenGL::FlushRegion(Tegra::GPUVAddr addr, u64 size, Surface flush_surface) {
     if (size == 0)
         return;
 
@@ -1297,7 +1333,7 @@ void RasterizerCacheOpenGL::FlushAll() {
     FlushRegion(0, Kernel::VMManager::MAX_ADDRESS);
 }
 
-void RasterizerCacheOpenGL::InvalidateRegion(VAddr addr, u64 size, const Surface& region_owner) {
+void RasterizerCacheOpenGL::InvalidateRegion(Tegra::GPUVAddr addr, u64 size,
+                                             const Surface& region_owner) {
     if (size == 0)
         return;
 
@@ -1390,10 +1427,10 @@ void RasterizerCacheOpenGL::UnregisterSurface(const Surface& surface) {
     surface_cache.subtract({surface->GetInterval(), SurfaceSet{surface}});
 }
 
-void RasterizerCacheOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
-    const u64 num_pages =
-        ((addr + size - 1) >> Memory::PAGE_BITS) - (addr >> Memory::PAGE_BITS) + 1;
-    const u64 page_start = addr >> Memory::PAGE_BITS;
+void RasterizerCacheOpenGL::UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta) {
+    const u64 num_pages = ((addr + size - 1) >> Tegra::MemoryManager::PAGE_BITS) -
+                          (addr >> Tegra::MemoryManager::PAGE_BITS) + 1;
+    const u64 page_start = addr >> Tegra::MemoryManager::PAGE_BITS;
    const u64 page_end = page_start + num_pages;
 
     // Interval maps will erase segments if count reaches 0, so if delta is negative we have to
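The color/depth overlap test above leans on boost::icl: intersecting two intervals and checking the resulting length is the idiomatic emptiness test. A self-contained sketch of that pattern (the type aliases are stand-ins; the cache's own SurfaceInterval would be used in practice):

#include <cstdint>
#include <boost/icl/interval.hpp>

using Addr = std::uint64_t; // stand-in for Tegra::GPUVAddr
using AddrInterval = boost::icl::interval<Addr>::type;

bool RangesOverlap(Addr a_begin, Addr a_end, Addr b_begin, Addr b_end) {
    const auto a = AddrInterval::right_open(a_begin, a_end);
    const auto b = AddrInterval::right_open(b_begin, b_end);
    return boost::icl::length(a & b) != 0; // non-zero intersection length == shared bytes
}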
@@ -1406,8 +1443,10 @@ void RasterizerCacheOpenGL::UpdatePagesCachedCount(VAddr addr, u64 size, int del
         const auto interval = pair.first & pages_interval;
         const int count = pair.second;
-        const VAddr interval_start_addr = boost::icl::first(interval) << Memory::PAGE_BITS;
-        const VAddr interval_end_addr = boost::icl::last_next(interval) << Memory::PAGE_BITS;
+        const Tegra::GPUVAddr interval_start_addr = boost::icl::first(interval)
+                                                    << Tegra::MemoryManager::PAGE_BITS;
+        const Tegra::GPUVAddr interval_end_addr = boost::icl::last_next(interval)
+                                                  << Tegra::MemoryManager::PAGE_BITS;
         const u64 interval_size = interval_end_addr - interval_start_addr;
 
         if (delta > 0 && count == delta)
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
index e4cb3390f..55f1bdee8 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
@@ -17,12 +17,14 @@
 #ifdef __GNUC__
 #pragma GCC diagnostic pop
 #endif
+#include <boost/optional.hpp>
 #include <glad/glad.h>
 
 #include "common/assert.h"
 #include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "common/math_util.h"
 #include "video_core/gpu.h"
+#include "video_core/memory_manager.h"
 #include "video_core/renderer_opengl/gl_resource_manager.h"
 #include "video_core/textures/texture.h"
 
@@ -30,9 +32,9 @@ struct CachedSurface;
 using Surface = std::shared_ptr<CachedSurface>;
 using SurfaceSet = std::set<Surface>;
 
-using SurfaceRegions = boost::icl::interval_set<VAddr>;
-using SurfaceMap = boost::icl::interval_map<VAddr, Surface>;
-using SurfaceCache = boost::icl::interval_map<VAddr, SurfaceSet>;
+using SurfaceRegions = boost::icl::interval_set<Tegra::GPUVAddr>;
+using SurfaceMap = boost::icl::interval_map<Tegra::GPUVAddr, Surface>;
+using SurfaceCache = boost::icl::interval_map<Tegra::GPUVAddr, SurfaceSet>;
 
 using SurfaceInterval = SurfaceCache::interval_type;
 static_assert(std::is_same<SurfaceRegions::interval_type, SurfaceCache::interval_type>() &&
@@ -82,23 +84,49 @@ struct SurfaceParams {
         Invalid = 4,
     };
 
-    static constexpr unsigned int GetFormatBpp(PixelFormat format) {
+    /**
+     * Gets the compression factor for the specified PixelFormat. This applies to just the
+     * "compressed width" and "compressed height", not the overall compression factor of a
+     * compressed image. This is used for maintaining proper surface sizes for compressed
+     * texture formats.
+     */
+    static constexpr u32 GetCompresssionFactor(PixelFormat format) {
         if (format == PixelFormat::Invalid)
             return 0;
 
-        constexpr std::array<unsigned int, MaxPixelFormat> bpp_table = {
+        constexpr std::array<u32, MaxPixelFormat> compression_factor_table = {{
+            1, // ABGR8
+            1, // B5G6R5
+            1, // A2B10G10R10
+            4, // DXT1
+            4, // DXT23
+            4, // DXT45
+        }};
+
+        ASSERT(static_cast<size_t>(format) < compression_factor_table.size());
+        return compression_factor_table[static_cast<size_t>(format)];
+    }
+    u32 GetCompresssionFactor() const {
+        return GetCompresssionFactor(pixel_format);
+    }
+
+    static constexpr u32 GetFormatBpp(PixelFormat format) {
+        if (format == PixelFormat::Invalid)
+            return 0;
+
+        constexpr std::array<u32, MaxPixelFormat> bpp_table = {{
             32,  // ABGR8
             16,  // B5G6R5
             32,  // A2B10G10R10
             64,  // DXT1
             128, // DXT23
             128, // DXT45
-        };
+        }};
 
         ASSERT(static_cast<size_t>(format) < bpp_table.size());
         return bpp_table[static_cast<size_t>(format)];
     }
-    unsigned int GetFormatBpp() const {
+    u32 GetFormatBpp() const {
         return GetFormatBpp(pixel_format);
     }
 
@@ -106,6 +134,8 @@ struct SurfaceParams {
         switch (format) {
         case Tegra::RenderTargetFormat::RGBA8_UNORM:
             return PixelFormat::ABGR8;
+        case Tegra::RenderTargetFormat::RGB10_A2_UNORM:
+            return PixelFormat::A2B10G10R10;
         default:
             NGLOG_CRITICAL(HW_GPU, "Unimplemented format={}", static_cast<u32>(format));
             UNREACHABLE();
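The page bookkeeping in UpdatePagesCachedCount above is plain shift arithmetic. A self-contained sketch with an assumed page size of 64 KiB (the diff only names Tegra::MemoryManager::PAGE_BITS, not its value):

#include <cstdint>

constexpr std::uint64_t kPageBits = 16; // assumption: 64 KiB GPU pages

constexpr std::uint64_t PageStart(std::uint64_t addr) {
    return addr >> kPageBits;
}
constexpr std::uint64_t NumPages(std::uint64_t addr, std::uint64_t size) {
    // Index of the last touched page minus index of the first, plus one.
    return ((addr + size - 1) >> kPageBits) - (addr >> kPageBits) + 1;
}

static_assert(NumPages(0x10000, 1) == 1, "a single byte touches one page");
static_assert(NumPages(0x1FFFF, 2) == 2, "crossing a page boundary touches two");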
@@ -251,6 +281,24 @@ struct SurfaceParams {
     // Returns the region of the biggest valid rectangle within interval
     SurfaceInterval GetCopyableInterval(const Surface& src_surface) const;
 
+    /**
+     * Gets the actual width (in pixels) of the surface. This is provided because `width` is
+     * used for tracking the surface region in memory, which may be compressed for certain
+     * formats. In this scenario, `width` is actually the compressed width.
+     */
+    u32 GetActualWidth() const {
+        return width * GetCompresssionFactor();
+    }
+
+    /**
+     * Gets the actual height (in pixels) of the surface. This is provided because `height` is
+     * used for tracking the surface region in memory, which may be compressed for certain
+     * formats. In this scenario, `height` is actually the compressed height.
+     */
+    u32 GetActualHeight() const {
+        return height * GetCompresssionFactor();
+    }
+
     u32 GetScaledWidth() const {
         return width * res_scale;
     }
@@ -275,6 +323,8 @@ struct SurfaceParams {
         return pixels * GetFormatBpp(pixel_format) / CHAR_BIT;
     }
 
+    VAddr GetCpuAddr() const;
+
     bool ExactMatch(const SurfaceParams& other_surface) const;
     bool CanSubRect(const SurfaceParams& sub_surface) const;
     bool CanExpand(const SurfaceParams& expanded_surface) const;
@@ -283,8 +333,9 @@ struct SurfaceParams {
     MathUtil::Rectangle<u32> GetSubRect(const SurfaceParams& sub_surface) const;
     MathUtil::Rectangle<u32> GetScaledSubRect(const SurfaceParams& sub_surface) const;
 
-    VAddr addr = 0;
-    VAddr end = 0;
+    Tegra::GPUVAddr addr = 0;
+    Tegra::GPUVAddr end = 0;
+    boost::optional<VAddr> cpu_addr;
     u64 size = 0;
 
     u32 width = 0;
@@ -323,15 +374,15 @@ struct CachedSurface : SurfaceParams {
         if (format == PixelFormat::Invalid)
             return 0;
 
-        return SurfaceParams::GetFormatBpp(format) / 8;
+        return SurfaceParams::GetFormatBpp(format) / CHAR_BIT;
     }
 
     std::unique_ptr<u8[]> gl_buffer;
     size_t gl_buffer_size = 0;
 
     // Read/Write data in Switch memory to/from gl_buffer
-    void LoadGLBuffer(VAddr load_start, VAddr load_end);
-    void FlushGLBuffer(VAddr flush_start, VAddr flush_end);
+    void LoadGLBuffer(Tegra::GPUVAddr load_start, Tegra::GPUVAddr load_end);
+    void FlushGLBuffer(Tegra::GPUVAddr flush_start, Tegra::GPUVAddr flush_end);
 
     // Upload/Download data in gl_buffer in/to this surface's texture
     void UploadGLTexture(const MathUtil::Rectangle<u32>& rect, GLuint read_fb_handle,
@@ -360,6 +411,9 @@ public:
     Surface GetSurface(const SurfaceParams& params, ScaleMatch match_res_scale,
                        bool load_if_create);
 
+    /// Tries to find a framebuffer GPU address based on the provided CPU address
+    boost::optional<Tegra::GPUVAddr> TryFindFramebufferGpuAddress(VAddr cpu_addr) const;
+
     /// Attempt to find a subrect (resolution scaled) of a surface, otherwise loads a texture from
     /// Switch memory to OpenGL and caches it (if not already cached)
     SurfaceRect_Tuple GetSurfaceSubRect(const SurfaceParams& params, ScaleMatch match_res_scale,
@@ -379,10 +433,10 @@ public:
     SurfaceRect_Tuple GetTexCopySurface(const SurfaceParams& params);
 
     /// Write any cached resources overlapping the region back to memory (if dirty)
-    void FlushRegion(VAddr addr, u64 size, Surface flush_surface = nullptr);
+    void FlushRegion(Tegra::GPUVAddr addr, u64 size, Surface flush_surface = nullptr);
 
     /// Mark region as being invalidated by region_owner (nullptr if Switch memory)
-    void InvalidateRegion(VAddr addr, u64 size, const Surface& region_owner);
+    void InvalidateRegion(Tegra::GPUVAddr addr, u64 size, const Surface& region_owner);
 
     /// Flush all cached resources tracked by this cache manager
     void FlushAll();
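TryFindFramebufferGpuAddress returning boost::optional forces callers to handle the cache-miss case explicitly. A hypothetical call site sketching the intended use (the function name, parameters, and fallback here are assumptions, not part of the patch):

#include <boost/optional.hpp>
#include "video_core/renderer_opengl/gl_rasterizer_cache.h"

void PresentFramebuffer(RasterizerCacheOpenGL& cache, VAddr framebuffer_cpu_addr) {
    const boost::optional<Tegra::GPUVAddr> gpu_addr =
        cache.TryFindFramebufferGpuAddress(framebuffer_cpu_addr);
    if (!gpu_addr) {
        // No cached surface starts at this CPU address; fall back to a linear readback.
        return;
    }
    // ... fetch the surface at *gpu_addr and hand its texture to the presenter ...
}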
@@ -391,7 +445,7 @@ private:
     void DuplicateSurface(const Surface& src_surface, const Surface& dest_surface);
 
     /// Update surface's texture for given region when necessary
-    void ValidateSurface(const Surface& surface, VAddr addr, u64 size);
+    void ValidateSurface(const Surface& surface, Tegra::GPUVAddr addr, u64 size);
 
     /// Create a new surface
     Surface CreateSurface(const SurfaceParams& params);
@@ -403,7 +457,7 @@ private:
     void UnregisterSurface(const Surface& surface);
 
     /// Increase/decrease the number of surface in pages touching the specified region
-    void UpdatePagesCachedCount(VAddr addr, u64 size, int delta);
+    void UpdatePagesCachedCount(Tegra::GPUVAddr addr, u64 size, int delta);
 
     SurfaceCache surface_cache;
     PageMap cached_pages;
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 086424395..3dffb205d 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -519,7 +519,7 @@ private:
             }
             break;
         }
-        case OpCode::Type::FloatPredicate: {
+        case OpCode::Type::FloatSetPredicate: {
             std::string op_a = instr.fsetp.neg_a ? "-" : "";
             op_a += GetRegister(instr.gpr8);
 
@@ -570,6 +570,59 @@ private:
             }
             break;
         }
+        case OpCode::Type::FloatSet: {
+            std::string dest = GetRegister(instr.gpr0);
+            std::string op_a = instr.fset.neg_a ? "-" : "";
+            op_a += GetRegister(instr.gpr8);
+
+            if (instr.fset.abs_a) {
+                op_a = "abs(" + op_a + ')';
+            }
+
+            std::string op_b = instr.fset.neg_b ? "-" : "";
+
+            if (instr.is_b_imm) {
+                std::string imm = GetImmediate19(instr);
+                if (instr.fset.neg_imm)
+                    op_b += "(-" + imm + ')';
+                else
+                    op_b += imm;
+            } else {
+                if (instr.is_b_gpr) {
+                    op_b += GetRegister(instr.gpr20);
+                } else {
+                    op_b += GetUniform(instr.uniform);
+                }
+            }
+
+            if (instr.fset.abs_b) {
+                op_b = "abs(" + op_b + ")";
+            }
+
+            using Tegra::Shader::Pred;
+            ASSERT_MSG(instr.fset.pred39 == static_cast<u64>(Pred::UnusedIndex),
+                       "Compound predicates are not implemented");
+
+            // The fset instruction sets a register to 1.0 if the condition is true, and to 0
+            // otherwise.
+            using Tegra::Shader::PredCondition;
+            switch (instr.fset.cond) {
+            case PredCondition::LessThan:
+                SetDest(0, dest, "((" + op_a + ") < (" + op_b + ")) ? 1.0 : 0", 1, 1);
+                break;
+            case PredCondition::Equal:
+                SetDest(0, dest, "((" + op_a + ") == (" + op_b + ")) ? 1.0 : 0", 1, 1);
+                break;
+            case PredCondition::GreaterThan:
+                SetDest(0, dest, "((" + op_a + ") > (" + op_b + ")) ? 1.0 : 0", 1, 1);
1.0 : 0", 1, 1); + break; + default: + NGLOG_CRITICAL(HW_GPU, "Unhandled predicate condition: {} (a: {}, b: {})", + static_cast<unsigned>(instr.fset.cond.Value()), op_a, op_b); + UNREACHABLE(); + } + break; + } default: { switch (opcode->GetId()) { case OpCode::Id::EXIT: { diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index ab0acb20a..77d1692f4 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp @@ -152,7 +152,8 @@ void RendererOpenGL::LoadFBToScreenInfo(const Tegra::FramebufferConfig& framebuf screen_info.display_texture = screen_info.texture.resource.handle; screen_info.display_texcoords = MathUtil::Rectangle<float>(0.f, 0.f, 1.f, 1.f); - Rasterizer()->FlushRegion(framebuffer_addr, size_in_bytes); + Memory::RasterizerFlushVirtualRegion(framebuffer_addr, size_in_bytes, + Memory::FlushMode::Flush); VideoCore::MortonCopyPixels128(framebuffer.width, framebuffer.height, bytes_per_pixel, 4, Memory::GetPointer(framebuffer_addr), @@ -269,10 +270,9 @@ void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture, GLint internal_format; switch (framebuffer.pixel_format) { case Tegra::FramebufferConfig::PixelFormat::ABGR8: - // Use RGBA8 and swap in the fragment shader internal_format = GL_RGBA; texture.gl_format = GL_RGBA; - texture.gl_type = GL_UNSIGNED_INT_8_8_8_8; + texture.gl_type = GL_UNSIGNED_INT_8_8_8_8_REV; gl_framebuffer_data.resize(texture.width * texture.height * 4); break; default: @@ -295,17 +295,18 @@ void RendererOpenGL::DrawScreenTriangles(const ScreenInfo& screen_info, float x, const auto& texcoords = screen_info.display_texcoords; auto left = texcoords.left; auto right = texcoords.right; - if (framebuffer_transform_flags != Tegra::FramebufferConfig::TransformFlags::Unset) + if (framebuffer_transform_flags != Tegra::FramebufferConfig::TransformFlags::Unset) { if (framebuffer_transform_flags == Tegra::FramebufferConfig::TransformFlags::FlipV) { // Flip the framebuffer vertically left = texcoords.right; right = texcoords.left; } else { // Other transformations are unsupported - LOG_CRITICAL(Render_OpenGL, "Unsupported framebuffer_transform_flags=%d", - framebuffer_transform_flags); + NGLOG_CRITICAL(Render_OpenGL, "Unsupported framebuffer_transform_flags={}", + static_cast<u32>(framebuffer_transform_flags)); UNIMPLEMENTED(); } + } std::array<ScreenRectVertex, 4> vertices = {{ ScreenRectVertex(x, y, texcoords.top, left), @@ -427,9 +428,9 @@ bool RendererOpenGL::Init() { const char* gpu_vendor{reinterpret_cast<char const*>(glGetString(GL_VENDOR))}; const char* gpu_model{reinterpret_cast<char const*>(glGetString(GL_RENDERER))}; - LOG_INFO(Render_OpenGL, "GL_VERSION: %s", gl_version); - LOG_INFO(Render_OpenGL, "GL_VENDOR: %s", gpu_vendor); - LOG_INFO(Render_OpenGL, "GL_RENDERER: %s", gpu_model); + NGLOG_INFO(Render_OpenGL, "GL_VERSION: {}", gl_version); + NGLOG_INFO(Render_OpenGL, "GL_VENDOR: {}", gpu_vendor); + NGLOG_INFO(Render_OpenGL, "GL_RENDERER: {}", gpu_model); Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_Vendor", gpu_vendor); Core::Telemetry().AddField(Telemetry::FieldType::UserSystem, "GPU_Model", gpu_model); diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp index e0509f0ce..8b39b2bdf 100644 --- a/src/video_core/textures/decoders.cpp +++ b/src/video_core/textures/decoders.cpp @@ -4,6 +4,7 @@ #include <cstring> #include "common/assert.h" +#include "core/memory.h" #include 
"video_core/textures/decoders.h" #include "video_core/textures/texture.h" @@ -26,9 +27,8 @@ static u32 GetSwizzleOffset(u32 x, u32 y, u32 image_width, u32 bytes_per_pixel, return address; } -static void CopySwizzledData(u32 width, u32 height, u32 bytes_per_pixel, u32 out_bytes_per_pixel, - u8* swizzled_data, u8* unswizzled_data, bool unswizzle, - u32 block_height) { +void CopySwizzledData(u32 width, u32 height, u32 bytes_per_pixel, u32 out_bytes_per_pixel, + u8* swizzled_data, u8* unswizzled_data, bool unswizzle, u32 block_height) { u8* data_ptrs[2]; for (unsigned y = 0; y < height; ++y) { for (unsigned x = 0; x < width; ++x) { diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h index a700911cf..2562c4b06 100644 --- a/src/video_core/textures/decoders.h +++ b/src/video_core/textures/decoders.h @@ -17,6 +17,10 @@ namespace Texture { std::vector<u8> UnswizzleTexture(VAddr address, TextureFormat format, u32 width, u32 height, u32 block_height = TICEntry::DefaultBlockHeight); +/// Copies texture data from a buffer and performs swizzling/unswizzling as necessary. +void CopySwizzledData(u32 width, u32 height, u32 bytes_per_pixel, u32 out_bytes_per_pixel, + u8* swizzled_data, u8* unswizzled_data, bool unswizzle, u32 block_height); + /** * Decodes an unswizzled texture into a A8R8G8B8 texture. */ diff --git a/src/video_core/video_core.cpp b/src/video_core/video_core.cpp index 289140f31..89dc8ed1e 100644 --- a/src/video_core/video_core.cpp +++ b/src/video_core/video_core.cpp @@ -24,9 +24,9 @@ bool Init(EmuWindow* emu_window) { g_renderer = std::make_unique<RendererOpenGL>(); g_renderer->SetWindow(g_emu_window); if (g_renderer->Init()) { - LOG_DEBUG(Render, "initialized OK"); + NGLOG_DEBUG(Render, "initialized OK"); } else { - LOG_CRITICAL(Render, "initialization failed !"); + NGLOG_CRITICAL(Render, "initialization failed !"); return false; } return true; @@ -36,7 +36,7 @@ bool Init(EmuWindow* emu_window) { void Shutdown() { g_renderer.reset(); - LOG_DEBUG(Render, "shutdown OK"); + NGLOG_DEBUG(Render, "shutdown OK"); } } // namespace VideoCore |