author    | ReinUsesLisp <reinuseslisp@airmail.cc>            | 2021-06-14 07:27:49 +0200
committer | ameerj <52414509+ameerj@users.noreply.github.com> | 2021-07-23 03:51:35 +0200
commit    | 61cd7dd30128633b656ce3264da74bef1ba00bb5 (patch)
tree      | 42d0c4e0e5d3a7f3fc581ebb660cd14cdfcf0300
parent    | shader: Add shader loop safety check settings (diff)
Diffstat
15 files changed, 38 insertions(+), 28 deletions(-)
diff --git a/src/common/logging/filter.cpp b/src/common/logging/filter.cpp
index 4f2cc29e1..f055f0e11 100644
--- a/src/common/logging/filter.cpp
+++ b/src/common/logging/filter.cpp
@@ -144,6 +144,10 @@ bool ParseFilterRule(Filter& instance, Iterator begin, Iterator end) {
     SUB(Render, Software) \
     SUB(Render, OpenGL) \
     SUB(Render, Vulkan) \
+    CLS(Shader) \
+    SUB(Shader, SPIRV) \
+    SUB(Shader, GLASM) \
+    SUB(Shader, GLSL) \
     CLS(Audio) \
     SUB(Audio, DSP) \
     SUB(Audio, Sink) \
diff --git a/src/common/logging/types.h b/src/common/logging/types.h
index 88b0e9c01..7ad0334fc 100644
--- a/src/common/logging/types.h
+++ b/src/common/logging/types.h
@@ -114,6 +114,10 @@ enum class Class : u8 {
     Render_Software, ///< Software renderer backend
     Render_OpenGL,   ///< OpenGL backend
     Render_Vulkan,   ///< Vulkan backend
+    Shader,          ///< Shader recompiler
+    Shader_SPIRV,    ///< Shader SPIR-V code generation
+    Shader_GLASM,    ///< Shader GLASM code generation
+    Shader_GLSL,     ///< Shader GLSL code generation
     Audio,           ///< Audio emulation
     Audio_DSP,       ///< The HLE implementation of the DSP
     Audio_Sink,      ///< Emulator audio output backend
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm.cpp b/src/shader_recompiler/backend/glasm/emit_glasm.cpp
index fc01797b6..832b4fd40 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm.cpp
@@ -253,7 +253,7 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
         }
     }
     if (!ctx.reg_alloc.IsEmpty()) {
-        // LOG_WARNING ...;
+        LOG_WARNING(Shader_GLASM, "Register leak after generating code");
     }
 }
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
index c1df7a342..20b925877 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
@@ -145,14 +145,16 @@ void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, ScalarF32 value,
         if (ctx.stage == Stage::Geometry || ctx.profile.support_viewport_index_layer_non_geometry) {
             ctx.Add("MOV.F result.layer.x,{};", value);
         } else {
-            // LOG_WARNING
+            LOG_WARNING(Shader_GLASM,
+                        "Layer stored outside of geometry shader not supported by device");
         }
         break;
     case IR::Attribute::ViewportIndex:
         if (ctx.stage == Stage::Geometry || ctx.profile.support_viewport_index_layer_non_geometry) {
             ctx.Add("MOV.F result.viewport.x,{};", value);
         } else {
-            // LOG_WARNING
+            LOG_WARNING(Shader_GLASM,
+                        "Viewport stored outside of geometry shader not supported by device");
         }
         break;
     case IR::Attribute::PointSize:
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp
index 81d5fe72c..09e3a9b82 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_image.cpp
@@ -139,12 +139,12 @@ void SwizzleOffsets(EmitContext& ctx, Register off_x, Register off_y, const IR::
 std::string GradOffset(const IR::Value& offset) {
     if (offset.IsImmediate()) {
-        // LOG_WARNING immediate
+        LOG_WARNING(Shader_GLASM, "Gradient offset is a scalar immediate");
         return "";
     }
     IR::Inst* const vector{offset.InstRecursive()};
     if (!vector->AreAllArgsImmediates()) {
-        // LOG_WARNING elements not immediate
+        LOG_WARNING(Shader_GLASM, "Gradient offset vector is not immediate");
         return "";
     }
     switch (vector->NumArgs()) {
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp
index 60735fe31..a487a0744 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_not_implemented.cpp
@@ -115,7 +115,7 @@ void EmitEmitVertex(EmitContext& ctx, ScalarS32 stream) {
 void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) {
     if (!stream.IsImmediate()) {
-        // LOG_WARNING not immediate
+        LOG_WARNING(Shader_GLASM, "Stream is not immediate");
     }
     ctx.reg_alloc.Consume(stream);
     ctx.Add("ENDPRIM;");
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_warp.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_warp.cpp
index 8cec5ee7e..544d475b4 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_warp.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_warp.cpp
@@ -115,7 +115,7 @@ void EmitDPdxFine(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
     if (ctx.profile.support_derivative_control) {
         ctx.Add("DDX.FINE {}.x,{};", inst, p);
     } else {
-        // LOG_WARNING
+        LOG_WARNING(Shader_GLASM, "Fine derivatives not supported by device");
         ctx.Add("DDX {}.x,{};", inst, p);
     }
 }
@@ -124,7 +124,7 @@ void EmitDPdyFine(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
     if (ctx.profile.support_derivative_control) {
         ctx.Add("DDY.FINE {}.x,{};", inst, p);
     } else {
-        // LOG_WARNING
+        LOG_WARNING(Shader_GLASM, "Fine derivatives not supported by device");
         ctx.Add("DDY {}.x,{};", inst, p);
     }
 }
@@ -133,7 +133,7 @@ void EmitDPdxCoarse(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
     if (ctx.profile.support_derivative_control) {
         ctx.Add("DDX.COARSE {}.x,{};", inst, p);
     } else {
-        // LOG_WARNING
+        LOG_WARNING(Shader_GLASM, "Coarse derivatives not supported by device");
         ctx.Add("DDX {}.x,{};", inst, p);
     }
 }
@@ -142,7 +142,7 @@ void EmitDPdyCoarse(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
     if (ctx.profile.support_derivative_control) {
         ctx.Add("DDY.COARSE {}.x,{};", inst, p);
     } else {
-        // LOG_WARNING
+        LOG_WARNING(Shader_GLASM, "Coarse derivatives not supported by device");
         ctx.Add("DDY {}.x,{};", inst, p);
     }
 }
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.cpp b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
index cba420cda..14a99750d 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
@@ -294,7 +294,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
                         Id main_func) {
     const Info& info{program.info};
     if (info.uses_fp32_denorms_flush && info.uses_fp32_denorms_preserve) {
-        // LOG_ERROR(HW_GPU, "Fp32 denorm flush and preserve on the same shader");
+        LOG_ERROR(Shader_SPIRV, "Fp32 denorm flush and preserve on the same shader");
     } else if (info.uses_fp32_denorms_flush) {
         if (profile.support_fp32_denorm_flush) {
             ctx.AddCapability(spv::Capability::DenormFlushToZero);
@@ -307,7 +307,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
             ctx.AddCapability(spv::Capability::DenormPreserve);
             ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormPreserve, 32U);
         } else {
-            // LOG_WARNING(HW_GPU, "Fp32 denorm preserve used in shader without host support");
+            LOG_WARNING(Shader_SPIRV, "Fp32 denorm preserve used in shader without host support");
         }
     }
     if (!profile.support_separate_denorm_behavior) {
@@ -315,7 +315,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
         return;
     }
     if (info.uses_fp16_denorms_flush && info.uses_fp16_denorms_preserve) {
-        // LOG_ERROR(HW_GPU, "Fp16 denorm flush and preserve on the same shader");
+        LOG_ERROR(Shader_SPIRV, "Fp16 denorm flush and preserve on the same shader");
     } else if (info.uses_fp16_denorms_flush) {
         if (profile.support_fp16_denorm_flush) {
             ctx.AddCapability(spv::Capability::DenormFlushToZero);
@@ -328,7 +328,7 @@ void SetupDenormControl(const Profile& profile, const IR::Program& program, Emit
             ctx.AddCapability(spv::Capability::DenormPreserve);
             ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormPreserve, 16U);
         } else {
-            // LOG_WARNING(HW_GPU, "Fp16 denorm preserve used in shader without host support");
+            LOG_WARNING(Shader_SPIRV, "Fp16 denorm preserve used in shader without host support");
         }
     }
 }
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
index 053800eb7..9af8bb9e1 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
@@ -73,7 +73,7 @@ Id StorageAtomicU64(EmitContext& ctx, const IR::Value& binding, const IR::Value&
         const auto [scope, semantics]{AtomicArgs(ctx)};
         return (ctx.*atomic_func)(ctx.U64, pointer, scope, semantics, value);
     }
-    // LOG_WARNING(..., "Int64 Atomics not supported, fallback to non-atomic");
+    LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
     const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
                                     binding, offset, sizeof(u32[2]))};
     const Id original_value{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};
@@ -140,7 +140,7 @@ Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
         const auto [scope, semantics]{AtomicArgs(ctx)};
         return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
     }
-    // LOG_WARNING("Int64 Atomics not supported, fallback to non-atomic");
+    LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
     const Id pointer_1{SharedPointer(ctx, offset, 0)};
     const Id pointer_2{SharedPointer(ctx, offset, 1)};
     const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
@@ -266,7 +266,7 @@ Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const
         const auto [scope, semantics]{AtomicArgs(ctx)};
         return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
     }
-    // LOG_WARNING(..., "Int64 Atomics not supported, fallback to non-atomic");
+    LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
     const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
                                     binding, offset, sizeof(u32[2]))};
     const Id original{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index cf842e1e0..647804814 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -39,7 +39,7 @@ public:
         }
         const std::array values{offset.InstRecursive(), offset2.InstRecursive()};
         if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) {
-            // LOG_WARNING("Not all arguments in PTP are immediate, STUBBING");
+            LOG_WARNING(Shader_SPIRV, "Not all arguments in PTP are immediate, ignoring");
             return;
         }
         const IR::Opcode opcode{values[0]->GetOpcode()};
@@ -442,7 +442,7 @@ Id EmitImageGradient(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, I
 Id EmitImageRead(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords) {
     const auto info{inst->Flags<IR::TextureInstInfo>()};
     if (info.image_format == ImageFormat::Typeless && !ctx.profile.support_typeless_image_loads) {
-        // LOG_WARNING(..., "Typeless image read not supported by host");
+        LOG_WARNING(Shader_SPIRV, "Typeless image read not supported by host");
         return ctx.ConstantNull(ctx.U32[4]);
     }
     return Emit(&EmitContext::OpImageSparseRead, &EmitContext::OpImageRead, ctx, inst, ctx.U32[4],
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
index 072a3b1bd..9e7eb3cb1 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_special.cpp
@@ -131,7 +131,7 @@ void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream) {
     if (stream.IsImmediate()) {
         ctx.OpEmitStreamVertex(ctx.Def(stream));
     } else {
-        // LOG_WARNING(..., "EmitVertex's stream is not constant");
+        LOG_WARNING(Shader_SPIRV, "Stream is not immediate");
         ctx.OpEmitStreamVertex(ctx.u32_zero_value);
     }
     // Restore fixed pipeline point size after emitting the vertex
@@ -142,7 +142,7 @@ void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) {
     if (stream.IsImmediate()) {
         ctx.OpEndStreamPrimitive(ctx.Def(stream));
     } else {
-        // LOG_WARNING(..., "EndPrimitive's stream is not constant");
+        LOG_WARNING(Shader_SPIRV, "Stream is not immediate");
         ctx.OpEndStreamPrimitive(ctx.u32_zero_value);
     }
 }
diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.cpp b/src/shader_recompiler/frontend/ir/ir_emitter.cpp
index 6c37af5e7..d2ac2acac 100644
--- a/src/shader_recompiler/frontend/ir/ir_emitter.cpp
+++ b/src/shader_recompiler/frontend/ir/ir_emitter.cpp
@@ -270,7 +270,7 @@ static U1 GetFlowTest(IREmitter& ir, FlowTest flow_test) {
     case FlowTest::RGT:
         return ir.LogicalAnd(ir.LogicalNot(ir.GetSFlag()), ir.LogicalNot(ir.GetZFlag()));
     case FlowTest::FCSM_TR:
-        // LOG_WARNING(ShaderDecompiler, "FCSM_TR CC State (Stubbed)");
+        LOG_WARNING(Shader, "(STUBBED) FCSM_TR");
         return ir.Imm1(false);
     case FlowTest::CSM_TA:
     case FlowTest::CSM_TR:
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/internal_stage_buffer_entry_read.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/internal_stage_buffer_entry_read.cpp
index edd6220a8..9b85f8059 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/internal_stage_buffer_entry_read.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/internal_stage_buffer_entry_read.cpp
@@ -46,7 +46,7 @@ void TranslatorVisitor::ISBERD(u64 insn) {
     if (isberd.shift != Shift::Default) {
         throw NotImplementedException("Shift {}", isberd.shift.Value());
     }
-    // LOG_WARNING(..., "ISBERD is stubbed");
+    LOG_WARNING(Shader, "(STUBBED) called");
     X(isberd.dest_reg, X(isberd.src_reg));
 }
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp
index fe3cdfa96..20cb2674e 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/move_special_register.cpp
@@ -118,7 +118,7 @@ enum class SpecialRegister : u64 {
     case SpecialRegister::SR_THREAD_KILL:
         return IR::U32{ir.Select(ir.IsHelperInvocation(), ir.Imm32(-1), ir.Imm32(0))};
     case SpecialRegister::SR_INVOCATION_INFO:
-        // LOG_WARNING(..., "SR_INVOCATION_INFO is stubbed");
+        LOG_WARNING(Shader, "(STUBBED) SR_INVOCATION_INFO");
         return ir.Imm32(0x00ff'0000);
     case SpecialRegister::SR_TID: {
         const IR::Value tid{ir.LocalInvocationId()};
@@ -140,10 +140,10 @@ enum class SpecialRegister : u64 {
     case SpecialRegister::SR_CTAID_Z:
         return ir.WorkgroupIdZ();
     case SpecialRegister::SR_WSCALEFACTOR_XY:
-        // LOG_WARNING(..., "SR_WSCALEFACTOR_XY is stubbed");
+        LOG_WARNING(Shader, "(STUBBED) SR_WSCALEFACTOR_XY");
         return ir.Imm32(Common::BitCast<u32>(1.0f));
     case SpecialRegister::SR_WSCALEFACTOR_Z:
-        // LOG_WARNING(..., "SR_WSCALEFACTOR_Z is stubbed");
+        LOG_WARNING(Shader, "(STUBBED) SR_WSCALEFACTOR_Z");
         return ir.Imm32(Common::BitCast<u32>(1.0f));
     case SpecialRegister::SR_LANEID:
         return ir.LaneId();
@@ -160,7 +160,7 @@ enum class SpecialRegister : u64 {
     case SpecialRegister::SR_Y_DIRECTION:
         return ir.BitCast<IR::U32>(ir.YDirection());
     case SpecialRegister::SR_AFFINITY:
-        // LOG_WARNING(..., "SR_AFFINITY is stubbed");
+        LOG_WARNING(Shader, "(STUBBED) SR_AFFINITY");
         return ir.Imm32(0); // This is the default value hardware returns.
     default:
         throw NotImplementedException("S2R special register {}", special_register);
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/vote.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/vote.cpp
index 0793611ff..7ce370f09 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/vote.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/vote.cpp
@@ -48,7 +48,7 @@ void TranslatorVisitor::VOTE(u64 insn) {
 }
 
 void TranslatorVisitor::VOTE_vtg(u64) {
-    // LOG_WARNING(ShaderDecompiler, "VOTE.VTG: Stubbed!");
+    LOG_WARNING(Shader, "(STUBBED) called");
 }
 
 } // namespace Shader::Maxwell
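For context: the new classes plug into yuzu's existing `LOG_*` macros from `common/logging/log.h`, and the `CLS`/`SUB` entries added to `filter.cpp` suggest they should be addressable through the usual log filter (for example a rule like `Shader.SPIRV:Debug`, following the `Class.Subclass:Level` pattern of the neighbouring `Render.*` entries — an assumption, not something this commit documents). Below is a minimal sketch of how backend or frontend code might use the new channels; the function and its parameter are hypothetical and not part of this patch.

```cpp
// Illustrative sketch only, not code from this commit. Assumes yuzu's LOG_*
// macros from common/logging/log.h and the Shader* log classes added to
// common/logging/types.h above.
#include "common/logging/log.h"

void ExampleShaderDiagnostics(bool device_supports_int64_atomics) {
    // Frontend/IR-level messages use the parent class, as in ir_emitter.cpp.
    LOG_WARNING(Shader, "(STUBBED) example frontend message");

    // Backend-specific messages use the per-code-generator subclasses,
    // mirroring the pattern introduced in emit_spirv_atomic.cpp.
    if (!device_supports_int64_atomics) {
        LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
    }
}
```

The macro usage itself matches the calls introduced in the diff (the log class is passed bare and expanded by the macro); only the helper function, its parameter, and the filter rule above are illustrative.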