author    | David Marcec <dmarcecguzman@gmail.com> | 2020-07-30 10:16:57 +0200
committer | David Marcec <dmarcecguzman@gmail.com> | 2020-07-30 10:16:57 +0200
commit    | 1b8fe7073b4877b697bd4101a48d77011100579e (patch)
tree      | e4d369207b07b25dd88997317ac8ab5584809f5d /src/audio_core
parent    | Fix perf regression (diff)
Diffstat (limited to 'src/audio_core')
-rw-r--r-- | src/audio_core/command_generator.cpp | 53
-rw-r--r-- | src/audio_core/command_generator.h   |  1
-rw-r--r-- | src/audio_core/voice_context.cpp     |  3
-rw-r--r-- | src/audio_core/voice_context.h       |  2
4 files changed, 32 insertions, 27 deletions
diff --git a/src/audio_core/command_generator.cpp b/src/audio_core/command_generator.cpp
index 440bfc140..0c3b278ea 100644
--- a/src/audio_core/command_generator.cpp
+++ b/src/audio_core/command_generator.cpp
@@ -470,6 +470,7 @@ s32 CommandGenerator::DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_s
     return samples_processed;
 }
 
+
 s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
                                   s32 sample_count, s32 channel, std::size_t mix_offset) {
     auto& in_params = voice_info.GetInParams();
@@ -486,33 +487,45 @@ s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_s
     const auto samples_remaining =
         (wave_buffer.end_sample_offset - wave_buffer.start_sample_offset) - dsp_state.offset;
+    const auto samples_processed = std::min(sample_count, samples_remaining);
     const auto start_offset =
         ((wave_buffer.start_sample_offset + dsp_state.offset) * in_params.channel_count);
-    const auto buffer_pos = wave_buffer.buffer_address + start_offset;
+    const auto end_offset = start_offset + samples_processed;
 
-    const auto samples_processed = std::min(sample_count, samples_remaining);
+    constexpr std::size_t FRAME_LEN = 8;
+    constexpr std::size_t SAMPLES_PER_FRAME = 14;
 
-    if (start_offset > dsp_state.adpcm_samples.size()) {
-        dsp_state.adpcm_samples.clear();
-    }
+    // Base buffer position
+    const auto start_frame_index = start_offset / SAMPLES_PER_FRAME;
+    const auto start_frame_buffer = start_frame_index * FRAME_LEN;
 
-    // TODO(ogniK): Proper ADPCM streaming
-    if (dsp_state.adpcm_samples.empty()) {
-        Codec::ADPCM_Coeff coeffs;
-        memory.ReadBlock(in_params.additional_params_address, coeffs.data(),
-                         sizeof(Codec::ADPCM_Coeff));
-        std::vector<u8> buffer(wave_buffer.buffer_size);
-        memory.ReadBlock(wave_buffer.buffer_address, buffer.data(), buffer.size());
-        dsp_state.adpcm_samples =
-            std::move(Codec::DecodeADPCM(buffer.data(), buffer.size(), coeffs, dsp_state.context));
-    }
+    const auto end_frame_index = end_offset / SAMPLES_PER_FRAME;
+    const auto end_frame_buffer = end_frame_index * FRAME_LEN;
+
+    const auto position_in_frame = start_offset % SAMPLES_PER_FRAME;
+
+    const auto buffer_size = (1 + (end_frame_index - start_frame_index)) * FRAME_LEN;
+
+    Codec::ADPCM_Coeff coeffs;
+    memory.ReadBlock(in_params.additional_params_address, coeffs.data(),
+                     sizeof(Codec::ADPCM_Coeff));
+    std::vector<u8> buffer(buffer_size);
+    memory.ReadBlock(wave_buffer.buffer_address + start_frame_buffer, buffer.data(), buffer.size());
+    const auto adpcm_samples =
+        std::move(Codec::DecodeADPCM(buffer.data(), buffer.size(), coeffs, dsp_state.context));
 
     for (std::size_t i = 0; i < samples_processed; i++) {
-        const auto sample_offset = i + start_offset;
-        sample_buffer[mix_offset + i] =
-            dsp_state.adpcm_samples[sample_offset * in_params.channel_count + channel];
+        const auto sample_offset = position_in_frame + i * in_params.channel_count + channel;
+        const auto sample = adpcm_samples[sample_offset];
+        sample_buffer[mix_offset + i] = sample;
     }
 
+    // Manually set our context
+    const auto frame_before_final = (end_frame_index - start_frame_index) - 1;
+    const auto frame_before_final_off = frame_before_final * SAMPLES_PER_FRAME;
+    dsp_state.context.yn2 = adpcm_samples[frame_before_final_off + 12];
+    dsp_state.context.yn1 = adpcm_samples[frame_before_final_off + 13];
+
     return samples_processed;
 }
@@ -628,10 +641,6 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* o
             dsp_state.played_sample_count = 0;
         }
     } else {
-        if (in_params.sample_format == SampleFormat::Adpcm) {
-            // TODO(ogniK): Remove this when ADPCM streaming implemented
-            dsp_state.adpcm_samples.clear();
-        }
 
         // Update our wave buffer states
         dsp_state.is_wave_buffer_valid[dsp_state.wave_buffer_index] = false;
diff --git a/src/audio_core/command_generator.h b/src/audio_core/command_generator.h
index 3ad8973b5..3f49c1303 100644
--- a/src/audio_core/command_generator.h
+++ b/src/audio_core/command_generator.h
@@ -71,7 +71,6 @@ private:
                     s32 channel, std::size_t mix_offset);
     void DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* output, VoiceState& dsp_state,
                                s32 channel, s32 target_sample_rate, s32 sample_count, s32 node_id);
-    void Resample(s32* output, s32* input, s32 pitch, s32& fraction, s32 sample_count);
 
     AudioCommon::AudioRendererParameter& worker_params;
     VoiceContext& voice_context;
diff --git a/src/audio_core/voice_context.cpp b/src/audio_core/voice_context.cpp
index 16ae1afe8..1d8f69844 100644
--- a/src/audio_core/voice_context.cpp
+++ b/src/audio_core/voice_context.cpp
@@ -520,8 +520,7 @@ void VoiceContext::SortInfo() {
 }
 
 void VoiceContext::UpdateStateByDspShared() {
-    std::memcpy(voice_states.data(), dsp_voice_states.data(),
-                sizeof(VoiceState) * dsp_voice_states.size());
+    voice_states = dsp_voice_states;
 }
 
 } // namespace AudioCore
diff --git a/src/audio_core/voice_context.h b/src/audio_core/voice_context.h
index b1d554766..13b0a7f0f 100644
--- a/src/audio_core/voice_context.h
+++ b/src/audio_core/voice_context.h
@@ -101,8 +101,6 @@ struct VoiceState {
     u32 external_context_size{};
     bool is_external_context_used{};
     bool voice_dropped{};
-    // TODO(ogniK): Hack until ADPCM streaming is implemented
-    std::vector<s16> adpcm_samples{};
 };
 
 class VoiceChannelResource {
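
The core of the change is in DecodeAdpcm: instead of decoding the entire wave buffer into a per-voice adpcm_samples cache on first use, it now reads and decodes only the 8-byte ADPCM frames (14 samples each, per the FRAME_LEN and SAMPLES_PER_FRAME constants) that cover the requested sample window, and writes the decoder history back into dsp_state.context so the next call continues where this one stopped. Below is a minimal standalone sketch of that frame-window arithmetic, not yuzu code: it assumes a mono voice (so the channel_count factor in the real code drops out) and uses made-up offsets and sample counts purely for illustration.

```cpp
// Standalone sketch of the frame-window math used by the new DecodeAdpcm.
// Names mirror the diff; the offsets below are hypothetical.
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
    // Each 8-byte ADPCM frame decodes to 14 samples.
    constexpr std::size_t FRAME_LEN = 8;
    constexpr std::size_t SAMPLES_PER_FRAME = 14;

    // Hypothetical request: the voice has already played 100 samples of a
    // 1000-sample wave buffer and the mixer asks for 160 more.
    const std::size_t start_sample_offset = 0;
    const std::size_t end_sample_offset = 1000;
    const std::size_t dsp_offset = 100;
    const std::size_t sample_count = 160;

    const auto samples_remaining = (end_sample_offset - start_sample_offset) - dsp_offset;
    const auto samples_processed = std::min(sample_count, samples_remaining);

    const auto start_offset = start_sample_offset + dsp_offset;
    const auto end_offset = start_offset + samples_processed;

    // Only the frames covering [start_offset, end_offset) are read and decoded.
    const auto start_frame_index = start_offset / SAMPLES_PER_FRAME;
    const auto start_frame_buffer = start_frame_index * FRAME_LEN; // byte offset into the wave buffer
    const auto end_frame_index = end_offset / SAMPLES_PER_FRAME;
    const auto buffer_size = (1 + (end_frame_index - start_frame_index)) * FRAME_LEN;

    // The window rarely starts on a frame boundary, so output begins
    // position_in_frame samples into the first decoded frame.
    const auto position_in_frame = start_offset % SAMPLES_PER_FRAME;

    std::printf("read %zu bytes at byte offset %zu, skip %zu samples, output %zu samples\n",
                buffer_size, start_frame_buffer, position_in_frame, samples_processed);
}
```

With only a small window decoded per call, the per-voice std::vector<s16> adpcm_samples cache in VoiceState becomes unnecessary, which is why the header drops that member and UpdateStateByDspShared is reduced to a plain container assignment.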