Diffstat (limited to 'servers/audio/audio_stream.cpp')
-rw-r--r--	servers/audio/audio_stream.cpp	145
1 files changed, 138 insertions, 7 deletions
diff --git a/servers/audio/audio_stream.cpp b/servers/audio/audio_stream.cpp
index f3fa857682..c098a97906 100644
--- a/servers/audio/audio_stream.cpp
+++ b/servers/audio/audio_stream.cpp
@@ -33,6 +33,65 @@
 #include "core/config/project_settings.h"
 #include "core/os/os.h"
 
+void AudioStreamPlayback::start(float p_from_pos) {
+	if (GDVIRTUAL_CALL(_start, p_from_pos)) {
+		return;
+	}
+	ERR_FAIL_MSG("AudioStreamPlayback::start unimplemented!");
+}
+void AudioStreamPlayback::stop() {
+	if (GDVIRTUAL_CALL(_stop)) {
+		return;
+	}
+	ERR_FAIL_MSG("AudioStreamPlayback::stop unimplemented!");
+}
+bool AudioStreamPlayback::is_playing() const {
+	bool ret;
+	if (GDVIRTUAL_CALL(_is_playing, ret)) {
+		return ret;
+	}
+	ERR_FAIL_V_MSG(false, "AudioStreamPlayback::is_playing unimplemented!");
+}
+
+int AudioStreamPlayback::get_loop_count() const {
+	int ret;
+	if (GDVIRTUAL_CALL(_get_loop_count, ret)) {
+		return ret;
+	}
+	return 0;
+}
+
+float AudioStreamPlayback::get_playback_position() const {
+	float ret;
+	if (GDVIRTUAL_CALL(_get_playback_position, ret)) {
+		return ret;
+	}
+	ERR_FAIL_V_MSG(0, "AudioStreamPlayback::get_playback_position unimplemented!");
+}
+void AudioStreamPlayback::seek(float p_time) {
+	if (GDVIRTUAL_CALL(_seek, p_time)) {
+		return;
+	}
+}
+
+int AudioStreamPlayback::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
+	int ret;
+	if (GDVIRTUAL_CALL(_mix, p_buffer, p_rate_scale, p_frames, ret)) {
+		return ret;
+	}
+	WARN_PRINT_ONCE("AudioStreamPlayback::mix unimplemented!");
+	return 0;
+}
+
+void AudioStreamPlayback::_bind_methods() {
+	GDVIRTUAL_BIND(_start, "from_pos")
+	GDVIRTUAL_BIND(_stop)
+	GDVIRTUAL_BIND(_is_playing)
+	GDVIRTUAL_BIND(_get_loop_count)
+	GDVIRTUAL_BIND(_get_playback_position)
+	GDVIRTUAL_BIND(_seek, "position")
+	GDVIRTUAL_BIND(_mix, "buffer", "rate_scale", "frames");
+}
 //////////////////////////////
 
 void AudioStreamPlaybackResampled::_begin_resample() {
@@ -46,12 +105,14 @@ void AudioStreamPlaybackResampled::_begin_resample() {
 	mix_offset = 0;
 }
 
-void AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
+int AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
 	float target_rate = AudioServer::get_singleton()->get_mix_rate();
 	float playback_speed_scale = AudioServer::get_singleton()->get_playback_speed_scale();
 
 	uint64_t mix_increment = uint64_t(((get_stream_sampling_rate() * p_rate_scale * playback_speed_scale) / double(target_rate)) * double(FP_LEN));
 
+	int mixed_frames_total = p_frames;
+
 	for (int i = 0; i < p_frames; i++) {
 		uint32_t idx = CUBIC_INTERP_HISTORY + uint32_t(mix_offset >> FP_BITS);
 		//standard cubic interpolation (great quality/performance ratio)
@@ -62,6 +123,11 @@ void AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale,
 		AudioFrame y2 = internal_buffer[idx - 1];
 		AudioFrame y3 = internal_buffer[idx - 0];
 
+		if (idx <= internal_buffer_end && idx >= internal_buffer_end && mixed_frames_total == p_frames) {
+			// The internal buffer ends somewhere in this range, and we haven't yet recorded the number of good frames we have.
+			mixed_frames_total = i;
+		}
+
 		float mu2 = mu * mu;
 		AudioFrame a0 = 3 * y1 - 3 * y2 + y3 - y0;
 		AudioFrame a1 = 2 * y0 - 5 * y1 + 4 * y2 - y3;
@@ -78,7 +144,14 @@ void AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale,
 			internal_buffer[2] = internal_buffer[INTERNAL_BUFFER_LEN + 2];
 			internal_buffer[3] = internal_buffer[INTERNAL_BUFFER_LEN + 3];
 			if (is_playing()) {
-				_mix_internal(internal_buffer + 4, INTERNAL_BUFFER_LEN);
+				int mixed_frames = _mix_internal(internal_buffer + 4, INTERNAL_BUFFER_LEN);
+				if (mixed_frames != INTERNAL_BUFFER_LEN) {
+					// internal_buffer[mixed_frames] is the first frame of silence.
+					internal_buffer_end = mixed_frames;
+				} else {
+					// The internal buffer does not contain the first frame of silence.
+					internal_buffer_end = -1;
+				}
 			} else {
 				//fill with silence, not playing
 				for (int j = 0; j < INTERNAL_BUFFER_LEN; ++j) {
@@ -88,12 +161,49 @@ void AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale,
 			mix_offset -= (INTERNAL_BUFFER_LEN << FP_BITS);
 		}
 	}
+	return mixed_frames_total;
 }
 
 ////////////////////////////////
 
+Ref<AudioStreamPlayback> AudioStream::instance_playback() {
+	Ref<AudioStreamPlayback> ret;
+	if (GDVIRTUAL_CALL(_instance_playback, ret)) {
+		return ret;
+	}
+	ERR_FAIL_V_MSG(Ref<AudioStreamPlayback>(), "Method must be implemented!");
+}
+String AudioStream::get_stream_name() const {
+	String ret;
+	if (GDVIRTUAL_CALL(_get_stream_name, ret)) {
+		return ret;
+	}
+	return String();
+}
+
+float AudioStream::get_length() const {
+	float ret;
+	if (GDVIRTUAL_CALL(_get_length, ret)) {
+		return ret;
+	}
+	return 0;
+}
+
+bool AudioStream::is_monophonic() const {
+	bool ret;
+	if (GDVIRTUAL_CALL(_is_monophonic, ret)) {
+		return ret;
+	}
+	return true;
+}
+
 void AudioStream::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("get_length"), &AudioStream::get_length);
+	ClassDB::bind_method(D_METHOD("is_monophonic"), &AudioStream::is_monophonic);
+	GDVIRTUAL_BIND(_instance_playback);
+	GDVIRTUAL_BIND(_get_stream_name);
+	GDVIRTUAL_BIND(_get_length);
+	GDVIRTUAL_BIND(_is_monophonic);
 }
 
 ////////////////////////////////
@@ -121,13 +231,17 @@ float AudioStreamMicrophone::get_length() const {
 	return 0;
 }
 
+bool AudioStreamMicrophone::is_monophonic() const {
+	return true;
+}
+
 void AudioStreamMicrophone::_bind_methods() {
 }
 
 AudioStreamMicrophone::AudioStreamMicrophone() {
 }
 
-void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_frames) {
+int AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_frames) {
 	AudioDriver::get_singleton()->lock();
 
 	Vector<int32_t> buf = AudioDriver::get_singleton()->get_input_buffer();
@@ -138,6 +252,8 @@ void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_fr
 	unsigned int input_position = AudioDriver::get_singleton()->get_input_position();
 #endif
 
+	int mixed_frames = p_frames;
+
 	if (playback_delay > input_size) {
 		for (int i = 0; i < p_frames; i++) {
 			p_buffer[i] = AudioFrame(0.0f, 0.0f);
@@ -157,6 +273,9 @@ void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_fr
 
 				p_buffer[i] = AudioFrame(l, r);
 			} else {
+				if (mixed_frames == p_frames) {
+					mixed_frames = i;
+				}
 				p_buffer[i] = AudioFrame(0.0f, 0.0f);
 			}
 		}
@@ -169,10 +288,12 @@ void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_fr
 #endif
 
 	AudioDriver::get_singleton()->unlock();
+
+	return mixed_frames;
 }
 
-void AudioStreamPlaybackMicrophone::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
-	AudioStreamPlaybackResampled::mix(p_buffer, p_rate_scale, p_frames);
+int AudioStreamPlaybackMicrophone::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
+	return AudioStreamPlaybackResampled::mix(p_buffer, p_rate_scale, p_frames);
 }
 
 float AudioStreamPlaybackMicrophone::get_stream_sampling_rate() {
@@ -281,6 +402,14 @@ float AudioStreamRandomPitch::get_length() const {
 	return 0;
 }
 
+bool AudioStreamRandomPitch::is_monophonic() const {
+	if (audio_stream.is_valid()) {
+		return audio_stream->is_monophonic();
+	}
+
+	return true; // It doesn't really matter what we return here, but no sense instancing a many playbacks of a null stream.
+}
+
 void AudioStreamRandomPitch::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("set_audio_stream", "stream"), &AudioStreamRandomPitch::set_audio_stream);
 	ClassDB::bind_method(D_METHOD("get_audio_stream"), &AudioStreamRandomPitch::get_audio_stream);
@@ -345,16 +474,18 @@ void AudioStreamPlaybackRandomPitch::seek(float p_time) {
 	}
 }
 
-void AudioStreamPlaybackRandomPitch::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
+int AudioStreamPlaybackRandomPitch::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
 	if (playing.is_valid()) {
-		playing->mix(p_buffer, p_rate_scale * pitch_scale, p_frames);
+		return playing->mix(p_buffer, p_rate_scale * pitch_scale, p_frames);
 	} else {
 		for (int i = 0; i < p_frames; i++) {
 			p_buffer[i] = AudioFrame(0, 0);
 		}
+		return p_frames;
 	}
 }
 
 AudioStreamPlaybackRandomPitch::~AudioStreamPlaybackRandomPitch() {
 	random_pitch->playbacks.erase(this);
 }
+/////////////////////////////////////////////
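
For context, this diff changes every mix() variant from void to int so a playback reports how many frames of real audio it produced; AudioStreamPlaybackResampled uses that count to set internal_buffer_end, and the microphone and random-pitch playbacks simply forward it. A minimal sketch of an engine-side playback following the new contract is shown below; the class name, the 440 Hz tone, and the file placement are illustrative assumptions and are not part of this commit.

// Hypothetical example (not part of this commit): a tone generator that
// honors the new `int mix(...)` contract on AudioStreamPlayback.
#include "core/math/math_funcs.h"
#include "servers/audio/audio_stream.h"
#include "servers/audio_server.h"

class AudioStreamPlaybackTone : public AudioStreamPlayback {
	GDCLASS(AudioStreamPlaybackTone, AudioStreamPlayback);

	bool active = false;
	double phase = 0.0; // Position within one period, in [0, 1).

public:
	virtual void start(float p_from_pos = 0.0) override {
		phase = 0.0;
		active = true;
	}
	virtual void stop() override { active = false; }
	virtual bool is_playing() const override { return active; }
	virtual int get_loop_count() const override { return 0; }
	virtual float get_playback_position() const override { return 0.0; }
	virtual void seek(float p_time) override {}

	virtual int mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) override {
		if (!active) {
			return 0; // No frames mixed; the caller can treat the buffer as silence.
		}
		double mix_rate = AudioServer::get_singleton()->get_mix_rate();
		double increment = 440.0 * p_rate_scale / mix_rate; // Assumed 440 Hz test tone.
		for (int i = 0; i < p_frames; i++) {
			float v = Math::sin(phase * Math_TAU);
			p_buffer[i] = AudioFrame(v, v);
			phase = Math::fmod(phase + increment, 1.0);
		}
		return p_frames; // Every requested frame carries signal.
	}
};

Returning the mixed-frame count rather than nothing is what lets callers distinguish "buffer full of audio" from "playback ended partway through this block", which is exactly the boundary the resampler records in internal_buffer_end above.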