Diffstat (limited to 'servers/audio/audio_stream.cpp')
-rw-r--r--  servers/audio/audio_stream.cpp  169
 1 file changed, 150 insertions(+), 19 deletions(-)
diff --git a/servers/audio/audio_stream.cpp b/servers/audio/audio_stream.cpp
index 2cc2f5c291..c098a97906 100644
--- a/servers/audio/audio_stream.cpp
+++ b/servers/audio/audio_stream.cpp
@@ -5,8 +5,8 @@
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
-/* Copyright (c) 2007-2020 Juan Linietsky, Ariel Manzur. */
-/* Copyright (c) 2014-2020 Godot Engine contributors (cf. AUTHORS.md). */
+/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
+/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
@@ -30,9 +30,68 @@
#include "audio_stream.h"
+#include "core/config/project_settings.h"
#include "core/os/os.h"
-#include "core/project_settings.h"
+void AudioStreamPlayback::start(float p_from_pos) {
+ if (GDVIRTUAL_CALL(_start, p_from_pos)) {
+ return;
+ }
+ ERR_FAIL_MSG("AudioStreamPlayback::start unimplemented!");
+}
+void AudioStreamPlayback::stop() {
+ if (GDVIRTUAL_CALL(_stop)) {
+ return;
+ }
+ ERR_FAIL_MSG("AudioStreamPlayback::stop unimplemented!");
+}
+bool AudioStreamPlayback::is_playing() const {
+ bool ret;
+ if (GDVIRTUAL_CALL(_is_playing, ret)) {
+ return ret;
+ }
+ ERR_FAIL_V_MSG(false, "AudioStreamPlayback::is_playing unimplemented!");
+}
+
+int AudioStreamPlayback::get_loop_count() const {
+ int ret;
+ if (GDVIRTUAL_CALL(_get_loop_count, ret)) {
+ return ret;
+ }
+ return 0;
+}
+
+float AudioStreamPlayback::get_playback_position() const {
+ float ret;
+ if (GDVIRTUAL_CALL(_get_playback_position, ret)) {
+ return ret;
+ }
+ ERR_FAIL_V_MSG(0, "AudioStreamPlayback::get_playback_position unimplemented!");
+}
+void AudioStreamPlayback::seek(float p_time) {
+ if (GDVIRTUAL_CALL(_seek, p_time)) {
+ return;
+ }
+}
+
+int AudioStreamPlayback::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
+ int ret;
+ if (GDVIRTUAL_CALL(_mix, p_buffer, p_rate_scale, p_frames, ret)) {
+ return ret;
+ }
+ WARN_PRINT_ONCE("AudioStreamPlayback::mix unimplemented!");
+ return 0;
+}
+
+void AudioStreamPlayback::_bind_methods() {
+ GDVIRTUAL_BIND(_start, "from_pos")
+ GDVIRTUAL_BIND(_stop)
+ GDVIRTUAL_BIND(_is_playing)
+ GDVIRTUAL_BIND(_get_loop_count)
+ GDVIRTUAL_BIND(_get_playback_position)
+ GDVIRTUAL_BIND(_seek, "position")
+ GDVIRTUAL_BIND(_mix, "buffer", "rate_scale", "frames");
+}
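Reviewer note: the GDVIRTUAL fallbacks above only fire for script-implemented playbacks; a native playback still just overrides the same virtuals directly. A minimal sketch for reference (the sine-generator class and all of its names are hypothetical, not part of this patch):

#include "core/math/math_funcs.h"
#include "servers/audio/audio_stream.h"
#include "servers/audio_server.h"

class AudioStreamPlaybackSine : public AudioStreamPlayback {
	GDCLASS(AudioStreamPlaybackSine, AudioStreamPlayback);

	bool active = false;
	double phase = 0.0; // playback position in seconds

public:
	virtual void start(float p_from_pos) override {
		phase = p_from_pos;
		active = true;
	}
	virtual void stop() override { active = false; }
	virtual bool is_playing() const override { return active; }
	virtual int get_loop_count() const override { return 0; }
	virtual float get_playback_position() const override { return float(phase); }
	virtual void seek(float p_time) override { phase = p_time; }

	virtual int mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) override {
		float mix_rate = AudioServer::get_singleton()->get_mix_rate();
		for (int i = 0; i < p_frames; i++) {
			float s = Math::sin(phase * Math_TAU * 440.0); // 440 Hz test tone
			p_buffer[i] = AudioFrame(s, s);
			phase += p_rate_scale / mix_rate;
		}
		return p_frames; // every requested frame contains real audio
	}
};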
//////////////////////////////
void AudioStreamPlaybackResampled::_begin_resample() {
@@ -46,11 +105,13 @@ void AudioStreamPlaybackResampled::_begin_resample() {
mix_offset = 0;
}
-void AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
+int AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
float target_rate = AudioServer::get_singleton()->get_mix_rate();
- float global_rate_scale = AudioServer::get_singleton()->get_global_rate_scale();
+ float playback_speed_scale = AudioServer::get_singleton()->get_playback_speed_scale();
+
+ uint64_t mix_increment = uint64_t(((get_stream_sampling_rate() * p_rate_scale * playback_speed_scale) / double(target_rate)) * double(FP_LEN));
- uint64_t mix_increment = uint64_t(((get_stream_sampling_rate() * p_rate_scale) / double(target_rate * global_rate_scale)) * double(FP_LEN));
+ int mixed_frames_total = p_frames;
for (int i = 0; i < p_frames; i++) {
uint32_t idx = CUBIC_INTERP_HISTORY + uint32_t(mix_offset >> FP_BITS);
@@ -62,13 +123,18 @@ void AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale,
AudioFrame y2 = internal_buffer[idx - 1];
AudioFrame y3 = internal_buffer[idx - 0];
+ if (idx <= internal_buffer_end && idx >= internal_buffer_end && mixed_frames_total == p_frames) {
+ // The internal buffer ends somewhere in this range, and we haven't yet recorded the number of good frames we have.
+ mixed_frames_total = i;
+ }
+
float mu2 = mu * mu;
- AudioFrame a0 = y3 - y2 - y0 + y1;
- AudioFrame a1 = y0 - y1 - a0;
+ AudioFrame a0 = 3 * y1 - 3 * y2 + y3 - y0;
+ AudioFrame a1 = 2 * y0 - 5 * y1 + 4 * y2 - y3;
AudioFrame a2 = y2 - y0;
- AudioFrame a3 = y1;
+ AudioFrame a3 = 2 * y1;
- p_buffer[i] = (a0 * mu * mu2 + a1 * mu2 + a2 * mu + a3);
+ p_buffer[i] = (a0 * mu * mu2 + a1 * mu2 + a2 * mu + a3) / 2;
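Side note for reviewers: the new a0..a3, together with the final division by 2, are the standard Catmull-Rom cubic,

    p(mu) = ((-y0 + 3*y1 - 3*y2 + y3)*mu^3 + (2*y0 - 5*y1 + 4*y2 - y3)*mu^2 + (y2 - y0)*mu + 2*y1) / 2

which still passes exactly through y1 at mu = 0 and y2 at mu = 1, as the old coefficients did.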
mix_offset += mix_increment;
@@ -78,7 +144,14 @@ void AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale,
internal_buffer[2] = internal_buffer[INTERNAL_BUFFER_LEN + 2];
internal_buffer[3] = internal_buffer[INTERNAL_BUFFER_LEN + 3];
if (is_playing()) {
- _mix_internal(internal_buffer + 4, INTERNAL_BUFFER_LEN);
+ int mixed_frames = _mix_internal(internal_buffer + 4, INTERNAL_BUFFER_LEN);
+ if (mixed_frames != INTERNAL_BUFFER_LEN) {
+ // internal_buffer[mixed_frames] is the first frame of silence.
+ internal_buffer_end = mixed_frames;
+ } else {
+ // The internal buffer does not contain the first frame of silence.
+ internal_buffer_end = -1;
+ }
} else {
//fill with silence, not playing
for (int j = 0; j < INTERNAL_BUFFER_LEN; ++j) {
@@ -88,19 +161,56 @@ void AudioStreamPlaybackResampled::mix(AudioFrame *p_buffer, float p_rate_scale,
mix_offset -= (INTERNAL_BUFFER_LEN << FP_BITS);
}
}
+ return mixed_frames_total;
}
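For anyone reading the fixed-point bookkeeping for the first time: mix_offset advances by the source/target rate ratio scaled into FP_BITS fractional bits, and the returned count marks where real stream data stopped. A standalone sketch of just the stepping (the constants here are assumptions for illustration, not copied from audio_stream.h):

#include <cstdint>
#include <cstdio>

int main() {
	// Assumed fixed-point layout; audio_stream.h defines the real FP_BITS/FP_LEN/FP_MASK.
	const int FP_BITS = 16;
	const uint64_t FP_LEN = 1ull << FP_BITS;
	const uint64_t FP_MASK = FP_LEN - 1;

	double stream_rate = 44100.0; // source sampling rate
	double target_rate = 48000.0; // AudioServer mix rate
	double rate_scale = 1.0, playback_speed_scale = 1.0;

	// Same formula as the patched mix(): source frames advanced per output frame.
	uint64_t mix_increment = uint64_t(((stream_rate * rate_scale * playback_speed_scale) / target_rate) * double(FP_LEN));

	uint64_t mix_offset = 0;
	for (int i = 0; i < 4; i++) {
		uint32_t idx = uint32_t(mix_offset >> FP_BITS);    // whole source frame index
		float mu = (mix_offset & FP_MASK) / float(FP_LEN); // fractional part fed to the cubic
		printf("output frame %d reads source frame %u at mu=%.4f\n", i, idx, mu);
		mix_offset += mix_increment;
	}
	return 0;
}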
////////////////////////////////
+Ref<AudioStreamPlayback> AudioStream::instance_playback() {
+ Ref<AudioStreamPlayback> ret;
+ if (GDVIRTUAL_CALL(_instance_playback, ret)) {
+ return ret;
+ }
+ ERR_FAIL_V_MSG(Ref<AudioStreamPlayback>(), "Method must be implemented!");
+}
+String AudioStream::get_stream_name() const {
+ String ret;
+ if (GDVIRTUAL_CALL(_get_stream_name, ret)) {
+ return ret;
+ }
+ return String();
+}
+
+float AudioStream::get_length() const {
+ float ret;
+ if (GDVIRTUAL_CALL(_get_length, ret)) {
+ return ret;
+ }
+ return 0;
+}
+
+bool AudioStream::is_monophonic() const {
+ bool ret;
+ if (GDVIRTUAL_CALL(_is_monophonic, ret)) {
+ return ret;
+ }
+ return true;
+}
+
void AudioStream::_bind_methods() {
ClassDB::bind_method(D_METHOD("get_length"), &AudioStream::get_length);
+ ClassDB::bind_method(D_METHOD("is_monophonic"), &AudioStream::is_monophonic);
+ GDVIRTUAL_BIND(_instance_playback);
+ GDVIRTUAL_BIND(_get_stream_name);
+ GDVIRTUAL_BIND(_get_length);
+ GDVIRTUAL_BIND(_is_monophonic);
}
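As with the playback class, the _instance_playback/_get_stream_name/_get_length/_is_monophonic bindings mirror what a native stream overrides directly. A hypothetical companion to the playback sketch further up (names invented for illustration only):

class AudioStreamSine : public AudioStream {
	GDCLASS(AudioStreamSine, AudioStream);

public:
	virtual Ref<AudioStreamPlayback> instance_playback() override {
		Ref<AudioStreamPlaybackSine> playback;
		playback.instantiate();
		return playback;
	}
	virtual String get_stream_name() const override { return "SineTest"; }
	virtual float get_length() const override { return 0; } // endless, like the microphone stream below
	virtual bool is_monophonic() const override { return false; } // each playback generates its own tone
};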
////////////////////////////////
Ref<AudioStreamPlayback> AudioStreamMicrophone::instance_playback() {
Ref<AudioStreamPlaybackMicrophone> playback;
- playback.instance();
+ playback.instantiate();
playbacks.insert(playback.ptr());
@@ -121,13 +231,17 @@ float AudioStreamMicrophone::get_length() const {
return 0;
}
+bool AudioStreamMicrophone::is_monophonic() const {
+ return true;
+}
+
void AudioStreamMicrophone::_bind_methods() {
}
AudioStreamMicrophone::AudioStreamMicrophone() {
}
-void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_frames) {
+int AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_frames) {
AudioDriver::get_singleton()->lock();
Vector<int32_t> buf = AudioDriver::get_singleton()->get_input_buffer();
@@ -138,6 +252,8 @@ void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_fr
unsigned int input_position = AudioDriver::get_singleton()->get_input_position();
#endif
+ int mixed_frames = p_frames;
+
if (playback_delay > input_size) {
for (int i = 0; i < p_frames; i++) {
p_buffer[i] = AudioFrame(0.0f, 0.0f);
@@ -157,6 +273,9 @@ void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_fr
p_buffer[i] = AudioFrame(l, r);
} else {
+ if (mixed_frames == p_frames) {
+ mixed_frames = i;
+ }
p_buffer[i] = AudioFrame(0.0f, 0.0f);
}
}
@@ -169,10 +288,12 @@ void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_fr
#endif
AudioDriver::get_singleton()->unlock();
+
+ return mixed_frames;
}
-void AudioStreamPlaybackMicrophone::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
- AudioStreamPlaybackResampled::mix(p_buffer, p_rate_scale, p_frames);
+int AudioStreamPlaybackMicrophone::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
+ return AudioStreamPlaybackResampled::mix(p_buffer, p_rate_scale, p_frames);
}
float AudioStreamPlaybackMicrophone::get_stream_sampling_rate() {
@@ -184,7 +305,7 @@ void AudioStreamPlaybackMicrophone::start(float p_from_pos) {
return;
}
- if (!GLOBAL_GET("audio/enable_audio_input")) {
+ if (!GLOBAL_GET("audio/driver/enable_input")) {
WARN_PRINT("Need to enable Project settings > Audio > Enable Audio Input option to use capturing.");
return;
}
@@ -256,7 +377,7 @@ float AudioStreamRandomPitch::get_random_pitch() const {
Ref<AudioStreamPlayback> AudioStreamRandomPitch::instance_playback() {
Ref<AudioStreamPlaybackRandomPitch> playback;
- playback.instance();
+ playback.instantiate();
if (audio_stream.is_valid()) {
playback->playback = audio_stream->instance_playback();
}
@@ -281,6 +402,14 @@ float AudioStreamRandomPitch::get_length() const {
return 0;
}
+bool AudioStreamRandomPitch::is_monophonic() const {
+ if (audio_stream.is_valid()) {
+ return audio_stream->is_monophonic();
+ }
+
+	return true; // It doesn't really matter what we return here, but there's no sense instancing many playbacks of a null stream.
+}
+
void AudioStreamRandomPitch::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_audio_stream", "stream"), &AudioStreamRandomPitch::set_audio_stream);
ClassDB::bind_method(D_METHOD("get_audio_stream"), &AudioStreamRandomPitch::get_audio_stream);
@@ -345,16 +474,18 @@ void AudioStreamPlaybackRandomPitch::seek(float p_time) {
}
}
-void AudioStreamPlaybackRandomPitch::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
+int AudioStreamPlaybackRandomPitch::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
if (playing.is_valid()) {
- playing->mix(p_buffer, p_rate_scale * pitch_scale, p_frames);
+ return playing->mix(p_buffer, p_rate_scale * pitch_scale, p_frames);
} else {
for (int i = 0; i < p_frames; i++) {
p_buffer[i] = AudioFrame(0, 0);
}
+ return p_frames;
}
}
AudioStreamPlaybackRandomPitch::~AudioStreamPlaybackRandomPitch() {
random_pitch->playbacks.erase(this);
}
+/////////////////////////////////////////////
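Caller-side, the practical effect of the new int return from mix() is that a mixer can tell where real audio ended without a separate query. A hedged sketch of how a caller might use it (the surrounding mixer code is invented; only the mix() contract comes from this patch):

void drain_block(Ref<AudioStreamPlayback> playback) {
	AudioFrame buf[256];
	int frames_mixed = playback->mix(buf, 1.0f, 256);
	if (frames_mixed < 256) {
		// Frames past the returned count carry no more stream data
		// (the resampled path above pads them with silence), so a
		// polyphonic mixer could retire this playback here.
		playback->stop();
	}
}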