 doc/base/classes.xml                           |  47
 drivers/pulseaudio/audio_driver_pulseaudio.cpp | 170
 drivers/pulseaudio/audio_driver_pulseaudio.h   |  11
 drivers/wasapi/audio_driver_wasapi.cpp         |   8
 editor/editor_audio_buses.cpp                  | 111
 editor/editor_audio_buses.h                    |  19
 platform/osx/audio_driver_osx.cpp              |  11
 scene/2d/audio_stream_player_2d.cpp            |  56
 scene/3d/audio_stream_player_3d.cpp            | 154
 scene/audio/audio_player.cpp                   |  14
 servers/audio_server.cpp                       |  49
 servers/audio_server.h                         |  24
 12 files changed, 391 insertions, 283 deletions
diff --git a/doc/base/classes.xml b/doc/base/classes.xml
index 3e3a170d2a..a29300e5b9 100644
--- a/doc/base/classes.xml
+++ b/doc/base/classes.xml
@@ -79,7 +79,7 @@
Returns the arc sine of 's' in radians. Use to get the angle of sine 's'.
[codeblock]
# s is 0.523599 or 30 degrees if converted with rad2deg(s)
- s = asin(0.5)
+ s = asin(0.5)
[/codeblock]
</description>
</method>
@@ -2352,8 +2352,8 @@
</return>
<description>
Call this to initialize this interface. The first interface that is initialized is identified as the primary interface and it will be used for rendering output.
- After initializing the interface you want to use you then need to enable the AR/VR mode of a viewport and rendering should commence.
- Note that you must enable the AR/VR mode on the main viewport for any device that uses the main output of Godot such as for mobile VR.
+ After initializing the interface you want to use you then need to enable the AR/VR mode of a viewport and rendering should commence.
+ Note that you must enable the AR/VR mode on the main viewport for any device that uses the main output of Godot such as for mobile VR.
If you do this for a platform that handles its own output (such as OpenVR) Godot will show just one eye without distortion on screen. Alternatively you can add a separate viewport node to your scene and enable AR/VR on that viewport and it will be used to output to the HMD leaving you free to do anything you like in the main window such as using a separate camera as a spectator camera or render out something completely different.
While currently not used you can activate additional interfaces, you may wish to do this if you want to track controllers from other platforms. However at this point in time only one interface can render to an HMD.
</description>
@@ -2444,7 +2444,7 @@
</argument>
<description>
Changes the world scaling factor.
- Most AR/VR platforms will assume a unit size of 1 unit in your game world = 1 meter in the real world. This scale allows you to adjust this to the unit system you use in your game.
+ Most AR/VR platforms will assume a unit size of 1 unit in your game world = 1 meter in the real world. This scale allows you to adjust this to the unit system you use in your game.
Note that this method is a passthrough to the ARVRServer itself.
</description>
</method>
@@ -2867,7 +2867,7 @@
Adds a new point at the given position with the given identifier. The algorithm prefers points with lower [code]weight_scale[/code] to form a path. The [code]id[/code] must be 0 or larger, and the [code]weight_scale[/code] must be 1 or larger.
[codeblock]
var as = AStar.new()
-
+
as.add_point(1, Vector3(1,0,0), 4) # Adds the point (1,0,0) with weight_scale=4 and id=1
[/codeblock]
</description>
@@ -2903,10 +2903,10 @@
Creates a segment between the given points.
[codeblock]
var as = AStar.new()
-
+
as.add_point(1, Vector3(1,1,0))
as.add_point(2, Vector3(0,5,0))
-
+
as.connect_points(1, 2, false) # If bidirectional=false it's only possible to go from point 1 to point 2
# and not from point 2 to point 1.
[/codeblock]
@@ -2948,12 +2948,12 @@
Returns the closest position to [code]to_pos[/code] that resides inside a segment between two connected points.
[codeblock]
var as = AStar.new()
-
+
as.add_point(1, Vector3(0,0,0))
as.add_point(2, Vector3(0,5,0))
-
+
as.connect_points(1, 2)
-
+
var res = as.get_closest_pos_in_segment(Vector3(3,3,0)) # returns (0, 3, 0)
[/codeblock]
The result is in the segment that goes from [code]y=0[/code] to [code]y=5[/code]. It's the closest position in the segment to the given point.
@@ -2970,18 +2970,18 @@
Returns an array with the ids of the points that form the path found by AStar between the given points. The array is ordered from the starting point to the ending point of the path.
[codeblock]
var as = AStar.new()
-
+
as.add_point(1, Vector3(0,0,0))
as.add_point(2, Vector3(0,1,0), 1) # default weight is 1
as.add_point(3, Vector3(1,1,0))
as.add_point(4, Vector3(2,0,0))
-
+
as.connect_points(1, 2, false)
as.connect_points(2, 3, false)
as.connect_points(4, 3, false)
as.connect_points(1, 4, false)
as.connect_points(5, 4, false)
-
+
var res = as.get_id_path(1, 3) # returns [1, 2, 3]
[/codeblock]
If you change the 2nd point's weight to 3, then the result will be [code][1, 4, 3][/code] instead, because now even though the distance is longer, it's "easier" to get through point 4 than through point 2.
@@ -6390,8 +6390,11 @@
</class>
<class name="AtlasTexture" inherits="Texture" category="Core">
<brief_description>
+ Packs multiple small textures in a single, bigger one. Helps to optimize video memory costs and render calls.
</brief_description>
<description>
+ Texture resource aimed at managing big texture files that pack multiple smaller textures. Consists of a Texture, a margin that defines the border width, and a Region
+ that defines the actual area of the AtlasTexture.
</description>
<methods>
<method name="get_atlas" qualifiers="const">
@@ -6439,10 +6442,13 @@
</methods>
<members>
<member name="atlas" type="Texture" setter="set_atlas" getter="get_atlas" brief="">
+ The texture that contains the atlas. Can be any Texture subtype.
</member>
<member name="margin" type="Rect2" setter="set_margin" getter="get_margin" brief="">
+ The margin around the region. The Rect2's 'w' (width) and 'h' (height) resize the texture so it fits within the margin.
</member>
<member name="region" type="Rect2" setter="set_region" getter="get_region" brief="">
+ The AtlasTexture's used region.
</member>
</members>
<constants>
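
The AtlasTexture description added above names three members (atlas, margin, region) without showing them in use. A minimal engine-side sketch, assuming a previously loaded Ref<Texture> named big_texture; the values are illustrative and not part of this commit:

    #include "scene/resources/texture.h" // AtlasTexture is declared here in this branch

    Ref<AtlasTexture> at;
    at.instance();
    at->set_atlas(big_texture);          // the packed source Texture (any Texture subtype)
    at->set_region(Rect2(0, 0, 64, 64)); // the sub-rectangle of the atlas actually drawn
    at->set_margin(Rect2(2, 2, 4, 4));   // border around the region; w/h shrink the drawn area to fit inside it
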
@@ -50192,18 +50198,19 @@
- Border width (individual width for each border)
- Rounded corners (individual radius for each corner)
- Shadow
- About corner radius:
- Setting corner radius to high values is allowed. As soon as corners would overlap the stylebox will switch to a relative system. Example:
- [codeblock]
+
+ About corner radius:
+ You may use high values for 'corner radius'. As soon as corners would overlap the stylebox will switch to a relative system. Example:
+ [codeblock]
height = 30
corner_radius_top_left = 50
corner_raidus_bottom_left = 100
- [/codeblock]
- The relative system now would take the 1:2 ratio of the two left corners to calculate the actual corner width. Both corners added will [b]never[/b] be more than the height. Result:
- [codeblock]
+ [/codeblock]
+ The relative system now would take the 1:2 ratio of the two left corners to calculate the actual corner width. Both corners added will [b]never[/b] be more than the height. Result:
+ [codeblock]
corner_radius_top_left: 10
corner_raidus_bottom_left: 20
- [/codeblock]
+ [/codeblock]
</description>
<methods>
<method name="get_aa_size" qualifiers="const">
diff --git a/drivers/pulseaudio/audio_driver_pulseaudio.cpp b/drivers/pulseaudio/audio_driver_pulseaudio.cpp
index 1798c84d85..e4c8641a3b 100644
--- a/drivers/pulseaudio/audio_driver_pulseaudio.cpp
+++ b/drivers/pulseaudio/audio_driver_pulseaudio.cpp
@@ -31,23 +31,144 @@
#ifdef PULSEAUDIO_ENABLED
-#include <pulse/error.h>
+#include <pulse/pulseaudio.h>
#include "os/os.h"
#include "project_settings.h"
+void pa_state_cb(pa_context *c, void *userdata) {
+ pa_context_state_t state;
+ int *pa_ready = (int *)userdata;
+
+ state = pa_context_get_state(c);
+ switch (state) {
+ case PA_CONTEXT_FAILED:
+ case PA_CONTEXT_TERMINATED:
+ *pa_ready = 2;
+ break;
+
+ case PA_CONTEXT_READY:
+ *pa_ready = 1;
+ break;
+ }
+}
+
+void sink_info_cb(pa_context *c, const pa_sink_info *l, int eol, void *userdata) {
+ unsigned int *channels = (unsigned int *)userdata;
+
+ // If eol is set to a positive number, you're at the end of the list
+ if (eol > 0) {
+ return;
+ }
+
+ *channels = l->channel_map.channels;
+}
+
+void server_info_cb(pa_context *c, const pa_server_info *i, void *userdata) {
+ char *default_output = (char *)userdata;
+
+ strncpy(default_output, i->default_sink_name, 1024);
+}
+
+static unsigned int detect_channels() {
+
+ pa_mainloop *pa_ml;
+ pa_mainloop_api *pa_mlapi;
+ pa_operation *pa_op;
+ pa_context *pa_ctx;
+
+ int state = 0;
+ int pa_ready = 0;
+
+ char default_output[1024];
+ unsigned int channels = 2;
+
+ pa_ml = pa_mainloop_new();
+ pa_mlapi = pa_mainloop_get_api(pa_ml);
+ pa_ctx = pa_context_new(pa_mlapi, "Godot");
+
+ int ret = pa_context_connect(pa_ctx, NULL, PA_CONTEXT_NOFLAGS, NULL);
+ if (ret < 0) {
+ pa_context_unref(pa_ctx);
+ pa_mainloop_free(pa_ml);
+
+ return 2;
+ }
+
+ pa_context_set_state_callback(pa_ctx, pa_state_cb, &pa_ready);
+
+ // Wait until the pa server is ready
+ while (pa_ready == 0) {
+ pa_mainloop_iterate(pa_ml, 1, NULL);
+ }
+
+ // Check if there was an error connecting to the pa server
+ if (pa_ready == 2) {
+ pa_context_disconnect(pa_ctx);
+ pa_context_unref(pa_ctx);
+ pa_mainloop_free(pa_ml);
+
+ return 2;
+ }
+
+ // Get the default output device name
+ pa_op = pa_context_get_server_info(pa_ctx, &server_info_cb, (void *)default_output);
+ if (pa_op) {
+ while (pa_operation_get_state(pa_op) == PA_OPERATION_RUNNING) {
+ ret = pa_mainloop_iterate(pa_ml, 1, NULL);
+ if (ret < 0) {
+ ERR_PRINT("pa_mainloop_iterate error");
+ }
+ }
+
+ pa_operation_unref(pa_op);
+
+ // Now using the device name get the amount of channels
+ pa_op = pa_context_get_sink_info_by_name(pa_ctx, default_output, &sink_info_cb, (void *)&channels);
+ if (pa_op) {
+ while (pa_operation_get_state(pa_op) == PA_OPERATION_RUNNING) {
+ ret = pa_mainloop_iterate(pa_ml, 1, NULL);
+ if (ret < 0) {
+ ERR_PRINT("pa_mainloop_iterate error");
+ }
+ }
+
+ pa_operation_unref(pa_op);
+ } else {
+ ERR_PRINT("pa_context_get_sink_info_by_name error");
+ }
+ } else {
+ ERR_PRINT("pa_context_get_server_info error");
+ }
+
+ pa_context_disconnect(pa_ctx);
+ pa_context_unref(pa_ctx);
+ pa_mainloop_free(pa_ml);
+
+ return channels;
+}
+
Error AudioDriverPulseAudio::init() {
active = false;
thread_exited = false;
exit_thread = false;
- pcm_open = false;
- samples_in = NULL;
- samples_out = NULL;
mix_rate = GLOBAL_DEF("audio/mix_rate", DEFAULT_MIX_RATE);
- speaker_mode = SPEAKER_MODE_STEREO;
- channels = 2;
+ channels = detect_channels();
+
+ switch (channels) {
+ case 2: // Stereo
+ case 4: // Surround 3.1
+ case 6: // Surround 5.1
+ case 8: // Surround 7.1
+ break;
+
+ default:
+ ERR_PRINTS("PulseAudio: Unsupported number of channels: " + itos(channels));
+ ERR_FAIL_V(ERR_CANT_OPEN);
+ break;
+ }
pa_sample_spec spec;
spec.format = PA_SAMPLE_S16LE;
@@ -59,7 +180,8 @@ Error AudioDriverPulseAudio::init() {
buffer_size = buffer_frames * channels;
if (OS::get_singleton()->is_stdout_verbose()) {
- print_line("audio buffer frames: " + itos(buffer_frames) + " calculated latency: " + itos(buffer_frames * 1000 / mix_rate) + "ms");
+ print_line("PulseAudio: detected " + itos(channels) + " channels");
+ print_line("PulseAudio: audio buffer frames: " + itos(buffer_frames) + " calculated latency: " + itos(buffer_frames * 1000 / mix_rate) + "ms");
}
pa_buffer_attr attr;
@@ -86,8 +208,8 @@ Error AudioDriverPulseAudio::init() {
ERR_FAIL_COND_V(pulse == NULL, ERR_CANT_OPEN);
}
- samples_in = memnew_arr(int32_t, buffer_size);
- samples_out = memnew_arr(int16_t, buffer_size);
+ samples_in.resize(buffer_size);
+ samples_out.resize(buffer_size);
mutex = Mutex::create();
thread = Thread::create(AudioDriverPulseAudio::thread_func, this);
@@ -119,7 +241,7 @@ void AudioDriverPulseAudio::thread_func(void *p_udata) {
} else {
ad->lock();
- ad->audio_server_process(ad->buffer_frames, ad->samples_in);
+ ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptr());
ad->unlock();
@@ -132,7 +254,7 @@ void AudioDriverPulseAudio::thread_func(void *p_udata) {
int error_code;
int byte_size = ad->buffer_size * sizeof(int16_t);
- if (pa_simple_write(ad->pulse, ad->samples_out, byte_size, &error_code) < 0) {
+ if (pa_simple_write(ad->pulse, ad->samples_out.ptr(), byte_size, &error_code) < 0) {
// can't recover here
fprintf(stderr, "PulseAudio failed and can't recover: %s\n", pa_strerror(error_code));
ad->active = false;
@@ -156,7 +278,7 @@ int AudioDriverPulseAudio::get_mix_rate() const {
AudioDriver::SpeakerMode AudioDriverPulseAudio::get_speaker_mode() const {
- return speaker_mode;
+ return get_speaker_mode_by_total_channels(channels);
}
void AudioDriverPulseAudio::lock() {
@@ -186,16 +308,6 @@ void AudioDriverPulseAudio::finish() {
pulse = NULL;
}
- if (samples_in) {
- memdelete_arr(samples_in);
- samples_in = NULL;
- }
-
- if (samples_out) {
- memdelete_arr(samples_out);
- samples_out = NULL;
- }
-
memdelete(thread);
if (mutex) {
memdelete(mutex);
@@ -207,11 +319,21 @@ void AudioDriverPulseAudio::finish() {
AudioDriverPulseAudio::AudioDriverPulseAudio() {
- samples_in = NULL;
- samples_out = NULL;
mutex = NULL;
thread = NULL;
pulse = NULL;
+
+ samples_in.clear();
+ samples_out.clear();
+
+ mix_rate = 0;
+ buffer_size = 0;
+ channels = 0;
+
+ active = false;
+ thread_exited = false;
+ exit_thread = false;
+
latency = 0;
buffer_frames = 0;
buffer_size = 0;
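
The detect_channels() function added above repeats the same pump-until-done idiom for both queries (server info, then sink info for the default output). A hypothetical helper, not part of this patch, shows the idiom on its own; it only uses libpulse calls the patch already relies on:

    // Sketch: pump the mainloop until a PulseAudio operation finishes, then release it.
    static void pa_wait_for_operation(pa_mainloop *ml, pa_operation *op) {
        while (pa_operation_get_state(op) == PA_OPERATION_RUNNING) {
            if (pa_mainloop_iterate(ml, 1, NULL) < 0) {
                ERR_PRINT("pa_mainloop_iterate error");
                break;
            }
        }
        pa_operation_unref(op);
    }

For the verbose log added in the same file, the latency figure is simply buffer_frames * 1000 / mix_rate, so e.g. 512 buffer frames at the default 44100 Hz mix rate prints a calculated latency of about 11 ms.
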
diff --git a/drivers/pulseaudio/audio_driver_pulseaudio.h b/drivers/pulseaudio/audio_driver_pulseaudio.h
index 9ae0b7e50c..3ede684496 100644
--- a/drivers/pulseaudio/audio_driver_pulseaudio.h
+++ b/drivers/pulseaudio/audio_driver_pulseaudio.h
@@ -43,14 +43,10 @@ class AudioDriverPulseAudio : public AudioDriver {
pa_simple *pulse;
- int32_t *samples_in;
- int16_t *samples_out;
-
- static void thread_func(void *p_udata);
+ Vector<int32_t> samples_in;
+ Vector<int16_t> samples_out;
unsigned int mix_rate;
- SpeakerMode speaker_mode;
-
unsigned int buffer_frames;
unsigned int buffer_size;
int channels;
@@ -58,10 +54,11 @@ class AudioDriverPulseAudio : public AudioDriver {
bool active;
bool thread_exited;
mutable bool exit_thread;
- bool pcm_open;
float latency;
+ static void thread_func(void *p_udata);
+
public:
const char *get_name() const {
return "PulseAudio";
diff --git a/drivers/wasapi/audio_driver_wasapi.cpp b/drivers/wasapi/audio_driver_wasapi.cpp
index 29d1e5deed..eb86491dec 100644
--- a/drivers/wasapi/audio_driver_wasapi.cpp
+++ b/drivers/wasapi/audio_driver_wasapi.cpp
@@ -67,12 +67,13 @@ Error AudioDriverWASAPI::init_device() {
switch (channels) {
case 2: // Stereo
+ case 4: // Surround 3.1
case 6: // Surround 5.1
case 8: // Surround 7.1
break;
default:
- ERR_PRINT("WASAPI: Unsupported number of channels");
+ ERR_PRINTS("WASAPI: Unsupported number of channels: " + itos(channels));
ERR_FAIL_V(ERR_CANT_OPEN);
break;
}
@@ -119,7 +120,8 @@ Error AudioDriverWASAPI::init_device() {
samples_in.resize(buffer_size);
if (OS::get_singleton()->is_stdout_verbose()) {
- print_line("audio buffer frames: " + itos(buffer_frames) + " calculated latency: " + itos(buffer_frames * 1000 / mix_rate) + "ms");
+ print_line("WASAPI: detected " + itos(channels) + " channels");
+ print_line("WASAPI: audio buffer frames: " + itos(buffer_frames) + " calculated latency: " + itos(buffer_frames * 1000 / mix_rate) + "ms");
}
return OK;
@@ -185,7 +187,7 @@ int AudioDriverWASAPI::get_mix_rate() const {
AudioDriver::SpeakerMode AudioDriverWASAPI::get_speaker_mode() const {
- return SPEAKER_MODE_STEREO;
+ return get_speaker_mode_by_total_channels(channels);
}
void AudioDriverWASAPI::thread_func(void *p_udata) {
diff --git a/editor/editor_audio_buses.cpp b/editor/editor_audio_buses.cpp
index 29159c7ad5..a36faeb0de 100644
--- a/editor/editor_audio_buses.cpp
+++ b/editor/editor_audio_buses.cpp
@@ -39,10 +39,13 @@ void EditorAudioBus::_notification(int p_what) {
if (p_what == NOTIFICATION_READY) {
- vu_l->set_under_texture(get_icon("BusVuEmpty", "EditorIcons"));
- vu_l->set_progress_texture(get_icon("BusVuFull", "EditorIcons"));
- vu_r->set_under_texture(get_icon("BusVuEmpty", "EditorIcons"));
- vu_r->set_progress_texture(get_icon("BusVuFull", "EditorIcons"));
+ for (int i = 0; i < cc; i++) {
+ channel[i].vu_l->set_under_texture(get_icon("BusVuEmpty", "EditorIcons"));
+ channel[i].vu_l->set_progress_texture(get_icon("BusVuFull", "EditorIcons"));
+ channel[i].vu_r->set_under_texture(get_icon("BusVuEmpty", "EditorIcons"));
+ channel[i].vu_r->set_progress_texture(get_icon("BusVuFull", "EditorIcons"));
+ channel[i].prev_active = true;
+ }
scale->set_texture(get_icon("BusVuDb", "EditorIcons"));
disabled_vu = get_icon("BusVuFrozen", "EditorIcons");
@@ -53,7 +56,6 @@ void EditorAudioBus::_notification(int p_what) {
bus_options->set_icon(get_icon("GuiMiniTabMenu", "EditorIcons"));
- prev_active = true;
update_bus();
set_process(true);
}
@@ -67,60 +69,52 @@ void EditorAudioBus::_notification(int p_what) {
if (p_what == NOTIFICATION_PROCESS) {
- float real_peak[2] = { -100, -100 };
- bool activity_found = false;
-
- int cc = 0;
- switch (AudioServer::get_singleton()->get_speaker_mode()) {
- case AudioServer::SPEAKER_MODE_STEREO: cc = 1; break;
- case AudioServer::SPEAKER_SURROUND_51: cc = 4; break;
- case AudioServer::SPEAKER_SURROUND_71: cc = 5; break;
- default:
- ERR_PRINT("Unknown speaker_mode");
- break;
- }
-
for (int i = 0; i < cc; i++) {
+ float real_peak[2] = { -100, -100 };
+ bool activity_found = false;
+
if (AudioServer::get_singleton()->is_bus_channel_active(get_index(), i)) {
activity_found = true;
real_peak[0] = MAX(real_peak[0], AudioServer::get_singleton()->get_bus_peak_volume_left_db(get_index(), i));
real_peak[1] = MAX(real_peak[1], AudioServer::get_singleton()->get_bus_peak_volume_right_db(get_index(), i));
}
- }
- if (real_peak[0] > peak_l) {
- peak_l = real_peak[0];
- } else {
- peak_l -= get_process_delta_time() * 60.0;
- }
-
- if (real_peak[1] > peak_r) {
- peak_r = real_peak[1];
- } else {
- peak_r -= get_process_delta_time() * 60.0;
- }
-
- vu_l->set_value(peak_l);
- vu_r->set_value(peak_r);
+ if (real_peak[0] > channel[i].peak_l) {
+ channel[i].peak_l = real_peak[0];
+ } else {
+ channel[i].peak_l -= get_process_delta_time() * 60.0;
+ }
- if (activity_found != prev_active) {
- if (activity_found) {
- vu_l->set_over_texture(Ref<Texture>());
- vu_r->set_over_texture(Ref<Texture>());
+ if (real_peak[1] > channel[i].peak_r) {
+ channel[i].peak_r = real_peak[1];
} else {
- vu_l->set_over_texture(disabled_vu);
- vu_r->set_over_texture(disabled_vu);
+ channel[i].peak_r -= get_process_delta_time() * 60.0;
}
- prev_active = activity_found;
+ channel[i].vu_l->set_value(channel[i].peak_l);
+ channel[i].vu_r->set_value(channel[i].peak_r);
+
+ if (activity_found != channel[i].prev_active) {
+ if (activity_found) {
+ channel[i].vu_l->set_over_texture(Ref<Texture>());
+ channel[i].vu_r->set_over_texture(Ref<Texture>());
+ } else {
+ channel[i].vu_l->set_over_texture(disabled_vu);
+ channel[i].vu_r->set_over_texture(disabled_vu);
+ }
+
+ channel[i].prev_active = activity_found;
+ }
}
}
if (p_what == NOTIFICATION_VISIBILITY_CHANGED) {
- peak_l = -100;
- peak_r = -100;
- prev_active = true;
+ for (int i = 0; i < 4; i++) {
+ channel[i].peak_l = -100;
+ channel[i].peak_r = -100;
+ channel[i].prev_active = true;
+ }
set_process(is_visible_in_tree());
}
@@ -683,19 +677,24 @@ EditorAudioBus::EditorAudioBus(EditorAudioBuses *p_buses) {
slider->connect("value_changed", this, "_volume_db_changed");
hb->add_child(slider);
- vu_l = memnew(TextureProgress);
- vu_l->set_fill_mode(TextureProgress::FILL_BOTTOM_TO_TOP);
- hb->add_child(vu_l);
- vu_l->set_min(-80);
- vu_l->set_max(24);
- vu_l->set_step(0.1);
-
- vu_r = memnew(TextureProgress);
- vu_r->set_fill_mode(TextureProgress::FILL_BOTTOM_TO_TOP);
- hb->add_child(vu_r);
- vu_r->set_min(-80);
- vu_r->set_max(24);
- vu_r->set_step(0.1);
+
+ cc = AudioServer::get_singleton()->get_channel_count();
+
+ for (int i = 0; i < cc; i++) {
+ channel[i].vu_l = memnew(TextureProgress);
+ channel[i].vu_l->set_fill_mode(TextureProgress::FILL_BOTTOM_TO_TOP);
+ hb->add_child(channel[i].vu_l);
+ channel[i].vu_l->set_min(-80);
+ channel[i].vu_l->set_max(24);
+ channel[i].vu_l->set_step(0.1);
+
+ channel[i].vu_r = memnew(TextureProgress);
+ channel[i].vu_r->set_fill_mode(TextureProgress::FILL_BOTTOM_TO_TOP);
+ hb->add_child(channel[i].vu_r);
+ channel[i].vu_r->set_min(-80);
+ channel[i].vu_r->set_max(24);
+ channel[i].vu_r->set_step(0.1);
+ }
scale = memnew(TextureRect);
hb->add_child(scale);
diff --git a/editor/editor_audio_buses.h b/editor/editor_audio_buses.h
index 2c61228bde..dba1b73295 100644
--- a/editor/editor_audio_buses.h
+++ b/editor/editor_audio_buses.h
@@ -52,16 +52,23 @@ class EditorAudioBus : public PanelContainer {
GDCLASS(EditorAudioBus, PanelContainer)
- bool prev_active;
- float peak_l;
- float peak_r;
-
Ref<Texture> disabled_vu;
LineEdit *track_name;
MenuButton *bus_options;
VSlider *slider;
- TextureProgress *vu_l;
- TextureProgress *vu_r;
+
+ int cc;
+
+ struct {
+ bool prev_active;
+
+ float peak_l;
+ float peak_r;
+
+ TextureProgress *vu_l;
+ TextureProgress *vu_r;
+ } channel[4];
+
TextureRect *scale;
OptionButton *send;
diff --git a/platform/osx/audio_driver_osx.cpp b/platform/osx/audio_driver_osx.cpp
index 78c52af201..3b3ba60507 100644
--- a/platform/osx/audio_driver_osx.cpp
+++ b/platform/osx/audio_driver_osx.cpp
@@ -58,14 +58,14 @@ Error AudioDriverOSX::initDevice() {
AudioStreamBasicDescription strdesc;
- // TODO: Implement this
- /*zeromem(&strdesc, sizeof(strdesc));
+ zeromem(&strdesc, sizeof(strdesc));
UInt32 size = sizeof(strdesc);
result = AudioUnitGetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kOutputBus, &strdesc, &size);
ERR_FAIL_COND_V(result != noErr, FAILED);
switch (strdesc.mChannelsPerFrame) {
case 2: // Stereo
+ case 4: // Surround 3.1
case 6: // Surround 5.1
case 8: // Surround 7.1
channels = strdesc.mChannelsPerFrame;
@@ -75,7 +75,7 @@ Error AudioDriverOSX::initDevice() {
// Unknown number of channels, default to stereo
channels = 2;
break;
- }*/
+ }
mix_rate = GLOBAL_DEF("audio/mix_rate", DEFAULT_MIX_RATE);
@@ -103,7 +103,8 @@ Error AudioDriverOSX::initDevice() {
samples_in.resize(buffer_size);
if (OS::get_singleton()->is_stdout_verbose()) {
- print_line("audio buffer frames: " + itos(buffer_frames) + " calculated latency: " + itos(buffer_frames * 1000 / mix_rate) + "ms");
+ print_line("CoreAudio: detected " + itos(channels) + " channels");
+ print_line("CoreAudio: audio buffer frames: " + itos(buffer_frames) + " calculated latency: " + itos(buffer_frames * 1000 / mix_rate) + "ms");
}
AURenderCallbackStruct callback;
@@ -242,7 +243,7 @@ int AudioDriverOSX::get_mix_rate() const {
};
AudioDriver::SpeakerMode AudioDriverOSX::get_speaker_mode() const {
- return SPEAKER_MODE_STEREO;
+ return get_speaker_mode_by_total_channels(channels);
};
void AudioDriverOSX::lock() {
diff --git a/scene/2d/audio_stream_player_2d.cpp b/scene/2d/audio_stream_player_2d.cpp
index d2a8e3315a..1120aa0366 100644
--- a/scene/2d/audio_stream_player_2d.cpp
+++ b/scene/2d/audio_stream_player_2d.cpp
@@ -57,52 +57,32 @@ void AudioStreamPlayer2D::_mix_audio() {
AudioFrame vol_inc = (current.vol - prev_outputs[i].vol) / float(buffer_size);
AudioFrame vol = current.vol;
- switch (AudioServer::get_singleton()->get_speaker_mode()) {
+ int cc = AudioServer::get_singleton()->get_channel_count();
- case AudioServer::SPEAKER_MODE_STEREO: {
- AudioFrame *target = AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.bus_index, 0);
+ if (cc == 1) {
+ AudioFrame *target = AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.bus_index, 0);
- for (int j = 0; j < buffer_size; j++) {
+ for (int j = 0; j < buffer_size; j++) {
- target[j] += buffer[j] * vol;
- vol += vol_inc;
- }
-
- } break;
- case AudioServer::SPEAKER_SURROUND_51: {
-
- AudioFrame *targets[2] = {
- AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.bus_index, 1),
- AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.bus_index, 2),
- };
-
- for (int j = 0; j < buffer_size; j++) {
-
- AudioFrame frame = buffer[j] * vol;
- targets[0][j] += frame;
- targets[1][j] += frame;
- vol += vol_inc;
- }
+ target[j] += buffer[j] * vol;
+ vol += vol_inc;
+ }
- } break;
- case AudioServer::SPEAKER_SURROUND_71: {
+ } else {
+ AudioFrame *targets[4];
- AudioFrame *targets[3] = {
- AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.bus_index, 1),
- AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.bus_index, 2),
- AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.bus_index, 3)
- };
+ for (int k = 0; k < cc; k++) {
+ targets[k] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.bus_index, k);
+ }
- for (int j = 0; j < buffer_size; j++) {
+ for (int j = 0; j < buffer_size; j++) {
- AudioFrame frame = buffer[j] * vol;
- targets[0][j] += frame;
- targets[1][j] += frame;
- targets[2][j] += frame;
- vol += vol_inc;
+ AudioFrame frame = buffer[j] * vol;
+ for (int k = 0; k < cc; k++) {
+ targets[k][j] += frame;
}
-
- } break;
+ vol += vol_inc;
+ }
}
prev_outputs[i] = current;
diff --git a/scene/3d/audio_stream_player_3d.cpp b/scene/3d/audio_stream_player_3d.cpp
index 2073ebf94e..50c9321b83 100644
--- a/scene/3d/audio_stream_player_3d.cpp
+++ b/scene/3d/audio_stream_player_3d.cpp
@@ -72,34 +72,13 @@ void AudioStreamPlayer3D::_mix_audio() {
//mix!
- int buffers = 0;
- int first = 0;
-
- switch (AudioServer::get_singleton()->get_speaker_mode()) {
-
- case AudioServer::SPEAKER_MODE_STEREO: {
- buffers = 1;
- first = 0;
-
- } break;
- case AudioServer::SPEAKER_SURROUND_51: {
- buffers = 2;
- first = 1;
-
- } break;
- case AudioServer::SPEAKER_SURROUND_71: {
-
- buffers = 3;
- first = 1;
-
- } break;
- }
+ int buffers = AudioServer::get_singleton()->get_channel_count();
for (int k = 0; k < buffers; k++) {
AudioFrame vol_inc = (current.vol[k] - prev_outputs[i].vol[k]) / float(buffer_size);
AudioFrame vol = current.vol[k];
- AudioFrame *target = AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.bus_index, first + k);
+ AudioFrame *target = AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.bus_index, k);
current.filter.set_mode(AudioFilterSW::HIGHSHELF);
current.filter.set_sampling_rate(AudioServer::get_singleton()->get_mix_rate());
@@ -146,7 +125,7 @@ void AudioStreamPlayer3D::_mix_audio() {
if (current.reverb_bus_index >= 0) {
- AudioFrame *rtarget = AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.reverb_bus_index, first + k);
+ AudioFrame *rtarget = AudioServer::get_singleton()->thread_get_channel_mix_buffer(current.reverb_bus_index, k);
if (current.reverb_bus_index == prev_outputs[i].reverb_bus_index) {
AudioFrame rvol_inc = (current.reverb_vol[k] - prev_outputs[i].reverb_vol[k]) / float(buffer_size);
@@ -341,49 +320,57 @@ void AudioStreamPlayer3D::_notification(int p_what) {
flat_pos.y = 0;
flat_pos.normalize();
- switch (AudioServer::get_singleton()->get_speaker_mode()) {
+ unsigned int cc = AudioServer::get_singleton()->get_channel_count();
+ if (cc == 1) {
+ // Stereo pair
+ float c = flat_pos.x * 0.5 + 0.5;
- case AudioServer::SPEAKER_MODE_STEREO: {
-
- float c = flat_pos.x * 0.5 + 0.5;
- output.vol[0].l = 1.0 - c;
- output.vol[0].r = c;
-
- output.vol[0] *= multiplier;
+ output.vol[0].l = 1.0 - c;
+ output.vol[0].r = c;
+ } else {
+ Vector3 camtopos = global_pos - camera->get_global_transform().origin;
+ float c = camtopos.normalized().dot(get_global_transform().basis.get_axis(2).normalized()); //it's z negative
+ float angle = Math::rad2deg(Math::acos(c));
+ float av = angle * (flat_pos.x < 0 ? -1 : 1) / 180.0;
- } break;
- case AudioServer::SPEAKER_SURROUND_51: {
+ if (cc >= 1) {
+ // Stereo pair
+ float fl = Math::abs(1.0 - Math::abs(-0.8 - av));
+ float fr = Math::abs(1.0 - Math::abs(0.8 - av));
- float xl = Vector3(-1, 0, -1).normalized().dot(flat_pos) * 0.5 + 0.5;
- float xr = Vector3(1, 0, -1).normalized().dot(flat_pos) * 0.5 + 0.5;
+ output.vol[0].l = fl;
+ output.vol[0].r = fr;
+ }
- output.vol[0].l = xl;
- output.vol[1].r = 1.0 - xl;
- output.vol[0].r = xr;
- output.vol[1].l = 1.0 - xr;
+ if (cc >= 2) {
+ // Center pair
+ float center = 1.0 - Math::sin(Math::acos(c));
- output.vol[0] *= multiplier;
- output.vol[1] *= multiplier;
- } break;
- case AudioServer::SPEAKER_SURROUND_71: {
+ output.vol[1].l = center;
+ output.vol[1].r = center;
+ }
- float xl = Vector3(-1, 0, -1).normalized().dot(flat_pos) * 0.5 + 0.5;
- float xr = Vector3(1, 0, -1).normalized().dot(flat_pos) * 0.5 + 0.5;
+ if (cc >= 3) {
+ // Side pair
+ float sl = Math::abs(1.0 - Math::abs(-0.4 - av));
+ float sr = Math::abs(1.0 - Math::abs(0.4 - av));
- output.vol[0].l = xl;
- output.vol[1].r = 1.0 - xl;
- output.vol[0].r = xr;
- output.vol[1].l = 1.0 - xr;
+ output.vol[2].l = sl;
+ output.vol[2].r = sr;
+ }
- float c = flat_pos.x * 0.5 + 0.5;
- output.vol[2].l = 1.0 - c;
- output.vol[2].r = c;
+ if (cc >= 4) {
+ // Rear pair
+ float rl = Math::abs(1.0 - Math::abs(-0.2 - av));
+ float rr = Math::abs(1.0 - Math::abs(0.2 - av));
- output.vol[0] *= multiplier;
- output.vol[1] *= multiplier;
- output.vol[2] *= multiplier;
+ output.vol[3].l = rl;
+ output.vol[3].r = rr;
+ }
+ }
- } break;
+ for (int k = 0; k < cc; k++) {
+ output.vol[k] *= multiplier;
}
bool filled_reverb = false;
@@ -422,41 +409,30 @@ void AudioStreamPlayer3D::_notification(int p_what) {
rev_pos.y = 0;
rev_pos.normalize();
- switch (AudioServer::get_singleton()->get_speaker_mode()) {
-
- case AudioServer::SPEAKER_MODE_STEREO: {
-
- float c = rev_pos.x * 0.5 + 0.5;
- output.reverb_vol[0].l = 1.0 - c;
- output.reverb_vol[0].r = c;
-
- } break;
- case AudioServer::SPEAKER_SURROUND_51: {
-
- float xl = Vector3(-1, 0, -1).normalized().dot(rev_pos) * 0.5 + 0.5;
- float xr = Vector3(1, 0, -1).normalized().dot(rev_pos) * 0.5 + 0.5;
-
- output.reverb_vol[0].l = xl;
- output.reverb_vol[1].r = 1.0 - xl;
- output.reverb_vol[0].r = xr;
- output.reverb_vol[1].l = 1.0 - xr;
-
- } break;
- case AudioServer::SPEAKER_SURROUND_71: {
-
- float xl = Vector3(-1, 0, -1).normalized().dot(rev_pos) * 0.5 + 0.5;
- float xr = Vector3(1, 0, -1).normalized().dot(rev_pos) * 0.5 + 0.5;
+ if (cc >= 1) {
+ // Stereo pair
+ float c = rev_pos.x * 0.5 + 0.5;
+ output.reverb_vol[0].l = 1.0 - c;
+ output.reverb_vol[0].r = c;
+ }
- output.reverb_vol[0].l = xl;
- output.reverb_vol[1].r = 1.0 - xl;
- output.reverb_vol[0].r = xr;
- output.reverb_vol[1].l = 1.0 - xr;
+ if (cc >= 3) {
+ // Center pair + Side pair
+ float xl = Vector3(-1, 0, -1).normalized().dot(rev_pos) * 0.5 + 0.5;
+ float xr = Vector3(1, 0, -1).normalized().dot(rev_pos) * 0.5 + 0.5;
- float c = rev_pos.x * 0.5 + 0.5;
- output.reverb_vol[2].l = 1.0 - c;
- output.reverb_vol[2].r = c;
+ output.reverb_vol[1].l = xl;
+ output.reverb_vol[1].r = xr;
+ output.reverb_vol[2].l = 1.0 - xr;
+ output.reverb_vol[2].r = 1.0 - xl;
+ }
- } break;
+ if (cc >= 4) {
+ // Rear pair
+ // FIXME: Not sure what math should be done here
+ float c = rev_pos.x * 0.5 + 0.5;
+ output.reverb_vol[3].l = 1.0 - c;
+ output.reverb_vol[3].r = c;
}
for (int i = 0; i < vol_index_max; i++) {
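
The new panning code above reduces to one gain curve per speaker pair: each pair sits at a fixed offset on the normalized angle axis av in [-1, 1] (front ±0.8, side ±0.4, rear ±0.2) and receives abs(1.0 - abs(offset - av)). A few evaluated points, using a hypothetical pair_gain helper that is not engine code, just the formula from the patch spelled out:

    static float pair_gain(float offset, float av) {
        return Math::abs(1.0f - Math::abs(offset - av));
    }
    // pair_gain(0.8f,  0.8f) == 1.0f  -> source right at the pair's offset: full gain
    // pair_gain(0.8f,  0.0f) == 0.2f  -> 0.8 away from the pair: strongly attenuated
    // pair_gain(0.8f, -0.8f) == 0.6f  -> fold-back once |offset - av| exceeds 1

The center pair is handled separately as 1.0 - sin(acos(c)), i.e. 1 - sqrt(1 - c^2): full gain when the angle is 0 (or 180) degrees and zero at 90 degrees.
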
diff --git a/scene/audio/audio_player.cpp b/scene/audio/audio_player.cpp
index 341ae45ce8..fb12867156 100644
--- a/scene/audio/audio_player.cpp
+++ b/scene/audio/audio_player.cpp
@@ -66,29 +66,27 @@ void AudioStreamPlayer::_mix_audio() {
//set volume for next mix
mix_volume_db = volume_db;
- AudioFrame *targets[3] = { NULL, NULL, NULL };
+ AudioFrame *targets[4] = { NULL, NULL, NULL, NULL };
if (AudioServer::get_singleton()->get_speaker_mode() == AudioServer::SPEAKER_MODE_STEREO) {
targets[0] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(bus_index, 0);
} else {
switch (mix_target) {
case MIX_TARGET_STEREO: {
- targets[0] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(bus_index, 1);
+ targets[0] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(bus_index, 0);
} break;
case MIX_TARGET_SURROUND: {
- targets[0] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(bus_index, 1);
- targets[1] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(bus_index, 2);
- if (AudioServer::get_singleton()->get_speaker_mode() == AudioServer::SPEAKER_SURROUND_71) {
- targets[2] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(bus_index, 3);
+ for (int i = 0; i < AudioServer::get_singleton()->get_channel_count(); i++) {
+ targets[i] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(bus_index, i);
}
} break;
case MIX_TARGET_CENTER: {
- targets[0] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(bus_index, 0);
+ targets[0] = AudioServer::get_singleton()->thread_get_channel_mix_buffer(bus_index, 1);
} break;
}
}
- for (int c = 0; c < 3; c++) {
+ for (int c = 0; c < 4; c++) {
if (!targets[c])
break;
for (int i = 0; i < buffer_size; i++) {
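
The MIX_TARGET fix above relies on the per-bus buffer layout this commit settles on: every bus owns up to four stereo mix buffers, indexed in the same order as the pair comments added to audio_stream_player_3d.cpp. A compact reference, not code from the patch:

    // Hypothetical names for the per-bus mix buffer indices after this change.
    enum MixBufferIndex {
        MIX_BUFFER_FRONT  = 0, // front/stereo pair, the only buffer in stereo mode
        MIX_BUFFER_CENTER = 1, // center pair (3.1 and up)
        MIX_BUFFER_SIDE   = 2, // side pair (5.1 and up)
        MIX_BUFFER_REAR   = 3  // rear pair (7.1 only)
    };

This is why MIX_TARGET_STEREO now writes to buffer 0, MIX_TARGET_CENTER to buffer 1, and MIX_TARGET_SURROUND loops over get_channel_count() buffers.
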
diff --git a/servers/audio_server.cpp b/servers/audio_server.cpp
index 29014a7ced..ed62e65846 100644
--- a/servers/audio_server.cpp
+++ b/servers/audio_server.cpp
@@ -77,6 +77,28 @@ double AudioDriver::get_mix_time() const {
return total;
}
+AudioDriver::SpeakerMode AudioDriver::get_speaker_mode_by_total_channels(int p_channels) const {
+ switch (p_channels) {
+ case 4: return SPEAKER_SURROUND_31;
+ case 6: return SPEAKER_SURROUND_51;
+ case 8: return SPEAKER_SURROUND_71;
+ }
+
+ // Default to STEREO
+ return SPEAKER_MODE_STEREO;
+}
+
+int AudioDriver::get_total_channels_by_speaker_mode(AudioDriver::SpeakerMode p_mode) const {
+ switch (p_mode) {
+ case SPEAKER_MODE_STEREO: return 2;
+ case SPEAKER_SURROUND_31: return 4;
+ case SPEAKER_SURROUND_51: return 6;
+ case SPEAKER_SURROUND_71: return 8;
+ }
+
+ ERR_FAIL_V(2);
+}
+
AudioDriver::AudioDriver() {
_last_mix_time = 0;
@@ -424,8 +446,8 @@ void AudioServer::set_bus_count(int p_count) {
}
buses[i] = memnew(Bus);
- buses[i]->channels.resize(_get_channel_count());
- for (int j = 0; j < _get_channel_count(); j++) {
+ buses[i]->channels.resize(get_channel_count());
+ for (int j = 0; j < get_channel_count(); j++) {
buses[i]->channels[j].buffer.resize(buffer_size);
}
buses[i]->name = attempt;
@@ -494,8 +516,8 @@ void AudioServer::add_bus(int p_at_pos) {
}
Bus *bus = memnew(Bus);
- bus->channels.resize(_get_channel_count());
- for (int j = 0; j < _get_channel_count(); j++) {
+ bus->channels.resize(get_channel_count());
+ for (int j = 0; j < get_channel_count(); j++) {
bus->channels[j].buffer.resize(buffer_size);
}
bus->name = attempt;
@@ -798,17 +820,8 @@ void AudioServer::init() {
channel_disable_threshold_db = GLOBAL_DEF("audio/channel_disable_threshold_db", -60.0);
channel_disable_frames = float(GLOBAL_DEF("audio/channel_disable_time", 2.0)) * get_mix_rate();
buffer_size = 1024; //harcoded for now
- switch (get_speaker_mode()) {
- case SPEAKER_MODE_STEREO: {
- temp_buffer.resize(1);
- } break;
- case SPEAKER_SURROUND_51: {
- temp_buffer.resize(3);
- } break;
- case SPEAKER_SURROUND_71: {
- temp_buffer.resize(4);
- } break;
- }
+
+ temp_buffer.resize(get_channel_count());
for (int i = 0; i < temp_buffer.size(); i++) {
temp_buffer[i].resize(buffer_size);
@@ -816,11 +829,11 @@ void AudioServer::init() {
mix_count = 0;
set_bus_count(1);
- ;
set_bus_name(0, "Master");
if (AudioDriver::get_singleton())
AudioDriver::get_singleton()->start();
+
#ifdef TOOLS_ENABLED
set_edited(false); //avoid editors from thinking this was edited
#endif
@@ -992,8 +1005,8 @@ void AudioServer::set_bus_layout(const Ref<AudioBusLayout> &p_bus_layout) {
bus_map[bus->name] = bus;
buses[i] = bus;
- buses[i]->channels.resize(_get_channel_count());
- for (int j = 0; j < _get_channel_count(); j++) {
+ buses[i]->channels.resize(get_channel_count());
+ for (int j = 0; j < get_channel_count(); j++) {
buses[i]->channels[j].buffer.resize(buffer_size);
}
_update_bus_effects(i);
diff --git a/servers/audio_server.h b/servers/audio_server.h
index 13a74856c8..c479d09a3c 100644
--- a/servers/audio_server.h
+++ b/servers/audio_server.h
@@ -50,6 +50,7 @@ public:
enum SpeakerMode {
SPEAKER_MODE_STEREO,
+ SPEAKER_SURROUND_31,
SPEAKER_SURROUND_51,
SPEAKER_SURROUND_71,
};
@@ -72,6 +73,9 @@ public:
virtual float get_latency() { return 0; }
+ SpeakerMode get_speaker_mode_by_total_channels(int p_channels) const;
+ int get_total_channels_by_speaker_mode(SpeakerMode) const;
+
AudioDriver();
virtual ~AudioDriver() {}
};
@@ -101,6 +105,7 @@ public:
//re-expose this her, as AudioDriver is not exposed to script
enum SpeakerMode {
SPEAKER_MODE_STEREO,
+ SPEAKER_SURROUND_31,
SPEAKER_SURROUND_51,
SPEAKER_SURROUND_71,
};
@@ -163,15 +168,6 @@ private:
Vector<Bus *> buses;
Map<StringName, Bus *> bus_map;
- _FORCE_INLINE_ int _get_channel_count() const {
- switch (AudioDriver::get_singleton()->get_speaker_mode()) {
- case AudioDriver::SPEAKER_MODE_STEREO: return 1;
- case AudioDriver::SPEAKER_SURROUND_51: return 3;
- case AudioDriver::SPEAKER_SURROUND_71: return 4;
- }
- ERR_FAIL_V(1);
- }
-
void _update_bus_effects(int p_bus);
static AudioServer *singleton;
@@ -205,6 +201,16 @@ protected:
static void _bind_methods();
public:
+ _FORCE_INLINE_ int get_channel_count() const {
+ switch (get_speaker_mode()) {
+ case SPEAKER_MODE_STEREO: return 1;
+ case SPEAKER_SURROUND_31: return 2;
+ case SPEAKER_SURROUND_51: return 3;
+ case SPEAKER_SURROUND_71: return 4;
+ }
+ ERR_FAIL_V(1);
+ }
+
//do not use from outside audio thread
AudioFrame *thread_get_channel_mix_buffer(int p_bus, int p_buffer);
int thread_get_mix_buffer_size() const;
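
Taken together, the new SPEAKER_SURROUND_31 mode, the driver-side get_speaker_mode_by_total_channels(), and the relocated get_channel_count() define one mapping that every file in this commit now shares. A condensed view, illustrative only:

    // Speaker mode            driver channels    per-bus stereo mix buffers
    // SPEAKER_MODE_STEREO           2                      1
    // SPEAKER_SURROUND_31           4                      2
    // SPEAKER_SURROUND_51           6                      3
    // SPEAKER_SURROUND_71           8                      4
    //
    // In every row the driver channel count is exactly twice the value returned by
    // AudioServer::get_channel_count(), since each mix buffer is a stereo pair.
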