Diffstat (limited to 'drivers')
-rw-r--r--  drivers/convex_decomp/b2d_decompose.h          |   2
-rw-r--r--  drivers/coreaudio/audio_driver_coreaudio.cpp   | 304
-rw-r--r--  drivers/coreaudio/audio_driver_coreaudio.h     |  36
-rw-r--r--  drivers/gles2/rasterizer_scene_gles2.cpp       | 138
-rw-r--r--  drivers/gles2/rasterizer_scene_gles2.h         |   2
-rw-r--r--  drivers/gles2/rasterizer_storage_gles2.cpp     | 115
-rw-r--r--  drivers/gles2/rasterizer_storage_gles2.h       |  34
-rw-r--r--  drivers/pulseaudio/audio_driver_pulseaudio.cpp | 350
-rw-r--r--  drivers/pulseaudio/audio_driver_pulseaudio.h   |  24
-rw-r--r--  drivers/wasapi/audio_driver_wasapi.cpp         | 529
-rw-r--r--  drivers/wasapi/audio_driver_wasapi.h           |  64
11 files changed, 1275 insertions(+), 323 deletions(-)
diff --git a/drivers/convex_decomp/b2d_decompose.h b/drivers/convex_decomp/b2d_decompose.h
index 068689d73d..b21792047e 100644
--- a/drivers/convex_decomp/b2d_decompose.h
+++ b/drivers/convex_decomp/b2d_decompose.h
@@ -31,8 +31,8 @@
#ifndef B2D_DECOMPOSE_H
#define B2D_DECOMPOSE_H
-#include "math_2d.h"
#include "vector.h"
+#include "vector2.h"
Vector<Vector<Vector2> > b2d_decompose(const Vector<Vector2> &p_polygon);
#endif // B2D_DECOMPOSE_H
diff --git a/drivers/coreaudio/audio_driver_coreaudio.cpp b/drivers/coreaudio/audio_driver_coreaudio.cpp
index ac21de91e4..e1f47cb8c2 100644
--- a/drivers/coreaudio/audio_driver_coreaudio.cpp
+++ b/drivers/coreaudio/audio_driver_coreaudio.cpp
@@ -35,8 +35,23 @@
#include "os/os.h"
#define kOutputBus 0
+#define kInputBus 1
#ifdef OSX_ENABLED
+OSStatus AudioDriverCoreAudio::input_device_address_cb(AudioObjectID inObjectID,
+ UInt32 inNumberAddresses, const AudioObjectPropertyAddress *inAddresses,
+ void *inClientData) {
+ AudioDriverCoreAudio *driver = (AudioDriverCoreAudio *)inClientData;
+
+ // If our selected device is the Default, call set_device to update the
+ // kAudioOutputUnitProperty_CurrentDevice property
+ if (driver->capture_device_name == "Default") {
+ driver->capture_set_device("Default");
+ }
+
+ return noErr;
+}
+
OSStatus AudioDriverCoreAudio::output_device_address_cb(AudioObjectID inObjectID,
UInt32 inNumberAddresses, const AudioObjectPropertyAddress *inAddresses,
void *inClientData) {
@@ -79,6 +94,11 @@ Error AudioDriverCoreAudio::init() {
result = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &prop, &output_device_address_cb, this);
ERR_FAIL_COND_V(result != noErr, FAILED);
+
+ prop.mSelector = kAudioHardwarePropertyDefaultInputDevice;
+
+ result = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &prop, &input_device_address_cb, this);
+ ERR_FAIL_COND_V(result != noErr, FAILED);
#endif
AudioStreamBasicDescription strdesc;
@@ -102,6 +122,26 @@ Error AudioDriverCoreAudio::init() {
break;
}
+ zeromem(&strdesc, sizeof(strdesc));
+ size = sizeof(strdesc);
+ result = AudioUnitGetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, &size);
+ ERR_FAIL_COND_V(result != noErr, FAILED);
+
+ switch (strdesc.mChannelsPerFrame) {
+ case 1: // Mono
+ capture_channels = 1;
+ break;
+
+ case 2: // Stereo
+ capture_channels = 2;
+ break;
+
+ default:
+ // Unknown number of channels, default to stereo
+ capture_channels = 2;
+ break;
+ }
+
mix_rate = GLOBAL_DEF_RST("audio/mix_rate", DEFAULT_MIX_RATE);
zeromem(&strdesc, sizeof(strdesc));
@@ -117,6 +157,11 @@ Error AudioDriverCoreAudio::init() {
result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &strdesc, sizeof(strdesc));
ERR_FAIL_COND_V(result != noErr, FAILED);
+ strdesc.mChannelsPerFrame = capture_channels;
+
+ result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, sizeof(strdesc));
+ ERR_FAIL_COND_V(result != noErr, FAILED);
+
int latency = GLOBAL_DEF_RST("audio/output_latency", DEFAULT_OUTPUT_LATENCY);
// Sample rate is independent of channels (ref: https://stackoverflow.com/questions/11048825/audio-sample-frequency-rely-on-channels)
buffer_frames = closest_power_of_2(latency * mix_rate / 1000);
@@ -126,8 +171,12 @@ Error AudioDriverCoreAudio::init() {
ERR_FAIL_COND_V(result != noErr, FAILED);
#endif
- buffer_size = buffer_frames * channels;
+ unsigned int buffer_size = buffer_frames * channels;
samples_in.resize(buffer_size);
+ input_buf.resize(buffer_size);
+ input_buffer.resize(buffer_size * 8);
+ input_position = 0;
+ input_size = 0;
if (OS::get_singleton()->is_stdout_verbose()) {
print_line("CoreAudio: detected " + itos(channels) + " channels");
@@ -141,6 +190,12 @@ Error AudioDriverCoreAudio::init() {
result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, kOutputBus, &callback, sizeof(callback));
ERR_FAIL_COND_V(result != noErr, FAILED);
+ zeromem(&callback, sizeof(AURenderCallbackStruct));
+ callback.inputProc = &AudioDriverCoreAudio::input_callback;
+ callback.inputProcRefCon = this;
+ result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(callback));
+ ERR_FAIL_COND_V(result != noErr, FAILED);
+
result = AudioUnitInitialize(audio_unit);
ERR_FAIL_COND_V(result != noErr, FAILED);
@@ -192,6 +247,45 @@ OSStatus AudioDriverCoreAudio::output_callback(void *inRefCon,
return 0;
};
+OSStatus AudioDriverCoreAudio::input_callback(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp,
+ UInt32 inBusNumber, UInt32 inNumberFrames,
+ AudioBufferList *ioData) {
+
+ AudioDriverCoreAudio *ad = (AudioDriverCoreAudio *)inRefCon;
+ if (!ad->active) {
+ return 0;
+ }
+
+ ad->lock();
+
+ AudioBufferList bufferList;
+ bufferList.mNumberBuffers = 1;
+ bufferList.mBuffers[0].mData = ad->input_buf.ptrw();
+ bufferList.mBuffers[0].mNumberChannels = ad->capture_channels;
+ bufferList.mBuffers[0].mDataByteSize = ad->input_buf.size() * sizeof(int16_t);
+
+ OSStatus result = AudioUnitRender(ad->audio_unit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &bufferList);
+ if (result == noErr) {
+ for (int i = 0; i < inNumberFrames * ad->capture_channels; i++) {
+ int32_t sample = ad->input_buf[i] << 16;
+ ad->input_buffer_write(sample);
+
+ if (ad->capture_channels == 1) {
+ // In case input device is single channel convert it to Stereo
+ ad->input_buffer_write(sample);
+ }
+ }
+ } else {
+ ERR_PRINT(("AudioUnitRender failed, code: " + itos(result)).utf8().get_data());
+ }
+
+ ad->unlock();
+
+ return result;
+}
+
void AudioDriverCoreAudio::start() {
if (!active) {
OSStatus result = AudioOutputUnitStart(audio_unit);
@@ -222,9 +316,94 @@ AudioDriver::SpeakerMode AudioDriverCoreAudio::get_speaker_mode() const {
return get_speaker_mode_by_total_channels(channels);
};
+void AudioDriverCoreAudio::lock() {
+ if (mutex)
+ mutex->lock();
+};
+
+void AudioDriverCoreAudio::unlock() {
+ if (mutex)
+ mutex->unlock();
+};
+
+bool AudioDriverCoreAudio::try_lock() {
+ if (mutex)
+ return mutex->try_lock() == OK;
+ return true;
+}
+
+void AudioDriverCoreAudio::finish() {
+ OSStatus result;
+
+ lock();
+
+ AURenderCallbackStruct callback;
+ zeromem(&callback, sizeof(AURenderCallbackStruct));
+ result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, kOutputBus, &callback, sizeof(callback));
+ if (result != noErr) {
+ ERR_PRINT("AudioUnitSetProperty failed");
+ }
+
+ if (active) {
+ result = AudioOutputUnitStop(audio_unit);
+ if (result != noErr) {
+ ERR_PRINT("AudioOutputUnitStop failed");
+ }
+
+ active = false;
+ }
+
+ result = AudioUnitUninitialize(audio_unit);
+ if (result != noErr) {
+ ERR_PRINT("AudioUnitUninitialize failed");
+ }
+
#ifdef OSX_ENABLED
+ AudioObjectPropertyAddress prop;
+ prop.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
+ prop.mScope = kAudioObjectPropertyScopeGlobal;
+ prop.mElement = kAudioObjectPropertyElementMaster;
-Array AudioDriverCoreAudio::get_device_list() {
+ result = AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &prop, &output_device_address_cb, this);
+ if (result != noErr) {
+ ERR_PRINT("AudioObjectRemovePropertyListener failed");
+ }
+#endif
+
+ result = AudioComponentInstanceDispose(audio_unit);
+ if (result != noErr) {
+ ERR_PRINT("AudioComponentInstanceDispose failed");
+ }
+
+ unlock();
+
+ if (mutex) {
+ memdelete(mutex);
+ mutex = NULL;
+ }
+};
+
+Error AudioDriverCoreAudio::capture_start() {
+
+ UInt32 flag = 1;
+ OSStatus result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag));
+ ERR_FAIL_COND_V(result != noErr, FAILED);
+
+ return OK;
+}
+
+Error AudioDriverCoreAudio::capture_stop() {
+
+ UInt32 flag = 0;
+ OSStatus result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag));
+ ERR_FAIL_COND_V(result != noErr, FAILED);
+
+ return OK;
+}
+
+#ifdef OSX_ENABLED
+
+Array AudioDriverCoreAudio::_get_device_list(bool capture) {
Array list;
@@ -243,20 +422,20 @@ Array AudioDriverCoreAudio::get_device_list() {
UInt32 deviceCount = size / sizeof(AudioDeviceID);
for (UInt32 i = 0; i < deviceCount; i++) {
- prop.mScope = kAudioDevicePropertyScopeOutput;
+ prop.mScope = capture ? kAudioDevicePropertyScopeInput : kAudioDevicePropertyScopeOutput;
prop.mSelector = kAudioDevicePropertyStreamConfiguration;
AudioObjectGetPropertyDataSize(audioDevices[i], &prop, 0, NULL, &size);
AudioBufferList *bufferList = (AudioBufferList *)malloc(size);
AudioObjectGetPropertyData(audioDevices[i], &prop, 0, NULL, &size, bufferList);
- UInt32 outputChannelCount = 0;
+ UInt32 channelCount = 0;
for (UInt32 j = 0; j < bufferList->mNumberBuffers; j++)
- outputChannelCount += bufferList->mBuffers[j].mNumberChannels;
+ channelCount += bufferList->mBuffers[j].mNumberChannels;
free(bufferList);
- if (outputChannelCount >= 1) {
+ if (channelCount >= 1) {
CFStringRef cfname;
size = sizeof(CFStringRef);
@@ -281,21 +460,11 @@ Array AudioDriverCoreAudio::get_device_list() {
return list;
}
-String AudioDriverCoreAudio::get_device() {
-
- return device_name;
-}
-
-void AudioDriverCoreAudio::set_device(String device) {
-
- device_name = device;
- if (!active) {
- return;
- }
+void AudioDriverCoreAudio::_set_device(const String &device, bool capture) {
AudioDeviceID deviceId;
bool found = false;
- if (device_name != "Default") {
+ if (device != "Default") {
AudioObjectPropertyAddress prop;
prop.mSelector = kAudioHardwarePropertyDevices;
@@ -309,20 +478,20 @@ void AudioDriverCoreAudio::set_device(String device) {
UInt32 deviceCount = size / sizeof(AudioDeviceID);
for (UInt32 i = 0; i < deviceCount && !found; i++) {
- prop.mScope = kAudioDevicePropertyScopeOutput;
+ prop.mScope = capture ? kAudioDevicePropertyScopeInput : kAudioDevicePropertyScopeOutput;
prop.mSelector = kAudioDevicePropertyStreamConfiguration;
AudioObjectGetPropertyDataSize(audioDevices[i], &prop, 0, NULL, &size);
AudioBufferList *bufferList = (AudioBufferList *)malloc(size);
AudioObjectGetPropertyData(audioDevices[i], &prop, 0, NULL, &size, bufferList);
- UInt32 outputChannelCount = 0;
+ UInt32 channelCount = 0;
for (UInt32 j = 0; j < bufferList->mNumberBuffers; j++)
- outputChannelCount += bufferList->mBuffers[j].mNumberChannels;
+ channelCount += bufferList->mBuffers[j].mNumberChannels;
free(bufferList);
- if (outputChannelCount >= 1) {
+ if (channelCount >= 1) {
CFStringRef cfname;
size = sizeof(CFStringRef);
@@ -335,7 +504,7 @@ void AudioDriverCoreAudio::set_device(String device) {
char *buffer = (char *)malloc(maxSize);
if (CFStringGetCString(cfname, buffer, maxSize, kCFStringEncodingUTF8)) {
String name = String(buffer) + " (" + itos(audioDevices[i]) + ")";
- if (name == device_name) {
+ if (name == device) {
deviceId = audioDevices[i];
found = true;
}
@@ -351,7 +520,8 @@ void AudioDriverCoreAudio::set_device(String device) {
if (!found) {
// If we haven't found the desired device get the system default one
UInt32 size = sizeof(AudioDeviceID);
- AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
+ UInt32 elem = capture ? kAudioHardwarePropertyDefaultInputDevice : kAudioHardwarePropertyDefaultOutputDevice;
+ AudioObjectPropertyAddress property = { elem, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property, 0, NULL, &size, &deviceId);
ERR_FAIL_COND(result != noErr);
@@ -360,79 +530,52 @@ void AudioDriverCoreAudio::set_device(String device) {
}
if (found) {
- OSStatus result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &deviceId, sizeof(AudioDeviceID));
+ OSStatus result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, capture ? kInputBus : kOutputBus, &deviceId, sizeof(AudioDeviceID));
ERR_FAIL_COND(result != noErr);
+
+ // Reset audio input to keep synchronisation.
+ input_position = 0;
+ input_size = 0;
}
}
-#endif
-
-void AudioDriverCoreAudio::lock() {
- if (mutex)
- mutex->lock();
-};
-
-void AudioDriverCoreAudio::unlock() {
- if (mutex)
- mutex->unlock();
-};
+Array AudioDriverCoreAudio::get_device_list() {
-bool AudioDriverCoreAudio::try_lock() {
- if (mutex)
- return mutex->try_lock() == OK;
- return true;
+ return _get_device_list();
}
-void AudioDriverCoreAudio::finish() {
- OSStatus result;
+String AudioDriverCoreAudio::get_device() {
- lock();
+ return device_name;
+}
- AURenderCallbackStruct callback;
- zeromem(&callback, sizeof(AURenderCallbackStruct));
- result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, kOutputBus, &callback, sizeof(callback));
- if (result != noErr) {
- ERR_PRINT("AudioUnitSetProperty failed");
- }
+void AudioDriverCoreAudio::set_device(String device) {
+ device_name = device;
if (active) {
- result = AudioOutputUnitStop(audio_unit);
- if (result != noErr) {
- ERR_PRINT("AudioOutputUnitStop failed");
- }
-
- active = false;
+ _set_device(device_name);
}
+}
- result = AudioUnitUninitialize(audio_unit);
- if (result != noErr) {
- ERR_PRINT("AudioUnitUninitialize failed");
+void AudioDriverCoreAudio::capture_set_device(const String &p_name) {
+
+ capture_device_name = p_name;
+ if (active) {
+ _set_device(capture_device_name, true);
}
+}
-#ifdef OSX_ENABLED
- AudioObjectPropertyAddress prop;
- prop.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
- prop.mScope = kAudioObjectPropertyScopeGlobal;
- prop.mElement = kAudioObjectPropertyElementMaster;
+Array AudioDriverCoreAudio::capture_get_device_list() {
- result = AudioObjectRemovePropertyListener(kAudioObjectSystemObject, &prop, &output_device_address_cb, this);
- if (result != noErr) {
- ERR_PRINT("AudioObjectRemovePropertyListener failed");
- }
-#endif
+ return _get_device_list(true);
+}
- result = AudioComponentInstanceDispose(audio_unit);
- if (result != noErr) {
- ERR_PRINT("AudioComponentInstanceDispose failed");
- }
+String AudioDriverCoreAudio::capture_get_device() {
- unlock();
+ return capture_device_name;
+}
- if (mutex) {
- memdelete(mutex);
- mutex = NULL;
- }
-};
+#endif
AudioDriverCoreAudio::AudioDriverCoreAudio() {
active = false;
@@ -440,14 +583,15 @@ AudioDriverCoreAudio::AudioDriverCoreAudio() {
mix_rate = 0;
channels = 2;
+ capture_channels = 2;
- buffer_size = 0;
buffer_frames = 0;
samples_in.clear();
device_name = "Default";
-};
+ capture_device_name = "Default";
+}
AudioDriverCoreAudio::~AudioDriverCoreAudio(){};
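
The capture path added above converts each 16-bit CoreAudio sample to the engine's 32-bit fixed-point format (<< 16), writes it through input_buffer_write(), and duplicates the sample when the device is mono so the engine always receives stereo input. A minimal, self-contained sketch of what such a ring-buffer write is assumed to look like; the field names mirror the patch (input_buffer, input_position, input_size), but this is illustrative, not the engine's AudioDriver base class:

#include <cstdint>
#include <vector>

struct CaptureRing {
	std::vector<int32_t> input_buffer;
	unsigned int input_position = 0;
	unsigned int input_size = 0;

	explicit CaptureRing(size_t samples) :
			input_buffer(samples, 0) {}

	void input_buffer_write(int32_t sample) {
		input_buffer[input_position++] = sample;
		if (input_position >= input_buffer.size())
			input_position = 0; // wrap around at the end of the buffer
		if (input_size < input_buffer.size())
			input_size++; // fill level saturates at capacity; the reader keeps its own cursor
	}
};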
diff --git a/drivers/coreaudio/audio_driver_coreaudio.h b/drivers/coreaudio/audio_driver_coreaudio.h
index 99c910498e..d3f7c8d596 100644
--- a/drivers/coreaudio/audio_driver_coreaudio.h
+++ b/drivers/coreaudio/audio_driver_coreaudio.h
@@ -48,15 +48,24 @@ class AudioDriverCoreAudio : public AudioDriver {
Mutex *mutex;
String device_name;
+ String capture_device_name;
int mix_rate;
unsigned int channels;
+ unsigned int capture_channels;
unsigned int buffer_frames;
- unsigned int buffer_size;
Vector<int32_t> samples_in;
+ Vector<int16_t> input_buf;
#ifdef OSX_ENABLED
+ Array _get_device_list(bool capture = false);
+ void _set_device(const String &device, bool capture = false);
+
+ static OSStatus input_device_address_cb(AudioObjectID inObjectID,
+ UInt32 inNumberAddresses, const AudioObjectPropertyAddress *inAddresses,
+ void *inClientData);
+
static OSStatus output_device_address_cb(AudioObjectID inObjectID,
UInt32 inNumberAddresses, const AudioObjectPropertyAddress *inAddresses,
void *inClientData);
@@ -68,6 +77,12 @@ class AudioDriverCoreAudio : public AudioDriver {
UInt32 inBusNumber, UInt32 inNumberFrames,
AudioBufferList *ioData);
+ static OSStatus input_callback(void *inRefCon,
+ AudioUnitRenderActionFlags *ioActionFlags,
+ const AudioTimeStamp *inTimeStamp,
+ UInt32 inBusNumber, UInt32 inNumberFrames,
+ AudioBufferList *ioData);
+
public:
const char *get_name() const {
return "CoreAudio";
@@ -77,18 +92,27 @@ public:
virtual void start();
virtual int get_mix_rate() const;
virtual SpeakerMode get_speaker_mode() const;
-#ifdef OSX_ENABLED
- virtual Array get_device_list();
- virtual String get_device();
- virtual void set_device(String device);
-#endif
+
virtual void lock();
virtual void unlock();
virtual void finish();
+ virtual Error capture_start();
+ virtual Error capture_stop();
+
bool try_lock();
void stop();
+#ifdef OSX_ENABLED
+ virtual Array get_device_list();
+ virtual String get_device();
+ virtual void set_device(String device);
+
+ virtual Array capture_get_device_list();
+ virtual void capture_set_device(const String &p_name);
+ virtual String capture_get_device();
+#endif
+
AudioDriverCoreAudio();
~AudioDriverCoreAudio();
};
diff --git a/drivers/gles2/rasterizer_scene_gles2.cpp b/drivers/gles2/rasterizer_scene_gles2.cpp
index 5f31bfe209..288a144b32 100644
--- a/drivers/gles2/rasterizer_scene_gles2.cpp
+++ b/drivers/gles2/rasterizer_scene_gles2.cpp
@@ -812,6 +812,14 @@ void RasterizerSceneGLES2::_fill_render_list(InstanceBase **p_cull_result, int p
}
} break;
+ case VS::INSTANCE_IMMEDIATE: {
+ RasterizerStorageGLES2::Immediate *im = storage->immediate_owner.getptr(instance->base);
+ ERR_CONTINUE(!im);
+
+ _add_geometry(im, instance, NULL, -1, p_depth_pass, p_shadow_pass);
+
+ } break;
+
default: {
} break;
@@ -931,6 +939,13 @@ void RasterizerSceneGLES2::_setup_geometry(RenderList::Element *p_element, Raste
state.scene_shader.set_conditional(SceneShaderGLES2::ENABLE_UV2_INTERP, s->attribs[VS::ARRAY_TEX_UV2].enabled);
} break;
+ case VS::INSTANCE_IMMEDIATE: {
+ state.scene_shader.set_conditional(SceneShaderGLES2::USE_INSTANCING, false);
+ state.scene_shader.set_conditional(SceneShaderGLES2::ENABLE_COLOR_INTERP, true);
+ state.scene_shader.set_conditional(SceneShaderGLES2::ENABLE_UV_INTERP, true);
+ state.scene_shader.set_conditional(SceneShaderGLES2::ENABLE_UV2_INTERP, true);
+ } break;
+
default: {
} break;
@@ -1264,6 +1279,118 @@ void RasterizerSceneGLES2::_render_geometry(RenderList::Element *p_element) {
glBindBuffer(GL_ARRAY_BUFFER, 0);
} break;
+
+ case VS::INSTANCE_IMMEDIATE: {
+ const RasterizerStorageGLES2::Immediate *im = static_cast<const RasterizerStorageGLES2::Immediate *>(p_element->geometry);
+
+ if (im->building) {
+ return;
+ }
+
+ bool restore_tex = false;
+
+ glBindBuffer(GL_ARRAY_BUFFER, state.immediate_buffer);
+
+ for (const List<RasterizerStorageGLES2::Immediate::Chunk>::Element *E = im->chunks.front(); E; E = E->next()) {
+ const RasterizerStorageGLES2::Immediate::Chunk &c = E->get();
+
+ if (c.vertices.empty()) {
+ continue;
+ }
+
+ int vertices = c.vertices.size();
+
+ uint32_t buf_ofs = 0;
+
+ storage->info.render.vertices_count += vertices;
+
+ if (c.texture.is_valid() && storage->texture_owner.owns(c.texture)) {
+ RasterizerStorageGLES2::Texture *t = storage->texture_owner.get(c.texture);
+
+ t = t->get_ptr();
+
+ if (t->redraw_if_visible) {
+ VisualServerRaster::redraw_request();
+ }
+
+#ifdef TOOLS_ENABLED
+ if (t->detect_3d) {
+ t->detect_3d(t->detect_3d_ud);
+ }
+#endif
+ if (t->render_target) {
+ t->render_target->used_in_frame = true;
+ }
+
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(t->target, t->tex_id);
+ restore_tex = true;
+ } else if (restore_tex) {
+
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, state.current_main_tex);
+ restore_tex = false;
+ }
+
+ if (!c.normals.empty()) {
+ glEnableVertexAttribArray(VS::ARRAY_NORMAL);
+ glBufferSubData(GL_ARRAY_BUFFER, buf_ofs, sizeof(Vector3) * vertices, c.normals.ptr());
+ glVertexAttribPointer(VS::ARRAY_NORMAL, 3, GL_FLOAT, GL_FALSE, sizeof(Vector3), ((uint8_t *)NULL) + buf_ofs);
+ buf_ofs += sizeof(Vector3) * vertices;
+ } else {
+ glDisableVertexAttribArray(VS::ARRAY_NORMAL);
+ }
+
+ if (!c.tangents.empty()) {
+ glEnableVertexAttribArray(VS::ARRAY_TANGENT);
+ glBufferSubData(GL_ARRAY_BUFFER, buf_ofs, sizeof(Plane) * vertices, c.tangents.ptr());
+ glVertexAttribPointer(VS::ARRAY_TANGENT, 4, GL_FLOAT, GL_FALSE, sizeof(Plane), ((uint8_t *)NULL) + buf_ofs);
+ buf_ofs += sizeof(Plane) * vertices;
+ } else {
+ glDisableVertexAttribArray(VS::ARRAY_TANGENT);
+ }
+
+ if (!c.colors.empty()) {
+ glEnableVertexAttribArray(VS::ARRAY_COLOR);
+ glBufferSubData(GL_ARRAY_BUFFER, buf_ofs, sizeof(Color) * vertices, c.colors.ptr());
+ glVertexAttribPointer(VS::ARRAY_COLOR, 4, GL_FLOAT, GL_FALSE, sizeof(Color), ((uint8_t *)NULL) + buf_ofs);
+ buf_ofs += sizeof(Color) * vertices;
+ } else {
+ glDisableVertexAttribArray(VS::ARRAY_COLOR);
+ }
+
+ if (!c.uvs.empty()) {
+ glEnableVertexAttribArray(VS::ARRAY_TEX_UV);
+ glBufferSubData(GL_ARRAY_BUFFER, buf_ofs, sizeof(Vector2) * vertices, c.uvs.ptr());
+ glVertexAttribPointer(VS::ARRAY_TEX_UV, 2, GL_FLOAT, GL_FALSE, sizeof(Vector2), ((uint8_t *)NULL) + buf_ofs);
+ buf_ofs += sizeof(Vector2) * vertices;
+ } else {
+ glDisableVertexAttribArray(VS::ARRAY_TEX_UV);
+ }
+
+ if (!c.uv2s.empty()) {
+ glEnableVertexAttribArray(VS::ARRAY_TEX_UV2);
+ glBufferSubData(GL_ARRAY_BUFFER, buf_ofs, sizeof(Vector2) * vertices, c.uv2s.ptr());
+ glVertexAttribPointer(VS::ARRAY_TEX_UV2, 2, GL_FLOAT, GL_FALSE, sizeof(Vector2), ((uint8_t *)NULL) + buf_ofs);
+ buf_ofs += sizeof(Vector2) * vertices;
+ } else {
+ glDisableVertexAttribArray(VS::ARRAY_TEX_UV2);
+ }
+
+ glEnableVertexAttribArray(VS::ARRAY_VERTEX);
+ glBufferSubData(GL_ARRAY_BUFFER, buf_ofs, sizeof(Vector3) * vertices, c.vertices.ptr());
+ glVertexAttribPointer(VS::ARRAY_VERTEX, 3, GL_FLOAT, GL_FALSE, sizeof(Vector3), ((uint8_t *)NULL) + buf_ofs);
+
+ glDrawArrays(gl_primitive[c.primitive], 0, c.vertices.size());
+ }
+
+ if (restore_tex) {
+ glActiveTexture(GL_TEXTURE0);
+ glBindTexture(GL_TEXTURE_2D, state.current_main_tex);
+ restore_tex = false;
+ }
+
+ } break;
}
}
@@ -1904,7 +2031,7 @@ void RasterizerSceneGLES2::render_scene(const Transform &p_cam_transform, const
} break;
default: {
- print_line("uhm");
+ // FIXME: implement other background modes
} break;
}
}
@@ -2247,6 +2374,15 @@ void RasterizerSceneGLES2::initialize() {
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
+ {
+ uint32_t immediate_buffer_size = GLOBAL_DEF("rendering/limits/buffers/immediate_buffer_size_kb", 2048);
+
+ glGenBuffers(1, &state.immediate_buffer);
+ glBindBuffer(GL_ARRAY_BUFFER, state.immediate_buffer);
+ glBufferData(GL_ARRAY_BUFFER, immediate_buffer_size * 1024, NULL, GL_DYNAMIC_DRAW);
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+ }
+
// cubemaps for shadows
{
int max_shadow_cubemap_sampler_size = 512;
diff --git a/drivers/gles2/rasterizer_scene_gles2.h b/drivers/gles2/rasterizer_scene_gles2.h
index f47d1f1d4e..e153080e15 100644
--- a/drivers/gles2/rasterizer_scene_gles2.h
+++ b/drivers/gles2/rasterizer_scene_gles2.h
@@ -74,6 +74,8 @@ public:
GLuint sky_verts;
+ GLuint immediate_buffer;
+
// ResolveShaderGLES3 resolve_shader;
// ScreenSpaceReflectionShaderGLES3 ssr_shader;
// EffectBlurShaderGLES3 effect_blur_shader;
diff --git a/drivers/gles2/rasterizer_storage_gles2.cpp b/drivers/gles2/rasterizer_storage_gles2.cpp
index 8c4325ccde..1bd3c0a935 100644
--- a/drivers/gles2/rasterizer_storage_gles2.cpp
+++ b/drivers/gles2/rasterizer_storage_gles2.cpp
@@ -2679,45 +2679,132 @@ void RasterizerStorageGLES2::update_dirty_multimeshes() {
/* IMMEDIATE API */
RID RasterizerStorageGLES2::immediate_create() {
- return RID();
+ Immediate *im = memnew(Immediate);
+ return immediate_owner.make_rid(im);
}
-void RasterizerStorageGLES2::immediate_begin(RID p_immediate, VS::PrimitiveType p_rimitive, RID p_texture) {
+void RasterizerStorageGLES2::immediate_begin(RID p_immediate, VS::PrimitiveType p_primitive, RID p_texture) {
+ Immediate *im = immediate_owner.get(p_immediate);
+ ERR_FAIL_COND(!im);
+ ERR_FAIL_COND(im->building);
+
+ Immediate::Chunk ic;
+ ic.texture = p_texture;
+ ic.primitive = p_primitive;
+ im->chunks.push_back(ic);
+ im->mask = 0;
+ im->building = true;
}
void RasterizerStorageGLES2::immediate_vertex(RID p_immediate, const Vector3 &p_vertex) {
+ Immediate *im = immediate_owner.get(p_immediate);
+ ERR_FAIL_COND(!im);
+ ERR_FAIL_COND(!im->building);
+
+ Immediate::Chunk *c = &im->chunks.back()->get();
+
+ if (c->vertices.empty() && im->chunks.size() == 1) {
+ im->aabb.position = p_vertex;
+ im->aabb.size = Vector3();
+ } else {
+ im->aabb.expand_to(p_vertex);
+ }
+
+ if (im->mask & VS::ARRAY_FORMAT_NORMAL)
+ c->normals.push_back(chunk_normal);
+ if (im->mask & VS::ARRAY_FORMAT_TANGENT)
+ c->tangents.push_back(chunk_tangent);
+ if (im->mask & VS::ARRAY_FORMAT_COLOR)
+ c->colors.push_back(chunk_color);
+ if (im->mask & VS::ARRAY_FORMAT_TEX_UV)
+ c->uvs.push_back(chunk_uv);
+ if (im->mask & VS::ARRAY_FORMAT_TEX_UV2)
+ c->uv2s.push_back(chunk_uv2);
+ im->mask |= VS::ARRAY_FORMAT_VERTEX;
+ c->vertices.push_back(p_vertex);
}
void RasterizerStorageGLES2::immediate_normal(RID p_immediate, const Vector3 &p_normal) {
+ Immediate *im = immediate_owner.get(p_immediate);
+ ERR_FAIL_COND(!im);
+ ERR_FAIL_COND(!im->building);
+
+ im->mask |= VS::ARRAY_FORMAT_NORMAL;
+ chunk_normal = p_normal;
}
void RasterizerStorageGLES2::immediate_tangent(RID p_immediate, const Plane &p_tangent) {
+ Immediate *im = immediate_owner.get(p_immediate);
+ ERR_FAIL_COND(!im);
+ ERR_FAIL_COND(!im->building);
+
+ im->mask |= VS::ARRAY_FORMAT_TANGENT;
+ chunk_tangent = p_tangent;
}
void RasterizerStorageGLES2::immediate_color(RID p_immediate, const Color &p_color) {
+ Immediate *im = immediate_owner.get(p_immediate);
+ ERR_FAIL_COND(!im);
+ ERR_FAIL_COND(!im->building);
+
+ im->mask |= VS::ARRAY_FORMAT_COLOR;
+ chunk_color = p_color;
}
void RasterizerStorageGLES2::immediate_uv(RID p_immediate, const Vector2 &tex_uv) {
+ Immediate *im = immediate_owner.get(p_immediate);
+ ERR_FAIL_COND(!im);
+ ERR_FAIL_COND(!im->building);
+
+ im->mask |= VS::ARRAY_FORMAT_TEX_UV;
+ chunk_uv = tex_uv;
}
void RasterizerStorageGLES2::immediate_uv2(RID p_immediate, const Vector2 &tex_uv) {
+ Immediate *im = immediate_owner.get(p_immediate);
+ ERR_FAIL_COND(!im);
+ ERR_FAIL_COND(!im->building);
+
+ im->mask |= VS::ARRAY_FORMAT_TEX_UV2;
+ chunk_uv2 = tex_uv;
}
void RasterizerStorageGLES2::immediate_end(RID p_immediate) {
+ Immediate *im = immediate_owner.get(p_immediate);
+ ERR_FAIL_COND(!im);
+ ERR_FAIL_COND(!im->building);
+
+ im->building = false;
+ im->instance_change_notify();
}
void RasterizerStorageGLES2::immediate_clear(RID p_immediate) {
+ Immediate *im = immediate_owner.get(p_immediate);
+ ERR_FAIL_COND(!im);
+ ERR_FAIL_COND(im->building);
+
+ im->chunks.clear();
+ im->instance_change_notify();
}
AABB RasterizerStorageGLES2::immediate_get_aabb(RID p_immediate) const {
- return AABB();
+ Immediate *im = immediate_owner.get(p_immediate);
+ ERR_FAIL_COND_V(!im, AABB());
+ return im->aabb;
}
void RasterizerStorageGLES2::immediate_set_material(RID p_immediate, RID p_material) {
+ Immediate *im = immediate_owner.get(p_immediate);
+ ERR_FAIL_COND(!im);
+
+ im->material = p_material;
+ im->instance_material_change_notify();
}
RID RasterizerStorageGLES2::immediate_get_material(RID p_immediate) const {
- return RID();
+ const Immediate *im = immediate_owner.get(p_immediate);
+ ERR_FAIL_COND_V(!im, RID());
+ return im->material;
}
/* SKELETON API */
@@ -3729,15 +3816,15 @@ VS::InstanceType RasterizerStorageGLES2::get_base_type(RID p_rid) const {
if (mesh_owner.owns(p_rid)) {
return VS::INSTANCE_MESH;
- }
- if (light_owner.owns(p_rid)) {
+ } else if (light_owner.owns(p_rid)) {
return VS::INSTANCE_LIGHT;
- }
- if (multimesh_owner.owns(p_rid)) {
+ } else if (multimesh_owner.owns(p_rid)) {
return VS::INSTANCE_MULTIMESH;
+ } else if (immediate_owner.owns(p_rid)) {
+ return VS::INSTANCE_IMMEDIATE;
+ } else {
+ return VS::INSTANCE_NONE;
}
-
- return VS::INSTANCE_NONE;
}
bool RasterizerStorageGLES2::free(RID p_rid) {
@@ -3895,6 +3982,14 @@ bool RasterizerStorageGLES2::free(RID p_rid) {
memdelete(multimesh);
return true;
+ } else if (immediate_owner.owns(p_rid)) {
+ Immediate *im = immediate_owner.get(p_rid);
+ im->instance_remove_deps();
+
+ immediate_owner.free(p_rid);
+ memdelete(im);
+
+ return true;
} else if (light_owner.owns(p_rid)) {
Light *light = light_owner.get(p_rid);
diff --git a/drivers/gles2/rasterizer_storage_gles2.h b/drivers/gles2/rasterizer_storage_gles2.h
index 8bc3369dbb..c1fbf73254 100644
--- a/drivers/gles2/rasterizer_storage_gles2.h
+++ b/drivers/gles2/rasterizer_storage_gles2.h
@@ -797,8 +797,40 @@ public:
/* IMMEDIATE API */
+ struct Immediate : public Geometry {
+
+ struct Chunk {
+ RID texture;
+ VS::PrimitiveType primitive;
+ Vector<Vector3> vertices;
+ Vector<Vector3> normals;
+ Vector<Plane> tangents;
+ Vector<Color> colors;
+ Vector<Vector2> uvs;
+ Vector<Vector2> uv2s;
+ };
+
+ List<Chunk> chunks;
+ bool building;
+ int mask;
+ AABB aabb;
+
+ Immediate() {
+ type = GEOMETRY_IMMEDIATE;
+ building = false;
+ }
+ };
+
+ Vector3 chunk_normal;
+ Plane chunk_tangent;
+ Color chunk_color;
+ Vector2 chunk_uv;
+ Vector2 chunk_uv2;
+
+ mutable RID_Owner<Immediate> immediate_owner;
+
virtual RID immediate_create();
- virtual void immediate_begin(RID p_immediate, VS::PrimitiveType p_rimitive, RID p_texture = RID());
+ virtual void immediate_begin(RID p_immediate, VS::PrimitiveType p_primitive, RID p_texture = RID());
virtual void immediate_vertex(RID p_immediate, const Vector3 &p_vertex);
virtual void immediate_normal(RID p_immediate, const Vector3 &p_normal);
virtual void immediate_tangent(RID p_immediate, const Plane &p_tangent);
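
The immediate API declared above is chunk-oriented: immediate_begin() opens a new Chunk with a primitive type and optional texture, the attribute setters (normal/tangent/color/uv/uv2) latch a current value and set the corresponding format bit, and each immediate_vertex() pushes the vertex together with whichever latched attributes are active, growing the AABB as it goes. A hypothetical caller, shown only to illustrate the call order; the storage pointer and RIDs are assumed to already exist, and this is not code from the patch:

void draw_immediate_triangle(RasterizerStorageGLES2 *storage, RID im, RID texture) {
	storage->immediate_begin(im, VS::PRIMITIVE_TRIANGLES, texture); // opens a new Chunk
	storage->immediate_color(im, Color(1, 0, 0)); // latches chunk_color, sets ARRAY_FORMAT_COLOR
	storage->immediate_uv(im, Vector2(0, 0));
	storage->immediate_vertex(im, Vector3(0, 0, 0)); // pushes vertex plus latched color/uv
	storage->immediate_uv(im, Vector2(1, 0));
	storage->immediate_vertex(im, Vector3(1, 0, 0));
	storage->immediate_uv(im, Vector2(0, 1));
	storage->immediate_vertex(im, Vector3(0, 1, 0));
	storage->immediate_end(im); // building = false, instance_change_notify() propagates the new AABB
}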
diff --git a/drivers/pulseaudio/audio_driver_pulseaudio.cpp b/drivers/pulseaudio/audio_driver_pulseaudio.cpp
index 6db0e58737..d1104eec18 100644
--- a/drivers/pulseaudio/audio_driver_pulseaudio.cpp
+++ b/drivers/pulseaudio/audio_driver_pulseaudio.cpp
@@ -64,18 +64,32 @@ void AudioDriverPulseAudio::pa_sink_info_cb(pa_context *c, const pa_sink_info *l
ad->pa_status++;
}
+void AudioDriverPulseAudio::pa_source_info_cb(pa_context *c, const pa_source_info *l, int eol, void *userdata) {
+ AudioDriverPulseAudio *ad = (AudioDriverPulseAudio *)userdata;
+
+ // If eol is set to a positive number, you're at the end of the list
+ if (eol > 0) {
+ return;
+ }
+
+ ad->pa_rec_map = l->channel_map;
+ ad->pa_status++;
+}
+
void AudioDriverPulseAudio::pa_server_info_cb(pa_context *c, const pa_server_info *i, void *userdata) {
AudioDriverPulseAudio *ad = (AudioDriverPulseAudio *)userdata;
+ ad->capture_default_device = i->default_source_name;
ad->default_device = i->default_sink_name;
ad->pa_status++;
}
-void AudioDriverPulseAudio::detect_channels() {
+void AudioDriverPulseAudio::detect_channels(bool capture) {
- pa_channel_map_init_stereo(&pa_map);
+ pa_channel_map_init_stereo(capture ? &pa_rec_map : &pa_map);
- if (device_name == "Default") {
+ String device = capture ? capture_device_name : device_name;
+ if (device == "Default") {
// Get the default output device name
pa_status = 0;
pa_operation *pa_op = pa_context_get_server_info(pa_ctx, &AudioDriverPulseAudio::pa_server_info_cb, (void *)this);
@@ -93,16 +107,22 @@ void AudioDriverPulseAudio::detect_channels() {
}
}
- char device[1024];
- if (device_name == "Default") {
- strcpy(device, default_device.utf8().get_data());
+ char dev[1024];
+ if (device == "Default") {
+ strcpy(dev, capture ? capture_default_device.utf8().get_data() : default_device.utf8().get_data());
} else {
- strcpy(device, device_name.utf8().get_data());
+ strcpy(dev, device.utf8().get_data());
}
// Now using the device name get the amount of channels
pa_status = 0;
- pa_operation *pa_op = pa_context_get_sink_info_by_name(pa_ctx, device, &AudioDriverPulseAudio::pa_sink_info_cb, (void *)this);
+ pa_operation *pa_op;
+ if (capture) {
+ pa_op = pa_context_get_source_info_by_name(pa_ctx, dev, &AudioDriverPulseAudio::pa_source_info_cb, (void *)this);
+ } else {
+ pa_op = pa_context_get_sink_info_by_name(pa_ctx, dev, &AudioDriverPulseAudio::pa_sink_info_cb, (void *)this);
+ }
+
if (pa_op) {
while (pa_status == 0) {
int ret = pa_mainloop_iterate(pa_ml, 1, NULL);
@@ -113,7 +133,11 @@ void AudioDriverPulseAudio::detect_channels() {
pa_operation_unref(pa_op);
} else {
- ERR_PRINT("pa_context_get_sink_info_by_name error");
+ if (capture) {
+ ERR_PRINT("pa_context_get_source_info_by_name error");
+ } else {
+ ERR_PRINT("pa_context_get_sink_info_by_name error");
+ }
}
}
@@ -195,6 +219,10 @@ Error AudioDriverPulseAudio::init_device() {
samples_in.resize(buffer_frames * channels);
samples_out.resize(pa_buffer_size);
+ // Reset audio input to keep synchronisation.
+ input_position = 0;
+ input_size = 0;
+
return OK;
}
@@ -287,74 +315,71 @@ float AudioDriverPulseAudio::get_latency() {
void AudioDriverPulseAudio::thread_func(void *p_udata) {
AudioDriverPulseAudio *ad = (AudioDriverPulseAudio *)p_udata;
+ unsigned int write_ofs = 0;
+ size_t avail_bytes = 0;
while (!ad->exit_thread) {
- ad->lock();
- ad->start_counting_ticks();
-
- if (!ad->active) {
- for (unsigned int i = 0; i < ad->pa_buffer_size; i++) {
- ad->samples_out.write[i] = 0;
- }
+ size_t read_bytes = 0;
+ size_t written_bytes = 0;
- } else {
- ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw());
+ if (avail_bytes == 0) {
+ ad->lock();
+ ad->start_counting_ticks();
- if (ad->channels == ad->pa_map.channels) {
+ if (!ad->active) {
for (unsigned int i = 0; i < ad->pa_buffer_size; i++) {
- ad->samples_out.write[i] = ad->samples_in[i] >> 16;
+ ad->samples_out.write[i] = 0;
}
} else {
- // Uneven amount of channels
- unsigned int in_idx = 0;
- unsigned int out_idx = 0;
+ ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw());
- for (unsigned int i = 0; i < ad->buffer_frames; i++) {
- for (unsigned int j = 0; j < ad->pa_map.channels - 1; j++) {
- ad->samples_out.write[out_idx++] = ad->samples_in[in_idx++] >> 16;
+ if (ad->channels == ad->pa_map.channels) {
+ for (unsigned int i = 0; i < ad->pa_buffer_size; i++) {
+ ad->samples_out.write[i] = ad->samples_in[i] >> 16;
+ }
+ } else {
+ // Uneven amount of channels
+ unsigned int in_idx = 0;
+ unsigned int out_idx = 0;
+
+ for (unsigned int i = 0; i < ad->buffer_frames; i++) {
+ for (unsigned int j = 0; j < ad->pa_map.channels - 1; j++) {
+ ad->samples_out.write[out_idx++] = ad->samples_in[in_idx++] >> 16;
+ }
+ uint32_t l = ad->samples_in[in_idx++];
+ uint32_t r = ad->samples_in[in_idx++];
+ ad->samples_out.write[out_idx++] = ((l >> 1) + (r >> 1)) >> 16;
}
- uint32_t l = ad->samples_in[in_idx++];
- uint32_t r = ad->samples_in[in_idx++];
- ad->samples_out.write[out_idx++] = (l >> 1 + r >> 1) >> 16;
}
}
+
+ avail_bytes = ad->pa_buffer_size * sizeof(int16_t);
+ write_ofs = 0;
+ ad->stop_counting_ticks();
+ ad->unlock();
}
- int error_code;
- int byte_size = ad->pa_buffer_size * sizeof(int16_t);
+ ad->lock();
+ ad->start_counting_ticks();
+
int ret;
do {
ret = pa_mainloop_iterate(ad->pa_ml, 0, NULL);
} while (ret > 0);
- if (pa_stream_get_state(ad->pa_str) == PA_STREAM_READY) {
- const void *ptr = ad->samples_out.ptr();
- while (byte_size > 0) {
- size_t bytes = pa_stream_writable_size(ad->pa_str);
- if (bytes > 0) {
- if (bytes > byte_size) {
- bytes = byte_size;
- }
-
- ret = pa_stream_write(ad->pa_str, ptr, bytes, NULL, 0LL, PA_SEEK_RELATIVE);
- if (ret >= 0) {
- byte_size -= bytes;
- ptr = (const char *)ptr + bytes;
- }
+ if (avail_bytes > 0 && pa_stream_get_state(ad->pa_str) == PA_STREAM_READY) {
+ size_t bytes = pa_stream_writable_size(ad->pa_str);
+ if (bytes > 0) {
+ size_t bytes_to_write = MIN(bytes, avail_bytes);
+ const void *ptr = ad->samples_out.ptr();
+ ret = pa_stream_write(ad->pa_str, (char *)ptr + write_ofs, bytes_to_write, NULL, 0LL, PA_SEEK_RELATIVE);
+ if (ret != 0) {
+ ERR_PRINT("pa_stream_write error");
} else {
- ret = pa_mainloop_iterate(ad->pa_ml, 0, NULL);
- if (ret == 0) {
- // If pa_mainloop_iterate returns 0 sleep for 1 msec to wait
- // for the stream to be able to process more bytes
- ad->stop_counting_ticks();
- ad->unlock();
-
- OS::get_singleton()->delay_usec(1000);
-
- ad->lock();
- ad->start_counting_ticks();
- }
+ avail_bytes -= bytes_to_write;
+ write_ofs += bytes_to_write;
+ written_bytes += bytes_to_write;
}
}
}
@@ -379,8 +404,64 @@ void AudioDriverPulseAudio::thread_func(void *p_udata) {
}
}
+ if (ad->pa_rec_str && pa_stream_get_state(ad->pa_rec_str) == PA_STREAM_READY) {
+ size_t bytes = pa_stream_readable_size(ad->pa_rec_str);
+ if (bytes > 0) {
+ const void *ptr = NULL;
+ size_t maxbytes = ad->input_buffer.size() * sizeof(int16_t);
+
+ bytes = MIN(bytes, maxbytes);
+ ret = pa_stream_peek(ad->pa_rec_str, &ptr, &bytes);
+ if (ret != 0) {
+ ERR_PRINT("pa_stream_peek error");
+ } else {
+ int16_t *srcptr = (int16_t *)ptr;
+ for (size_t i = bytes >> 1; i > 0; i--) {
+ int32_t sample = int32_t(*srcptr++) << 16;
+ ad->input_buffer_write(sample);
+
+ if (ad->pa_rec_map.channels == 1) {
+ // In case input device is single channel convert it to Stereo
+ ad->input_buffer_write(sample);
+ }
+ }
+
+ read_bytes += bytes;
+ ret = pa_stream_drop(ad->pa_rec_str);
+ if (ret != 0) {
+ ERR_PRINT("pa_stream_drop error");
+ }
+ }
+ }
+
+ // User selected a new device, finish the current one so we'll init the new device
+ if (ad->capture_device_name != ad->capture_new_device) {
+ ad->capture_device_name = ad->capture_new_device;
+ ad->capture_finish_device();
+
+ Error err = ad->capture_init_device();
+ if (err != OK) {
+ ERR_PRINT("PulseAudio: capture_init_device error");
+ ad->capture_device_name = "Default";
+ ad->capture_new_device = "Default";
+
+ err = ad->capture_init_device();
+ if (err != OK) {
+ ad->active = false;
+ ad->exit_thread = true;
+ break;
+ }
+ }
+ }
+ }
+
ad->stop_counting_ticks();
ad->unlock();
+
+ // Let the thread rest a while if we haven't read or written anything
+ if (written_bytes == 0 && read_bytes == 0) {
+ OS::get_singleton()->delay_usec(1000);
+ }
}
ad->thread_exited = true;
@@ -510,11 +591,165 @@ void AudioDriverPulseAudio::finish() {
thread = NULL;
}
+Error AudioDriverPulseAudio::capture_init_device() {
+
+ // If there is a specified device check that it is really present
+ if (capture_device_name != "Default") {
+ Array list = capture_get_device_list();
+ if (list.find(capture_device_name) == -1) {
+ capture_device_name = "Default";
+ capture_new_device = "Default";
+ }
+ }
+
+ detect_channels(true);
+ switch (pa_rec_map.channels) {
+ case 1: // Mono
+ case 2: // Stereo
+ break;
+
+ default:
+ WARN_PRINTS("PulseAudio: Unsupported number of input channels: " + itos(pa_rec_map.channels));
+ pa_channel_map_init_stereo(&pa_rec_map);
+ break;
+ }
+
+ if (OS::get_singleton()->is_stdout_verbose()) {
+ print_line("PulseAudio: detected " + itos(pa_rec_map.channels) + " input channels");
+ }
+
+ pa_sample_spec spec;
+
+ spec.format = PA_SAMPLE_S16LE;
+ spec.channels = pa_rec_map.channels;
+ spec.rate = mix_rate;
+
+ int latency = 30;
+ input_buffer_frames = closest_power_of_2(latency * mix_rate / 1000);
+ int buffer_size = input_buffer_frames * spec.channels;
+
+ pa_buffer_attr attr;
+ attr.fragsize = buffer_size * sizeof(int16_t);
+
+ pa_rec_str = pa_stream_new(pa_ctx, "Record", &spec, &pa_rec_map);
+ if (pa_rec_str == NULL) {
+ ERR_PRINTS("PulseAudio: pa_stream_new error: " + String(pa_strerror(pa_context_errno(pa_ctx))));
+ ERR_FAIL_V(ERR_CANT_OPEN);
+ }
+
+ const char *dev = capture_device_name == "Default" ? NULL : capture_device_name.utf8().get_data();
+ pa_stream_flags flags = pa_stream_flags(PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_ADJUST_LATENCY | PA_STREAM_AUTO_TIMING_UPDATE);
+ int error_code = pa_stream_connect_record(pa_rec_str, dev, &attr, flags);
+ if (error_code < 0) {
+ ERR_PRINTS("PulseAudio: pa_stream_connect_record error: " + String(pa_strerror(error_code)));
+ ERR_FAIL_V(ERR_CANT_OPEN);
+ }
+
+ input_buffer.resize(input_buffer_frames * 8);
+ input_position = 0;
+ input_size = 0;
+
+ return OK;
+}
+
+void AudioDriverPulseAudio::capture_finish_device() {
+
+ if (pa_rec_str) {
+ int ret = pa_stream_disconnect(pa_rec_str);
+ if (ret != 0) {
+ ERR_PRINTS("PulseAudio: pa_stream_disconnect error: " + String(pa_strerror(ret)));
+ }
+ pa_stream_unref(pa_rec_str);
+ pa_rec_str = NULL;
+ }
+}
+
+Error AudioDriverPulseAudio::capture_start() {
+
+ lock();
+ Error err = capture_init_device();
+ unlock();
+
+ return err;
+}
+
+Error AudioDriverPulseAudio::capture_stop() {
+ lock();
+ capture_finish_device();
+ unlock();
+
+ return OK;
+}
+
+void AudioDriverPulseAudio::capture_set_device(const String &p_name) {
+
+ lock();
+ capture_new_device = p_name;
+ unlock();
+}
+
+void AudioDriverPulseAudio::pa_sourcelist_cb(pa_context *c, const pa_source_info *l, int eol, void *userdata) {
+ AudioDriverPulseAudio *ad = (AudioDriverPulseAudio *)userdata;
+
+ // If eol is set to a positive number, you're at the end of the list
+ if (eol > 0) {
+ return;
+ }
+
+ if (l->monitor_of_sink == PA_INVALID_INDEX) {
+ ad->pa_rec_devices.push_back(l->name);
+ }
+
+ ad->pa_status++;
+}
+
+Array AudioDriverPulseAudio::capture_get_device_list() {
+
+ pa_rec_devices.clear();
+ pa_rec_devices.push_back("Default");
+
+ if (pa_ctx == NULL) {
+ return pa_rec_devices;
+ }
+
+ lock();
+
+ // Get the device list
+ pa_status = 0;
+ pa_operation *pa_op = pa_context_get_source_info_list(pa_ctx, pa_sourcelist_cb, (void *)this);
+ if (pa_op) {
+ while (pa_status == 0) {
+ int ret = pa_mainloop_iterate(pa_ml, 1, NULL);
+ if (ret < 0) {
+ ERR_PRINT("pa_mainloop_iterate error");
+ }
+ }
+
+ pa_operation_unref(pa_op);
+ } else {
+ ERR_PRINT("pa_context_get_server_info error");
+ }
+
+ unlock();
+
+ return pa_rec_devices;
+}
+
+String AudioDriverPulseAudio::capture_get_device() {
+
+ lock();
+ String name = capture_device_name;
+ unlock();
+
+ return name;
+}
+
AudioDriverPulseAudio::AudioDriverPulseAudio() {
pa_ml = NULL;
pa_ctx = NULL;
pa_str = NULL;
+ pa_rec_str = NULL;
mutex = NULL;
thread = NULL;
@@ -528,6 +763,7 @@ AudioDriverPulseAudio::AudioDriverPulseAudio() {
mix_rate = 0;
buffer_frames = 0;
+ input_buffer_frames = 0;
pa_buffer_size = 0;
channels = 0;
pa_ready = 0;
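
The PulseAudio capture loop added above reads pending recorded data with pa_stream_peek(), converts the 16-bit frames into the engine's 32-bit ring buffer (duplicating mono to stereo), then releases the fragment with pa_stream_drop(). A condensed sketch of that read pattern under the same assumptions; write_sample_to_ring() stands in for AudioDriver::input_buffer_write() and is hypothetical, and error handling is trimmed for brevity:

#include <pulse/pulseaudio.h>
#include <cstdint>

void drain_capture_stream(pa_stream *rec, int channels, void (*write_sample_to_ring)(int32_t)) {
	size_t bytes = pa_stream_readable_size(rec);
	if (bytes == 0)
		return;

	const void *ptr = NULL;
	if (pa_stream_peek(rec, &ptr, &bytes) != 0)
		return;

	if (ptr != NULL) {
		const int16_t *src = static_cast<const int16_t *>(ptr);
		for (size_t i = bytes / sizeof(int16_t); i > 0; i--) {
			int32_t sample = int32_t(*src++) << 16;
			write_sample_to_ring(sample);
			if (channels == 1)
				write_sample_to_ring(sample); // duplicate mono input to stereo
		}
	}

	// A NULL ptr with bytes > 0 indicates a hole in the stream; the fragment must be dropped either way.
	if (bytes > 0)
		pa_stream_drop(rec);
}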
diff --git a/drivers/pulseaudio/audio_driver_pulseaudio.h b/drivers/pulseaudio/audio_driver_pulseaudio.h
index b471f5f9d5..f8358a452b 100644
--- a/drivers/pulseaudio/audio_driver_pulseaudio.h
+++ b/drivers/pulseaudio/audio_driver_pulseaudio.h
@@ -47,22 +47,30 @@ class AudioDriverPulseAudio : public AudioDriver {
pa_mainloop *pa_ml;
pa_context *pa_ctx;
pa_stream *pa_str;
+ pa_stream *pa_rec_str;
pa_channel_map pa_map;
+ pa_channel_map pa_rec_map;
String device_name;
String new_device;
String default_device;
+ String capture_device_name;
+ String capture_new_device;
+ String capture_default_device;
+
Vector<int32_t> samples_in;
Vector<int16_t> samples_out;
unsigned int mix_rate;
unsigned int buffer_frames;
+ unsigned int input_buffer_frames;
unsigned int pa_buffer_size;
int channels;
int pa_ready;
int pa_status;
Array pa_devices;
+ Array pa_rec_devices;
bool active;
bool thread_exited;
@@ -72,13 +80,18 @@ class AudioDriverPulseAudio : public AudioDriver {
static void pa_state_cb(pa_context *c, void *userdata);
static void pa_sink_info_cb(pa_context *c, const pa_sink_info *l, int eol, void *userdata);
+ static void pa_source_info_cb(pa_context *c, const pa_source_info *l, int eol, void *userdata);
static void pa_server_info_cb(pa_context *c, const pa_server_info *i, void *userdata);
static void pa_sinklist_cb(pa_context *c, const pa_sink_info *l, int eol, void *userdata);
+ static void pa_sourcelist_cb(pa_context *c, const pa_source_info *l, int eol, void *userdata);
Error init_device();
void finish_device();
- void detect_channels();
+ Error capture_init_device();
+ void capture_finish_device();
+
+ void detect_channels(bool capture = false);
static void thread_func(void *p_udata);
@@ -91,15 +104,24 @@ public:
virtual void start();
virtual int get_mix_rate() const;
virtual SpeakerMode get_speaker_mode() const;
+
virtual Array get_device_list();
virtual String get_device();
virtual void set_device(String device);
+
+ virtual Array capture_get_device_list();
+ virtual void capture_set_device(const String &p_name);
+ virtual String capture_get_device();
+
virtual void lock();
virtual void unlock();
virtual void finish();
virtual float get_latency();
+ virtual Error capture_start();
+ virtual Error capture_stop();
+
AudioDriverPulseAudio();
~AudioDriverPulseAudio();
};
diff --git a/drivers/wasapi/audio_driver_wasapi.cpp b/drivers/wasapi/audio_driver_wasapi.cpp
index 5982955c4f..2dcb4ff3d8 100644
--- a/drivers/wasapi/audio_driver_wasapi.cpp
+++ b/drivers/wasapi/audio_driver_wasapi.cpp
@@ -52,8 +52,22 @@ const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
const IID IID_IAudioClient = __uuidof(IAudioClient);
const IID IID_IAudioRenderClient = __uuidof(IAudioRenderClient);
+const IID IID_IAudioCaptureClient = __uuidof(IAudioCaptureClient);
-static bool default_device_changed = false;
+#define SAFE_RELEASE(memory) \
+ if ((memory) != NULL) { \
+ (memory)->Release(); \
+ (memory) = NULL; \
+ }
+
+#define REFTIMES_PER_SEC 10000000
+#define REFTIMES_PER_MILLISEC 10000
+
+#define CAPTURE_BUFFER_CHANNELS 2
+
+static StringName capture_device_id;
+static bool default_render_device_changed = false;
+static bool default_capture_device_changed = false;
class CMMNotificationClient : public IMMNotificationClient {
LONG _cRef;
@@ -109,8 +123,13 @@ public:
}
HRESULT STDMETHODCALLTYPE OnDefaultDeviceChanged(EDataFlow flow, ERole role, LPCWSTR pwstrDeviceId) {
- if (flow == eRender && role == eConsole) {
- default_device_changed = true;
+ if (role == eConsole) {
+ if (flow == eRender) {
+ default_render_device_changed = true;
+ } else if (flow == eCapture) {
+ default_capture_device_changed = true;
+ capture_device_id = String(pwstrDeviceId);
+ }
}
return S_OK;
@@ -123,7 +142,7 @@ public:
static CMMNotificationClient notif_client;
-Error AudioDriverWASAPI::init_device(bool reinit) {
+Error AudioDriverWASAPI::audio_device_init(AudioDeviceWASAPI *p_device, bool p_capture, bool reinit) {
WAVEFORMATEX *pwfex;
IMMDeviceEnumerator *enumerator = NULL;
@@ -134,12 +153,12 @@ Error AudioDriverWASAPI::init_device(bool reinit) {
HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void **)&enumerator);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
- if (device_name == "Default") {
- hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device);
+ if (p_device->device_name == "Default") {
+ hr = enumerator->GetDefaultAudioEndpoint(p_capture ? eCapture : eRender, eConsole, &device);
} else {
IMMDeviceCollection *devices = NULL;
- hr = enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &devices);
+ hr = enumerator->EnumAudioEndpoints(p_capture ? eCapture : eRender, DEVICE_STATE_ACTIVE, &devices);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
LPWSTR strId = NULL;
@@ -165,7 +184,7 @@ Error AudioDriverWASAPI::init_device(bool reinit) {
hr = props->GetValue(PKEY_Device_FriendlyName, &propvar);
ERR_BREAK(hr != S_OK);
- if (device_name == String(propvar.pwszVal)) {
+ if (p_device->device_name == String(propvar.pwszVal)) {
hr = device->GetId(&strId);
ERR_BREAK(hr != S_OK);
@@ -186,9 +205,10 @@ Error AudioDriverWASAPI::init_device(bool reinit) {
}
if (device == NULL) {
- hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device);
+ hr = enumerator->GetDefaultAudioEndpoint(p_capture ? eCapture : eRender, eConsole, &device);
}
}
+
if (reinit) {
// In case we're trying to re-initialize the device prevent throwing this error on the console,
// otherwise if there is currently no device available this will spam the console.
@@ -200,11 +220,15 @@ Error AudioDriverWASAPI::init_device(bool reinit) {
}
hr = enumerator->RegisterEndpointNotificationCallback(&notif_client);
+ SAFE_RELEASE(enumerator)
+
if (hr != S_OK) {
ERR_PRINT("WASAPI: RegisterEndpointNotificationCallback error");
}
- hr = device->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void **)&audio_client);
+ hr = device->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void **)&p_device->audio_client);
+ SAFE_RELEASE(device)
+
if (reinit) {
if (hr != S_OK) {
return ERR_CANT_OPEN;
@@ -213,75 +237,89 @@ Error AudioDriverWASAPI::init_device(bool reinit) {
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
}
- hr = audio_client->GetMixFormat(&pwfex);
+ hr = p_device->audio_client->GetMixFormat(&pwfex);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
// Since we're using WASAPI Shared Mode we can't control any of these, we just tag along
- wasapi_channels = pwfex->nChannels;
- format_tag = pwfex->wFormatTag;
- bits_per_sample = pwfex->wBitsPerSample;
+ p_device->channels = pwfex->nChannels;
+ p_device->format_tag = pwfex->wFormatTag;
+ p_device->bits_per_sample = pwfex->wBitsPerSample;
+ p_device->frame_size = (p_device->bits_per_sample / 8) * p_device->channels;
- switch (wasapi_channels) {
- case 2: // Stereo
- case 4: // Surround 3.1
- case 6: // Surround 5.1
- case 8: // Surround 7.1
- channels = wasapi_channels;
- break;
-
- default:
- WARN_PRINTS("WASAPI: Unsupported number of channels: " + itos(wasapi_channels));
- channels = 2;
- break;
- }
-
- if (format_tag == WAVE_FORMAT_EXTENSIBLE) {
+ if (p_device->format_tag == WAVE_FORMAT_EXTENSIBLE) {
WAVEFORMATEXTENSIBLE *wfex = (WAVEFORMATEXTENSIBLE *)pwfex;
if (wfex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM) {
- format_tag = WAVE_FORMAT_PCM;
+ p_device->format_tag = WAVE_FORMAT_PCM;
} else if (wfex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) {
- format_tag = WAVE_FORMAT_IEEE_FLOAT;
+ p_device->format_tag = WAVE_FORMAT_IEEE_FLOAT;
} else {
ERR_PRINT("WASAPI: Format not supported");
ERR_FAIL_V(ERR_CANT_OPEN);
}
} else {
- if (format_tag != WAVE_FORMAT_PCM && format_tag != WAVE_FORMAT_IEEE_FLOAT) {
+ if (p_device->format_tag != WAVE_FORMAT_PCM && p_device->format_tag != WAVE_FORMAT_IEEE_FLOAT) {
ERR_PRINT("WASAPI: Format not supported");
ERR_FAIL_V(ERR_CANT_OPEN);
}
}
- DWORD streamflags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
+ DWORD streamflags = 0;
if (mix_rate != pwfex->nSamplesPerSec) {
streamflags |= AUDCLNT_STREAMFLAGS_RATEADJUST;
pwfex->nSamplesPerSec = mix_rate;
pwfex->nAvgBytesPerSec = pwfex->nSamplesPerSec * pwfex->nChannels * (pwfex->wBitsPerSample / 8);
}
- hr = audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, streamflags, 0, 0, pwfex, NULL);
+ hr = p_device->audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, streamflags, p_capture ? REFTIMES_PER_SEC : 0, 0, pwfex, NULL);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
- event = CreateEvent(NULL, FALSE, FALSE, NULL);
- ERR_FAIL_COND_V(event == NULL, ERR_CANT_OPEN);
-
- hr = audio_client->SetEventHandle(event);
+ if (p_capture) {
+ hr = p_device->audio_client->GetService(IID_IAudioCaptureClient, (void **)&p_device->capture_client);
+ } else {
+ hr = p_device->audio_client->GetService(IID_IAudioRenderClient, (void **)&p_device->render_client);
+ }
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
- hr = audio_client->GetService(IID_IAudioRenderClient, (void **)&render_client);
- ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
+ // Free memory
+ CoTaskMemFree(pwfex);
+ SAFE_RELEASE(device)
+
+ return OK;
+}
+
+Error AudioDriverWASAPI::init_render_device(bool reinit) {
+
+ Error err = audio_device_init(&audio_output, false, reinit);
+ if (err != OK)
+ return err;
+
+ switch (audio_output.channels) {
+ case 2: // Stereo
+ case 4: // Surround 3.1
+ case 6: // Surround 5.1
+ case 8: // Surround 7.1
+ channels = audio_output.channels;
+ break;
+
+ default:
+ WARN_PRINTS("WASAPI: Unsupported number of channels: " + itos(audio_output.channels));
+ channels = 2;
+ break;
+ }
UINT32 max_frames;
- hr = audio_client->GetBufferSize(&max_frames);
+ HRESULT hr = audio_output.audio_client->GetBufferSize(&max_frames);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
// Due to WASAPI Shared Mode we have no control of the buffer size
buffer_frames = max_frames;
// Sample rate is independent of channels (ref: https://stackoverflow.com/questions/11048825/audio-sample-frequency-rely-on-channels)
- buffer_size = buffer_frames * channels;
- samples_in.resize(buffer_size);
+ samples_in.resize(buffer_frames * channels);
+
+ input_position = 0;
+ input_size = 0;
if (OS::get_singleton()->is_stdout_verbose()) {
print_line("WASAPI: detected " + itos(channels) + " channels");
@@ -291,41 +329,61 @@ Error AudioDriverWASAPI::init_device(bool reinit) {
return OK;
}
-Error AudioDriverWASAPI::finish_device() {
+Error AudioDriverWASAPI::init_capture_device(bool reinit) {
- if (audio_client) {
- if (active) {
- audio_client->Stop();
- active = false;
- }
+ Error err = audio_device_init(&audio_input, true, reinit);
+ if (err != OK)
+ return err;
- audio_client->Release();
- audio_client = NULL;
- }
+ // Get the max frames
+ UINT32 max_frames;
+ HRESULT hr = audio_input.audio_client->GetBufferSize(&max_frames);
+ ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
- if (render_client) {
- render_client->Release();
- render_client = NULL;
- }
+ // Set the buffer size
+ input_buffer.resize(max_frames * CAPTURE_BUFFER_CHANNELS);
+ input_position = 0;
+ input_size = 0;
- if (audio_client) {
- audio_client->Release();
- audio_client = NULL;
+ return OK;
+}
+
+Error AudioDriverWASAPI::audio_device_finish(AudioDeviceWASAPI *p_device) {
+
+ if (p_device->active) {
+ if (p_device->audio_client) {
+ p_device->audio_client->Stop();
+ }
+
+ p_device->active = false;
}
+ SAFE_RELEASE(p_device->audio_client)
+ SAFE_RELEASE(p_device->render_client)
+ SAFE_RELEASE(p_device->capture_client)
+
return OK;
}
+Error AudioDriverWASAPI::finish_render_device() {
+
+ return audio_device_finish(&audio_output);
+}
+
+Error AudioDriverWASAPI::finish_capture_device() {
+
+ return audio_device_finish(&audio_input);
+}
+
Error AudioDriverWASAPI::init() {
mix_rate = GLOBAL_DEF_RST("audio/mix_rate", DEFAULT_MIX_RATE);
- Error err = init_device();
+ Error err = init_render_device();
if (err != OK) {
- ERR_PRINT("WASAPI: init_device error");
+ ERR_PRINT("WASAPI: init_render_device error");
}
- active = false;
exit_thread = false;
thread_exited = false;
@@ -345,7 +403,7 @@ AudioDriver::SpeakerMode AudioDriverWASAPI::get_speaker_mode() const {
return get_speaker_mode_by_total_channels(channels);
}
-Array AudioDriverWASAPI::get_device_list() {
+Array AudioDriverWASAPI::audio_device_get_list(bool p_capture) {
Array list;
IMMDeviceCollection *devices = NULL;
@@ -358,7 +416,7 @@ Array AudioDriverWASAPI::get_device_list() {
HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void **)&enumerator);
ERR_FAIL_COND_V(hr != S_OK, Array());
- hr = enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &devices);
+ hr = enumerator->EnumAudioEndpoints(p_capture ? eCapture : eRender, DEVICE_STATE_ACTIVE, &devices);
ERR_FAIL_COND_V(hr != S_OK, Array());
UINT count = 0;
@@ -393,21 +451,63 @@ Array AudioDriverWASAPI::get_device_list() {
return list;
}
+Array AudioDriverWASAPI::get_device_list() {
+
+ return audio_device_get_list(false);
+}
+
String AudioDriverWASAPI::get_device() {
- return device_name;
+ lock();
+ String name = audio_output.device_name;
+ unlock();
+
+ return name;
}
void AudioDriverWASAPI::set_device(String device) {
lock();
- new_device = device;
+ audio_output.new_device = device;
unlock();
}
-void AudioDriverWASAPI::write_sample(AudioDriverWASAPI *ad, BYTE *buffer, int i, int32_t sample) {
- if (ad->format_tag == WAVE_FORMAT_PCM) {
- switch (ad->bits_per_sample) {
+int32_t AudioDriverWASAPI::read_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i) {
+ if (format_tag == WAVE_FORMAT_PCM) {
+ int32_t sample = 0;
+ switch (bits_per_sample) {
+ case 8:
+ sample = int32_t(((int8_t *)buffer)[i]) << 24;
+ break;
+
+ case 16:
+ sample = int32_t(((int16_t *)buffer)[i]) << 16;
+ break;
+
+ case 24:
+ sample |= int32_t(((int8_t *)buffer)[i * 3 + 2]) << 24;
+ sample |= int32_t(((int8_t *)buffer)[i * 3 + 1]) << 16;
+ sample |= int32_t(((int8_t *)buffer)[i * 3 + 0]) << 8;
+ break;
+
+ case 32:
+ sample = ((int32_t *)buffer)[i];
+ break;
+ }
+
+ return sample;
+ } else if (format_tag == WAVE_FORMAT_IEEE_FLOAT) {
+ return int32_t(((float *)buffer)[i] * 32768.0) << 16;
+ } else {
+ ERR_PRINT("WASAPI: Unknown format tag");
+ }
+
+ return 0;
+}
+
+void AudioDriverWASAPI::write_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i, int32_t sample) {
+ if (format_tag == WAVE_FORMAT_PCM) {
+ switch (bits_per_sample) {
case 8:
((int8_t *)buffer)[i] = sample >> 24;
break;
@@ -426,83 +526,99 @@ void AudioDriverWASAPI::write_sample(AudioDriverWASAPI *ad, BYTE *buffer, int i,
((int32_t *)buffer)[i] = sample;
break;
}
- } else if (ad->format_tag == WAVE_FORMAT_IEEE_FLOAT) {
+ } else if (format_tag == WAVE_FORMAT_IEEE_FLOAT) {
((float *)buffer)[i] = (sample >> 16) / 32768.f;
} else {
ERR_PRINT("WASAPI: Unknown format tag");
- ad->exit_thread = true;
}
}
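// Both helpers above normalize every device format to a signed 32-bit value
// whose payload sits in the high-order bits. A standalone sketch of the same
// 16-bit PCM and IEEE-float conversions (illustration only, not the driver
// functions themselves):
#include <cstdint>
#include <cstdio>

static int32_t pcm16_to_internal(int16_t s) {
	return int32_t(s) << 16; // e.g. 0x1234 -> 0x12340000
}

static int16_t internal_to_pcm16(int32_t s) {
	return int16_t(s >> 16);
}

static int32_t float_to_internal(float f) {
	return int32_t(f * 32768.0f) << 16; // mirrors read_sample's float branch
}

static float internal_to_float(int32_t s) {
	return (s >> 16) / 32768.f; // mirrors write_sample's float branch
}

int main() {
	printf("0x%08x\n", unsigned(pcm16_to_internal(0x1234))); // prints 0x12340000
	printf("%f\n", internal_to_float(float_to_internal(0.25f))); // prints 0.250000
	return 0;
}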
void AudioDriverWASAPI::thread_func(void *p_udata) {
AudioDriverWASAPI *ad = (AudioDriverWASAPI *)p_udata;
+ uint32_t avail_frames = 0;
+ uint32_t write_ofs = 0;
while (!ad->exit_thread) {
- ad->lock();
- ad->start_counting_ticks();
+ uint32_t read_frames = 0;
+ uint32_t written_frames = 0;
- if (ad->active) {
- ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw());
- } else {
- for (unsigned int i = 0; i < ad->buffer_size; i++) {
- ad->samples_in.write[i] = 0;
+ if (avail_frames == 0) {
+ ad->lock();
+ ad->start_counting_ticks();
+
+ if (ad->audio_output.active) {
+ ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw());
+ } else {
+ for (unsigned int i = 0; i < ad->samples_in.size(); i++) {
+ ad->samples_in.write[i] = 0;
+ }
}
- }
- ad->stop_counting_ticks();
- ad->unlock();
+ avail_frames = ad->buffer_frames;
+ write_ofs = 0;
- unsigned int left_frames = ad->buffer_frames;
- unsigned int buffer_idx = 0;
- while (left_frames > 0 && ad->audio_client) {
- WaitForSingleObject(ad->event, 1000);
+ ad->stop_counting_ticks();
+ ad->unlock();
+ }
- ad->lock();
- ad->start_counting_ticks();
+ ad->lock();
+ ad->start_counting_ticks();
+
+ if (avail_frames > 0 && ad->audio_output.audio_client) {
UINT32 cur_frames;
bool invalidated = false;
- HRESULT hr = ad->audio_client->GetCurrentPadding(&cur_frames);
+ HRESULT hr = ad->audio_output.audio_client->GetCurrentPadding(&cur_frames);
if (hr == S_OK) {
- // Check how much frames are available on the WASAPI buffer
- UINT32 avail_frames = ad->buffer_frames - cur_frames;
- UINT32 write_frames = avail_frames > left_frames ? left_frames : avail_frames;
- BYTE *buffer = NULL;
- hr = ad->render_client->GetBuffer(write_frames, &buffer);
- if (hr == S_OK) {
- // We're using WASAPI Shared Mode so we must convert the buffer
-
- if (ad->channels == ad->wasapi_channels) {
- for (unsigned int i = 0; i < write_frames * ad->channels; i++) {
- ad->write_sample(ad, buffer, i, ad->samples_in[buffer_idx++]);
- }
- } else {
- for (unsigned int i = 0; i < write_frames; i++) {
- for (unsigned int j = 0; j < MIN(ad->channels, ad->wasapi_channels); j++) {
- ad->write_sample(ad, buffer, i * ad->wasapi_channels + j, ad->samples_in[buffer_idx++]);
+ // Check how many frames are available on the WASAPI buffer
+ UINT32 write_frames = MIN(ad->buffer_frames - cur_frames, avail_frames);
+ if (write_frames > 0) {
+ BYTE *buffer = NULL;
+ hr = ad->audio_output.render_client->GetBuffer(write_frames, &buffer);
+ if (hr == S_OK) {
+
+ // We're using WASAPI Shared Mode, so we must convert the buffer
+ if (ad->channels == ad->audio_output.channels) {
+ for (unsigned int i = 0; i < write_frames * ad->channels; i++) {
+ ad->write_sample(ad->audio_output.format_tag, ad->audio_output.bits_per_sample, buffer, i, ad->samples_in.write[write_ofs++]);
}
- if (ad->wasapi_channels > ad->channels) {
- for (unsigned int j = ad->channels; j < ad->wasapi_channels; j++) {
- ad->write_sample(ad, buffer, i * ad->wasapi_channels + j, 0);
+ } else {
+ for (unsigned int i = 0; i < write_frames; i++) {
+ for (unsigned int j = 0; j < MIN(ad->channels, ad->audio_output.channels); j++) {
+ ad->write_sample(ad->audio_output.format_tag, ad->audio_output.bits_per_sample, buffer, i * ad->audio_output.channels + j, ad->samples_in.write[write_ofs++]);
+ }
+ if (ad->audio_output.channels > ad->channels) {
+ for (unsigned int j = ad->channels; j < ad->audio_output.channels; j++) {
+ ad->write_sample(ad->audio_output.format_tag, ad->audio_output.bits_per_sample, buffer, i * ad->audio_output.channels + j, 0);
+ }
}
}
}
- }
- hr = ad->render_client->ReleaseBuffer(write_frames, 0);
- if (hr != S_OK) {
- ERR_PRINT("WASAPI: Release buffer error");
- }
+ hr = ad->audio_output.render_client->ReleaseBuffer(write_frames, 0);
+ if (hr != S_OK) {
+ ERR_PRINT("WASAPI: Release buffer error");
+ }
- left_frames -= write_frames;
- } else if (hr == AUDCLNT_E_DEVICE_INVALIDATED) {
- invalidated = true;
- } else {
- ERR_PRINT("WASAPI: Get buffer error");
- ad->exit_thread = true;
+ avail_frames -= write_frames;
+ written_frames += write_frames;
+ } else if (hr == AUDCLNT_E_DEVICE_INVALIDATED) {
+ // Device is not valid anymore, reopen it
+
+ Error err = ad->finish_render_device();
+ if (err != OK) {
+ ERR_PRINT("WASAPI: finish_render_device error");
+ } else {
+ // We reopened the device and samples_in may have been resized, so invalidate the current avail_frames
+ avail_frames = 0;
+ }
+ } else {
+ ERR_PRINT("WASAPI: Get buffer error");
+ ad->exit_thread = true;
+ }
}
} else if (hr == AUDCLNT_E_DEVICE_INVALIDATED) {
invalidated = true;
@@ -514,47 +630,117 @@ void AudioDriverWASAPI::thread_func(void *p_udata) {
// Device is not valid anymore
WARN_PRINT("WASAPI: Current device invalidated, closing device");
- Error err = ad->finish_device();
+ Error err = ad->finish_render_device();
if (err != OK) {
- ERR_PRINT("WASAPI: finish_device error");
+ ERR_PRINT("WASAPI: finish_render_device error");
}
}
-
- ad->stop_counting_ticks();
- ad->unlock();
}
- ad->lock();
- ad->start_counting_ticks();
-
// If we're using the Default device and it changed, finish it so we'll re-init the device
- if (ad->device_name == "Default" && default_device_changed) {
- Error err = ad->finish_device();
+ if (ad->audio_output.device_name == "Default" && default_render_device_changed) {
+ Error err = ad->finish_render_device();
if (err != OK) {
- ERR_PRINT("WASAPI: finish_device error");
+ ERR_PRINT("WASAPI: finish_render_device error");
}
- default_device_changed = false;
+ default_render_device_changed = false;
}
// User selected a new device, finish the current one so we'll init the new device
- if (ad->device_name != ad->new_device) {
- ad->device_name = ad->new_device;
- Error err = ad->finish_device();
+ if (ad->audio_output.device_name != ad->audio_output.new_device) {
+ ad->audio_output.device_name = ad->audio_output.new_device;
+ Error err = ad->finish_render_device();
if (err != OK) {
- ERR_PRINT("WASAPI: finish_device error");
+ ERR_PRINT("WASAPI: finish_render_device error");
}
}
- if (!ad->audio_client) {
- Error err = ad->init_device(true);
+ if (!ad->audio_output.audio_client) {
+ Error err = ad->init_render_device(true);
if (err == OK) {
ad->start();
}
}
+ if (ad->audio_input.active) {
+ UINT32 packet_length = 0;
+ BYTE *data;
+ UINT32 num_frames_available;
+ DWORD flags;
+
+ HRESULT hr = ad->audio_input.capture_client->GetNextPacketSize(&packet_length);
+ if (hr == S_OK) {
+ while (packet_length != 0) {
+ hr = ad->audio_input.capture_client->GetBuffer(&data, &num_frames_available, &flags, NULL, NULL);
+ ERR_BREAK(hr != S_OK);
+
+ // FIXME: Only works for floating point at the moment
+ for (UINT32 j = 0; j < num_frames_available; j++) {
+ int32_t l, r;
+
+ if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
+ l = r = 0;
+ } else {
+ if (ad->audio_input.channels == 2) {
+ l = read_sample(ad->audio_input.format_tag, ad->audio_input.bits_per_sample, data, j * 2);
+ r = read_sample(ad->audio_input.format_tag, ad->audio_input.bits_per_sample, data, j * 2 + 1);
+ } else if (ad->audio_input.channels == 1) {
+ l = r = read_sample(ad->audio_input.format_tag, ad->audio_input.bits_per_sample, data, j);
+ } else {
+ l = r = 0;
+ ERR_PRINT("WASAPI: unsupported channel count in microphone!");
+ }
+ }
+
+ ad->input_buffer_write(l);
+ ad->input_buffer_write(r);
+ }
+
+ read_frames += num_frames_available;
+
+ hr = ad->audio_input.capture_client->ReleaseBuffer(num_frames_available);
+ ERR_BREAK(hr != S_OK);
+
+ hr = ad->audio_input.capture_client->GetNextPacketSize(&packet_length);
+ ERR_BREAK(hr != S_OK);
+ }
+ }
+
+ // If we're using the Default device and it changed, finish it so we'll re-init the device
+ if (ad->audio_input.device_name == "Default" && default_capture_device_changed) {
+ Error err = ad->finish_capture_device();
+ if (err != OK) {
+ ERR_PRINT("WASAPI: finish_capture_device error");
+ }
+
+ default_capture_device_changed = false;
+ }
+
+ // User selected a new device, finish the current one so we'll init the new device
+ if (ad->audio_input.device_name != ad->audio_input.new_device) {
+ ad->audio_input.device_name = ad->audio_input.new_device;
+ Error err = ad->finish_capture_device();
+ if (err != OK) {
+ ERR_PRINT("WASAPI: finish_capture_device error");
+ }
+ }
+
+ if (!ad->audio_input.audio_client) {
+ Error err = ad->init_capture_device(true);
+ if (err == OK) {
+ ad->capture_start();
+ }
+ }
+ }
+
ad->stop_counting_ticks();
ad->unlock();
+
+ // Let the thread rest a while if we haven't read or written anything
+ if (written_frames == 0 && read_frames == 0) {
+ OS::get_singleton()->delay_usec(1000);
+ }
}
ad->thread_exited = true;
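// Each pass of the render half of thread_func reduces to the shared-mode
// handshake below: query how many frames are still queued, request only what
// fits, fill it, hand it back. This is an illustration, not driver code;
// render_frames() is a hypothetical stand-in for the samples_in conversion
// above (assumes <audioclient.h>):
static void render_frames(BYTE *p_out, UINT32 p_frames); // hypothetical conversion step

static HRESULT submit_block(IAudioClient *client, IAudioRenderClient *render,
		UINT32 buffer_frames, UINT32 frames_wanted) {
	UINT32 padding = 0;
	HRESULT hr = client->GetCurrentPadding(&padding); // frames not yet played
	if (hr != S_OK)
		return hr; // AUDCLNT_E_DEVICE_INVALIDATED means the device must be reopened

	UINT32 writable = buffer_frames - padding;
	UINT32 to_write = writable < frames_wanted ? writable : frames_wanted;
	if (to_write == 0)
		return S_OK; // device buffer is full, try again on the next pass

	BYTE *out = NULL;
	hr = render->GetBuffer(to_write, &out);
	if (hr != S_OK)
		return hr;

	render_frames(out, to_write);

	return render->ReleaseBuffer(to_write, 0);
}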
@@ -562,12 +748,12 @@ void AudioDriverWASAPI::thread_func(void *p_udata) {
void AudioDriverWASAPI::start() {
- if (audio_client) {
- HRESULT hr = audio_client->Start();
+ if (audio_output.audio_client) {
+ HRESULT hr = audio_output.audio_client->Start();
if (hr != S_OK) {
ERR_PRINT("WASAPI: Start failed");
} else {
- active = true;
+ audio_output.active = true;
}
}
}
@@ -594,7 +780,8 @@ void AudioDriverWASAPI::finish() {
thread = NULL;
}
- finish_device();
+ finish_capture_device();
+ finish_render_device();
if (mutex) {
memdelete(mutex);
@@ -602,30 +789,70 @@ void AudioDriverWASAPI::finish() {
}
}
+Error AudioDriverWASAPI::capture_start() {
+
+ Error err = init_capture_device();
+ if (err != OK) {
+ ERR_PRINT("WASAPI: init_capture_device error");
+ return err;
+ }
+
+ if (audio_input.active == false) {
+ audio_input.audio_client->Start();
+ audio_input.active = true;
+
+ return OK;
+ }
+
+ return FAILED;
+}
+
+Error AudioDriverWASAPI::capture_stop() {
+
+ if (audio_input.active == true) {
+ audio_input.audio_client->Stop();
+ audio_input.active = false;
+
+ return OK;
+ }
+
+ return FAILED;
+}
+
+void AudioDriverWASAPI::capture_set_device(const String &p_name) {
+
+ lock();
+ audio_input.new_device = p_name;
+ unlock();
+}
+
+Array AudioDriverWASAPI::capture_get_device_list() {
+
+ return audio_device_get_list(true);
+}
+
+String AudioDriverWASAPI::capture_get_device() {
+
+ lock();
+ String name = audio_input.device_name;
+ unlock();
+
+ return name;
+}
+
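// The capture half of thread_func drains the device packet by packet; this is
// the bare IAudioCaptureClient handshake it relies on. write_silence() and
// consume_frames() are hypothetical stand-ins for the input_buffer_write()
// calls made in the thread loop (assumes <audioclient.h>):
static void write_silence(UINT32 p_frames); // hypothetical: push zeroed samples
static void consume_frames(const BYTE *p_data, UINT32 p_frames); // hypothetical: run read_sample per channel

static HRESULT drain_capture(IAudioCaptureClient *capture) {
	UINT32 packet_frames = 0;
	HRESULT hr = capture->GetNextPacketSize(&packet_frames);
	while (hr == S_OK && packet_frames != 0) {
		BYTE *data = NULL;
		UINT32 frames = 0;
		DWORD flags = 0;
		hr = capture->GetBuffer(&data, &frames, &flags, NULL, NULL);
		if (hr != S_OK)
			break;

		if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
			write_silence(frames);
		} else {
			consume_frames(data, frames);
		}

		hr = capture->ReleaseBuffer(frames);
		if (hr != S_OK)
			break;

		hr = capture->GetNextPacketSize(&packet_frames);
	}
	return hr;
}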
AudioDriverWASAPI::AudioDriverWASAPI() {
- audio_client = NULL;
- render_client = NULL;
mutex = NULL;
thread = NULL;
- format_tag = 0;
- bits_per_sample = 0;
-
samples_in.clear();
- buffer_size = 0;
channels = 0;
- wasapi_channels = 0;
mix_rate = 0;
buffer_frames = 0;
thread_exited = false;
exit_thread = false;
- active = false;
-
- device_name = "Default";
- new_device = "Default";
}
#endif
diff --git a/drivers/wasapi/audio_driver_wasapi.h b/drivers/wasapi/audio_driver_wasapi.h
index f3ee5976eb..3d94f3ba49 100644
--- a/drivers/wasapi/audio_driver_wasapi.h
+++ b/drivers/wasapi/audio_driver_wasapi.h
@@ -43,35 +43,63 @@
class AudioDriverWASAPI : public AudioDriver {
- HANDLE event;
- IAudioClient *audio_client;
- IAudioRenderClient *render_client;
+ class AudioDeviceWASAPI {
+ public:
+ IAudioClient *audio_client;
+ IAudioRenderClient *render_client;
+ IAudioCaptureClient *capture_client;
+ bool active;
+
+ WORD format_tag;
+ WORD bits_per_sample;
+ unsigned int channels;
+ unsigned int frame_size;
+
+ String device_name;
+ String new_device;
+
+ AudioDeviceWASAPI() {
+ audio_client = NULL;
+ render_client = NULL;
+ capture_client = NULL;
+ active = false;
+ format_tag = 0;
+ bits_per_sample = 0;
+ channels = 0;
+ frame_size = 0;
+ device_name = "Default";
+ new_device = "Default";
+ }
+ };
+
+ AudioDeviceWASAPI audio_input;
+ AudioDeviceWASAPI audio_output;
+
Mutex *mutex;
Thread *thread;
- String device_name;
- String new_device;
-
- WORD format_tag;
- WORD bits_per_sample;
-
Vector<int32_t> samples_in;
- unsigned int buffer_size;
unsigned int channels;
- unsigned int wasapi_channels;
int mix_rate;
int buffer_frames;
bool thread_exited;
mutable bool exit_thread;
- bool active;
- _FORCE_INLINE_ void write_sample(AudioDriverWASAPI *ad, BYTE *buffer, int i, int32_t sample);
+ static _FORCE_INLINE_ void write_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i, int32_t sample);
+ static _FORCE_INLINE_ int32_t read_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i);
static void thread_func(void *p_udata);
- Error init_device(bool reinit = false);
- Error finish_device();
+ Error init_render_device(bool reinit = false);
+ Error init_capture_device(bool reinit = false);
+
+ Error finish_render_device();
+ Error finish_capture_device();
+
+ Error audio_device_init(AudioDeviceWASAPI *p_device, bool p_capture, bool reinit);
+ Error audio_device_finish(AudioDeviceWASAPI *p_device);
+ Array audio_device_get_list(bool p_capture);
public:
virtual const char *get_name() const {
@@ -89,6 +117,12 @@ public:
virtual void unlock();
virtual void finish();
+ virtual Error capture_start();
+ virtual Error capture_stop();
+ virtual Array capture_get_device_list();
+ virtual void capture_set_device(const String &p_name);
+ virtual String capture_get_device();
+
AudioDriverWASAPI();
};
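// A sketch (not part of the patch) of exercising the capture entry points
// declared above; `driver` is a hypothetical, already-initialized
// AudioDriverWASAPI instance owned by the caller, and Godot's Error enum is
// assumed to be in scope:
static Error toggle_microphone(AudioDriverWASAPI &driver, bool p_enable) {
	if (p_enable) {
		driver.capture_set_device("Default"); // applied on the next thread_func pass
		return driver.capture_start(); // FAILED if capture was already active
	}
	return driver.capture_stop(); // FAILED if capture was not running
}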