author | Rémi Verschelde <rverschelde@gmail.com> | 2018-08-11 15:24:41 +0200
---|---|---
committer | GitHub <noreply@github.com> | 2018-08-11 15:24:41 +0200
commit | 73cf0fd30524a6acc4400ef069210d90eaba29a2 (patch) |
tree | 0e178885d794844124b1d74cf57a19c7971314b6 /drivers/wasapi |
parent | b6b1d14ca1fb9f4067ce200d0bffd0c32b38f48b (diff) |
parent | 73636953ae48c84fc9667109bd3dfbd234f41ea5 (diff) |
Merge pull request #19106 from SaracenOne/audio_mic
[WIP] Experimental microphone support
Diffstat (limited to 'drivers/wasapi')
-rw-r--r-- | drivers/wasapi/audio_driver_wasapi.cpp | 531
-rw-r--r-- | drivers/wasapi/audio_driver_wasapi.h | 64
2 files changed, 429 insertions, 166 deletions
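
The heart of the refactor is that per-stream state (render and capture) now lives in an AudioDeviceWASAPI helper object, and sample conversion is funneled through two static helpers: read_sample() turns whatever shared-mode format WASAPI delivers (integer PCM at 8/16/24/32 bits, or IEEE float) into the engine's internal signed 32-bit frames, and write_sample() performs the inverse on the render path. The standalone sketch below condenses that conversion; only the 16/32-bit PCM and float branches are shown, and the FMT_* constants plus plain integer types stand in for the Windows typedefs used by the real driver.

```cpp
#include <cstdint>

// Stand-ins for the WAVE_FORMAT_* tags from the Windows multimedia headers.
enum : uint16_t { FMT_PCM = 1, FMT_IEEE_FLOAT = 3 };

// Convert sample i of a shared-mode buffer to the engine's internal signed
// 32-bit representation (the 16-bit value sits in the top half of the word),
// mirroring the patch's read_sample(); 8- and 24-bit PCM are omitted here.
int32_t read_sample(uint16_t format_tag, int bits_per_sample, const uint8_t *buffer, int i) {
    if (format_tag == FMT_PCM) {
        switch (bits_per_sample) {
            case 16: return int32_t(reinterpret_cast<const int16_t *>(buffer)[i]) << 16;
            case 32: return reinterpret_cast<const int32_t *>(buffer)[i];
        }
    } else if (format_tag == FMT_IEEE_FLOAT) {
        // Float input is scaled to a 16-bit range, then shifted into place.
        return int32_t(reinterpret_cast<const float *>(buffer)[i] * 32768.0f) << 16;
    }
    return 0;
}

// The inverse used on the render side (write_sample): the mixed 32-bit sample
// is narrowed back to what the shared-mode format expects (16-bit PCM / float).
void write_sample(uint16_t format_tag, int bits_per_sample, uint8_t *buffer, int i, int32_t sample) {
    if (format_tag == FMT_PCM && bits_per_sample == 16) {
        reinterpret_cast<int16_t *>(buffer)[i] = int16_t(sample >> 16);
    } else if (format_tag == FMT_IEEE_FLOAT) {
        reinterpret_cast<float *>(buffer)[i] = (sample >> 16) / 32768.f;
    }
}
```

Keeping the helpers free of driver state (they take the format tag and bit depth as plain arguments) is what lets the same code serve both the render and the capture device.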
diff --git a/drivers/wasapi/audio_driver_wasapi.cpp b/drivers/wasapi/audio_driver_wasapi.cpp index 5982955c4f..a2f619a6ef 100644 --- a/drivers/wasapi/audio_driver_wasapi.cpp +++ b/drivers/wasapi/audio_driver_wasapi.cpp @@ -32,6 +32,8 @@ #include "audio_driver_wasapi.h" +#include <Functiondiscoverykeys_devpkey.h> + #include "os/os.h" #include "project_settings.h" @@ -52,8 +54,22 @@ const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator); const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator); const IID IID_IAudioClient = __uuidof(IAudioClient); const IID IID_IAudioRenderClient = __uuidof(IAudioRenderClient); +const IID IID_IAudioCaptureClient = __uuidof(IAudioCaptureClient); + +#define SAFE_RELEASE(memory) \ + if ((memory) != NULL) { \ + (memory)->Release(); \ + (memory) = NULL; \ + } -static bool default_device_changed = false; +#define REFTIMES_PER_SEC 10000000 +#define REFTIMES_PER_MILLISEC 10000 + +#define CAPTURE_BUFFER_CHANNELS 2 + +static StringName capture_device_id; +static bool default_render_device_changed = false; +static bool default_capture_device_changed = false; class CMMNotificationClient : public IMMNotificationClient { LONG _cRef; @@ -109,8 +125,13 @@ public: } HRESULT STDMETHODCALLTYPE OnDefaultDeviceChanged(EDataFlow flow, ERole role, LPCWSTR pwstrDeviceId) { - if (flow == eRender && role == eConsole) { - default_device_changed = true; + if (role == eConsole) { + if (flow == eRender) { + default_render_device_changed = true; + } else if (flow == eCapture) { + default_capture_device_changed = true; + capture_device_id = String(pwstrDeviceId); + } } return S_OK; @@ -123,7 +144,7 @@ public: static CMMNotificationClient notif_client; -Error AudioDriverWASAPI::init_device(bool reinit) { +Error AudioDriverWASAPI::audio_device_init(AudioDeviceWASAPI *p_device, bool p_capture, bool reinit) { WAVEFORMATEX *pwfex; IMMDeviceEnumerator *enumerator = NULL; @@ -134,12 +155,12 @@ Error AudioDriverWASAPI::init_device(bool reinit) { HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void **)&enumerator); ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN); - if (device_name == "Default") { - hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device); + if (p_device->device_name == "Default") { + hr = enumerator->GetDefaultAudioEndpoint(p_capture ? eCapture : eRender, eConsole, &device); } else { IMMDeviceCollection *devices = NULL; - hr = enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &devices); + hr = enumerator->EnumAudioEndpoints(p_capture ? eCapture : eRender, DEVICE_STATE_ACTIVE, &devices); ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN); LPWSTR strId = NULL; @@ -165,7 +186,7 @@ Error AudioDriverWASAPI::init_device(bool reinit) { hr = props->GetValue(PKEY_Device_FriendlyName, &propvar); ERR_BREAK(hr != S_OK); - if (device_name == String(propvar.pwszVal)) { + if (p_device->device_name == String(propvar.pwszVal)) { hr = device->GetId(&strId); ERR_BREAK(hr != S_OK); @@ -186,9 +207,10 @@ Error AudioDriverWASAPI::init_device(bool reinit) { } if (device == NULL) { - hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device); + hr = enumerator->GetDefaultAudioEndpoint(p_capture ? eCapture : eRender, eConsole, &device); } } + if (reinit) { // In case we're trying to re-initialize the device prevent throwing this error on the console, // otherwise if there is currently no device available this will spam the console. 
@@ -200,11 +222,15 @@ Error AudioDriverWASAPI::init_device(bool reinit) { } hr = enumerator->RegisterEndpointNotificationCallback(¬if_client); + SAFE_RELEASE(enumerator) + if (hr != S_OK) { ERR_PRINT("WASAPI: RegisterEndpointNotificationCallback error"); } - hr = device->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void **)&audio_client); + hr = device->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void **)&p_device->audio_client); + SAFE_RELEASE(device) + if (reinit) { if (hr != S_OK) { return ERR_CANT_OPEN; @@ -213,75 +239,89 @@ Error AudioDriverWASAPI::init_device(bool reinit) { ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN); } - hr = audio_client->GetMixFormat(&pwfex); + hr = p_device->audio_client->GetMixFormat(&pwfex); ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN); // Since we're using WASAPI Shared Mode we can't control any of these, we just tag along - wasapi_channels = pwfex->nChannels; - format_tag = pwfex->wFormatTag; - bits_per_sample = pwfex->wBitsPerSample; + p_device->channels = pwfex->nChannels; + p_device->format_tag = pwfex->wFormatTag; + p_device->bits_per_sample = pwfex->wBitsPerSample; + p_device->frame_size = (p_device->bits_per_sample / 8) * p_device->channels; - switch (wasapi_channels) { - case 2: // Stereo - case 4: // Surround 3.1 - case 6: // Surround 5.1 - case 8: // Surround 7.1 - channels = wasapi_channels; - break; - - default: - WARN_PRINTS("WASAPI: Unsupported number of channels: " + itos(wasapi_channels)); - channels = 2; - break; - } - - if (format_tag == WAVE_FORMAT_EXTENSIBLE) { + if (p_device->format_tag == WAVE_FORMAT_EXTENSIBLE) { WAVEFORMATEXTENSIBLE *wfex = (WAVEFORMATEXTENSIBLE *)pwfex; if (wfex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM) { - format_tag = WAVE_FORMAT_PCM; + p_device->format_tag = WAVE_FORMAT_PCM; } else if (wfex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) { - format_tag = WAVE_FORMAT_IEEE_FLOAT; + p_device->format_tag = WAVE_FORMAT_IEEE_FLOAT; } else { ERR_PRINT("WASAPI: Format not supported"); ERR_FAIL_V(ERR_CANT_OPEN); } } else { - if (format_tag != WAVE_FORMAT_PCM && format_tag != WAVE_FORMAT_IEEE_FLOAT) { + if (p_device->format_tag != WAVE_FORMAT_PCM && p_device->format_tag != WAVE_FORMAT_IEEE_FLOAT) { ERR_PRINT("WASAPI: Format not supported"); ERR_FAIL_V(ERR_CANT_OPEN); } } - DWORD streamflags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK; + DWORD streamflags = 0; if (mix_rate != pwfex->nSamplesPerSec) { streamflags |= AUDCLNT_STREAMFLAGS_RATEADJUST; pwfex->nSamplesPerSec = mix_rate; pwfex->nAvgBytesPerSec = pwfex->nSamplesPerSec * pwfex->nChannels * (pwfex->wBitsPerSample / 8); } - hr = audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, streamflags, 0, 0, pwfex, NULL); + hr = p_device->audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, streamflags, p_capture ? 
REFTIMES_PER_SEC : 0, 0, pwfex, NULL); ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN); - event = CreateEvent(NULL, FALSE, FALSE, NULL); - ERR_FAIL_COND_V(event == NULL, ERR_CANT_OPEN); - - hr = audio_client->SetEventHandle(event); + if (p_capture) { + hr = p_device->audio_client->GetService(IID_IAudioCaptureClient, (void **)&p_device->capture_client); + } else { + hr = p_device->audio_client->GetService(IID_IAudioRenderClient, (void **)&p_device->render_client); + } ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN); - hr = audio_client->GetService(IID_IAudioRenderClient, (void **)&render_client); - ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN); + // Free memory + CoTaskMemFree(pwfex); + SAFE_RELEASE(device) + + return OK; +} + +Error AudioDriverWASAPI::init_render_device(bool reinit) { + + Error err = audio_device_init(&audio_output, false, reinit); + if (err != OK) + return err; + + switch (audio_output.channels) { + case 2: // Stereo + case 4: // Surround 3.1 + case 6: // Surround 5.1 + case 8: // Surround 7.1 + channels = audio_output.channels; + break; + + default: + WARN_PRINTS("WASAPI: Unsupported number of channels: " + itos(audio_output.channels)); + channels = 2; + break; + } UINT32 max_frames; - hr = audio_client->GetBufferSize(&max_frames); + HRESULT hr = audio_output.audio_client->GetBufferSize(&max_frames); ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN); // Due to WASAPI Shared Mode we have no control of the buffer size buffer_frames = max_frames; // Sample rate is independent of channels (ref: https://stackoverflow.com/questions/11048825/audio-sample-frequency-rely-on-channels) - buffer_size = buffer_frames * channels; - samples_in.resize(buffer_size); + samples_in.resize(buffer_frames * channels); + + input_position = 0; + input_size = 0; if (OS::get_singleton()->is_stdout_verbose()) { print_line("WASAPI: detected " + itos(channels) + " channels"); @@ -291,41 +331,61 @@ Error AudioDriverWASAPI::init_device(bool reinit) { return OK; } -Error AudioDriverWASAPI::finish_device() { +Error AudioDriverWASAPI::init_capture_device(bool reinit) { - if (audio_client) { - if (active) { - audio_client->Stop(); - active = false; - } + Error err = audio_device_init(&audio_input, true, reinit); + if (err != OK) + return err; - audio_client->Release(); - audio_client = NULL; - } + // Get the max frames + UINT32 max_frames; + HRESULT hr = audio_input.audio_client->GetBufferSize(&max_frames); + ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN); - if (render_client) { - render_client->Release(); - render_client = NULL; - } + // Set the buffer size + input_buffer.resize(max_frames * CAPTURE_BUFFER_CHANNELS); + input_position = 0; + input_size = 0; - if (audio_client) { - audio_client->Release(); - audio_client = NULL; + return OK; +} + +Error AudioDriverWASAPI::audio_device_finish(AudioDeviceWASAPI *p_device) { + + if (p_device->active) { + if (p_device->audio_client) { + p_device->audio_client->Stop(); + } + + p_device->active = false; } + SAFE_RELEASE(p_device->audio_client) + SAFE_RELEASE(p_device->render_client) + SAFE_RELEASE(p_device->capture_client) + return OK; } +Error AudioDriverWASAPI::finish_render_device() { + + return audio_device_finish(&audio_output); +} + +Error AudioDriverWASAPI::finish_capture_device() { + + return audio_device_finish(&audio_input); +} + Error AudioDriverWASAPI::init() { mix_rate = GLOBAL_DEF_RST("audio/mix_rate", DEFAULT_MIX_RATE); - Error err = init_device(); + Error err = init_render_device(); if (err != OK) { - ERR_PRINT("WASAPI: init_device error"); + ERR_PRINT("WASAPI: 
init_render_device error"); } - active = false; exit_thread = false; thread_exited = false; @@ -345,7 +405,7 @@ AudioDriver::SpeakerMode AudioDriverWASAPI::get_speaker_mode() const { return get_speaker_mode_by_total_channels(channels); } -Array AudioDriverWASAPI::get_device_list() { +Array AudioDriverWASAPI::audio_device_get_list(bool p_capture) { Array list; IMMDeviceCollection *devices = NULL; @@ -358,7 +418,7 @@ Array AudioDriverWASAPI::get_device_list() { HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void **)&enumerator); ERR_FAIL_COND_V(hr != S_OK, Array()); - hr = enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &devices); + hr = enumerator->EnumAudioEndpoints(p_capture ? eCapture : eRender, DEVICE_STATE_ACTIVE, &devices); ERR_FAIL_COND_V(hr != S_OK, Array()); UINT count = 0; @@ -393,21 +453,63 @@ Array AudioDriverWASAPI::get_device_list() { return list; } +Array AudioDriverWASAPI::get_device_list() { + + return audio_device_get_list(false); +} + String AudioDriverWASAPI::get_device() { - return device_name; + lock(); + String name = audio_output.device_name; + unlock(); + + return name; } void AudioDriverWASAPI::set_device(String device) { lock(); - new_device = device; + audio_output.new_device = device; unlock(); } -void AudioDriverWASAPI::write_sample(AudioDriverWASAPI *ad, BYTE *buffer, int i, int32_t sample) { - if (ad->format_tag == WAVE_FORMAT_PCM) { - switch (ad->bits_per_sample) { +int32_t AudioDriverWASAPI::read_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i) { + if (format_tag == WAVE_FORMAT_PCM) { + int32_t sample = 0; + switch (bits_per_sample) { + case 8: + sample = int32_t(((int8_t *)buffer)[i]) << 24; + break; + + case 16: + sample = int32_t(((int16_t *)buffer)[i]) << 16; + break; + + case 24: + sample |= int32_t(((int8_t *)buffer)[i * 3 + 2]) << 24; + sample |= int32_t(((int8_t *)buffer)[i * 3 + 1]) << 16; + sample |= int32_t(((int8_t *)buffer)[i * 3 + 0]) << 8; + break; + + case 32: + sample = ((int32_t *)buffer)[i]; + break; + } + + return sample; + } else if (format_tag == WAVE_FORMAT_IEEE_FLOAT) { + return int32_t(((float *)buffer)[i] * 32768.0) << 16; + } else { + ERR_PRINT("WASAPI: Unknown format tag"); + } + + return 0; +} + +void AudioDriverWASAPI::write_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i, int32_t sample) { + if (format_tag == WAVE_FORMAT_PCM) { + switch (bits_per_sample) { case 8: ((int8_t *)buffer)[i] = sample >> 24; break; @@ -426,83 +528,99 @@ void AudioDriverWASAPI::write_sample(AudioDriverWASAPI *ad, BYTE *buffer, int i, ((int32_t *)buffer)[i] = sample; break; } - } else if (ad->format_tag == WAVE_FORMAT_IEEE_FLOAT) { + } else if (format_tag == WAVE_FORMAT_IEEE_FLOAT) { ((float *)buffer)[i] = (sample >> 16) / 32768.f; } else { ERR_PRINT("WASAPI: Unknown format tag"); - ad->exit_thread = true; } } void AudioDriverWASAPI::thread_func(void *p_udata) { AudioDriverWASAPI *ad = (AudioDriverWASAPI *)p_udata; + uint32_t avail_frames = 0; + uint32_t write_ofs = 0; while (!ad->exit_thread) { - ad->lock(); - ad->start_counting_ticks(); + uint32_t read_frames = 0; + uint32_t written_frames = 0; - if (ad->active) { - ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw()); - } else { - for (unsigned int i = 0; i < ad->buffer_size; i++) { - ad->samples_in.write[i] = 0; + if (avail_frames == 0) { + ad->lock(); + ad->start_counting_ticks(); + + if (ad->audio_output.active) { + ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw()); + 
} else { + for (unsigned int i = 0; i < ad->samples_in.size(); i++) { + ad->samples_in.write[i] = 0; + } } - } - ad->stop_counting_ticks(); - ad->unlock(); + avail_frames = ad->buffer_frames; + write_ofs = 0; - unsigned int left_frames = ad->buffer_frames; - unsigned int buffer_idx = 0; - while (left_frames > 0 && ad->audio_client) { - WaitForSingleObject(ad->event, 1000); + ad->stop_counting_ticks(); + ad->unlock(); + } - ad->lock(); - ad->start_counting_ticks(); + ad->lock(); + ad->start_counting_ticks(); + + if (avail_frames > 0 && ad->audio_output.audio_client) { UINT32 cur_frames; bool invalidated = false; - HRESULT hr = ad->audio_client->GetCurrentPadding(&cur_frames); + HRESULT hr = ad->audio_output.audio_client->GetCurrentPadding(&cur_frames); if (hr == S_OK) { - // Check how much frames are available on the WASAPI buffer - UINT32 avail_frames = ad->buffer_frames - cur_frames; - UINT32 write_frames = avail_frames > left_frames ? left_frames : avail_frames; - BYTE *buffer = NULL; - hr = ad->render_client->GetBuffer(write_frames, &buffer); - if (hr == S_OK) { - // We're using WASAPI Shared Mode so we must convert the buffer - - if (ad->channels == ad->wasapi_channels) { - for (unsigned int i = 0; i < write_frames * ad->channels; i++) { - ad->write_sample(ad, buffer, i, ad->samples_in[buffer_idx++]); - } - } else { - for (unsigned int i = 0; i < write_frames; i++) { - for (unsigned int j = 0; j < MIN(ad->channels, ad->wasapi_channels); j++) { - ad->write_sample(ad, buffer, i * ad->wasapi_channels + j, ad->samples_in[buffer_idx++]); + // Check how much frames are available on the WASAPI buffer + UINT32 write_frames = MIN(ad->buffer_frames - cur_frames, avail_frames); + if (write_frames > 0) { + BYTE *buffer = NULL; + hr = ad->audio_output.render_client->GetBuffer(write_frames, &buffer); + if (hr == S_OK) { + + // We're using WASAPI Shared Mode so we must convert the buffer + if (ad->channels == ad->audio_output.channels) { + for (unsigned int i = 0; i < write_frames * ad->channels; i++) { + ad->write_sample(ad->audio_output.format_tag, ad->audio_output.bits_per_sample, buffer, i, ad->samples_in.write[write_ofs++]); } - if (ad->wasapi_channels > ad->channels) { - for (unsigned int j = ad->channels; j < ad->wasapi_channels; j++) { - ad->write_sample(ad, buffer, i * ad->wasapi_channels + j, 0); + } else { + for (unsigned int i = 0; i < write_frames; i++) { + for (unsigned int j = 0; j < MIN(ad->channels, ad->audio_output.channels); j++) { + ad->write_sample(ad->audio_output.format_tag, ad->audio_output.bits_per_sample, buffer, i * ad->audio_output.channels + j, ad->samples_in.write[write_ofs++]); + } + if (ad->audio_output.channels > ad->channels) { + for (unsigned int j = ad->channels; j < ad->audio_output.channels; j++) { + ad->write_sample(ad->audio_output.format_tag, ad->audio_output.bits_per_sample, buffer, i * ad->audio_output.channels + j, 0); + } } } } - } - hr = ad->render_client->ReleaseBuffer(write_frames, 0); - if (hr != S_OK) { - ERR_PRINT("WASAPI: Release buffer error"); - } + hr = ad->audio_output.render_client->ReleaseBuffer(write_frames, 0); + if (hr != S_OK) { + ERR_PRINT("WASAPI: Release buffer error"); + } - left_frames -= write_frames; - } else if (hr == AUDCLNT_E_DEVICE_INVALIDATED) { - invalidated = true; - } else { - ERR_PRINT("WASAPI: Get buffer error"); - ad->exit_thread = true; + avail_frames -= write_frames; + written_frames += write_frames; + } else if (hr == AUDCLNT_E_DEVICE_INVALIDATED) { + // Device is not valid anymore, reopen it + + Error err = 
ad->finish_render_device(); + if (err != OK) { + ERR_PRINT("WASAPI: finish_render_device error"); + } else { + // We reopened the device and samples_in may have resized, so invalidate the current avail_frames + avail_frames = 0; + } + } else { + ERR_PRINT("WASAPI: Get buffer error"); + ad->exit_thread = true; + } } } else if (hr == AUDCLNT_E_DEVICE_INVALIDATED) { invalidated = true; @@ -514,47 +632,117 @@ void AudioDriverWASAPI::thread_func(void *p_udata) { // Device is not valid anymore WARN_PRINT("WASAPI: Current device invalidated, closing device"); - Error err = ad->finish_device(); + Error err = ad->finish_render_device(); if (err != OK) { - ERR_PRINT("WASAPI: finish_device error"); + ERR_PRINT("WASAPI: finish_render_device error"); } } - - ad->stop_counting_ticks(); - ad->unlock(); } - ad->lock(); - ad->start_counting_ticks(); - // If we're using the Default device and it changed finish it so we'll re-init the device - if (ad->device_name == "Default" && default_device_changed) { - Error err = ad->finish_device(); + if (ad->audio_output.device_name == "Default" && default_render_device_changed) { + Error err = ad->finish_render_device(); if (err != OK) { - ERR_PRINT("WASAPI: finish_device error"); + ERR_PRINT("WASAPI: finish_render_device error"); } - default_device_changed = false; + default_render_device_changed = false; } // User selected a new device, finish the current one so we'll init the new device - if (ad->device_name != ad->new_device) { - ad->device_name = ad->new_device; - Error err = ad->finish_device(); + if (ad->audio_output.device_name != ad->audio_output.new_device) { + ad->audio_output.device_name = ad->audio_output.new_device; + Error err = ad->finish_render_device(); if (err != OK) { - ERR_PRINT("WASAPI: finish_device error"); + ERR_PRINT("WASAPI: finish_render_device error"); } } - if (!ad->audio_client) { - Error err = ad->init_device(true); + if (!ad->audio_output.audio_client) { + Error err = ad->init_render_device(true); if (err == OK) { ad->start(); } } + if (ad->audio_input.active) { + UINT32 packet_length = 0; + BYTE *data; + UINT32 num_frames_available; + DWORD flags; + + HRESULT hr = ad->audio_input.capture_client->GetNextPacketSize(&packet_length); + if (hr == S_OK) { + while (packet_length != 0) { + hr = ad->audio_input.capture_client->GetBuffer(&data, &num_frames_available, &flags, NULL, NULL); + ERR_BREAK(hr != S_OK); + + // fixme: Only works for floating point atm + for (int j = 0; j < num_frames_available; j++) { + int32_t l, r; + + if (flags & AUDCLNT_BUFFERFLAGS_SILENT) { + l = r = 0; + } else { + if (ad->audio_input.channels == 2) { + l = read_sample(ad->audio_input.format_tag, ad->audio_input.bits_per_sample, data, j * 2); + r = read_sample(ad->audio_input.format_tag, ad->audio_input.bits_per_sample, data, j * 2 + 1); + } else if (ad->audio_input.channels == 1) { + l = r = read_sample(ad->audio_input.format_tag, ad->audio_input.bits_per_sample, data, j); + } else { + l = r = 0; + ERR_PRINT("WASAPI: unsupported channel count in microphone!"); + } + } + + ad->input_buffer_write(l); + ad->input_buffer_write(r); + } + + read_frames += num_frames_available; + + hr = ad->audio_input.capture_client->ReleaseBuffer(num_frames_available); + ERR_BREAK(hr != S_OK); + + hr = ad->audio_input.capture_client->GetNextPacketSize(&packet_length); + ERR_BREAK(hr != S_OK); + } + } + + // If we're using the Default device and it changed finish it so we'll re-init the device + if (ad->audio_input.device_name == "Default" && default_capture_device_changed) { + Error 
err = ad->finish_capture_device(); + if (err != OK) { + ERR_PRINT("WASAPI: finish_capture_device error"); + } + + default_capture_device_changed = false; + } + + // User selected a new device, finish the current one so we'll init the new device + if (ad->audio_input.device_name != ad->audio_input.new_device) { + ad->audio_input.device_name = ad->audio_input.new_device; + Error err = ad->finish_capture_device(); + if (err != OK) { + ERR_PRINT("WASAPI: finish_capture_device error"); + } + } + + if (!ad->audio_input.audio_client) { + Error err = ad->init_capture_device(true); + if (err == OK) { + ad->capture_start(); + } + } + } + ad->stop_counting_ticks(); ad->unlock(); + + // Let the thread rest a while if we haven't read or write anything + if (written_frames == 0 && read_frames == 0) { + OS::get_singleton()->delay_usec(1000); + } } ad->thread_exited = true; @@ -562,12 +750,12 @@ void AudioDriverWASAPI::thread_func(void *p_udata) { void AudioDriverWASAPI::start() { - if (audio_client) { - HRESULT hr = audio_client->Start(); + if (audio_output.audio_client) { + HRESULT hr = audio_output.audio_client->Start(); if (hr != S_OK) { ERR_PRINT("WASAPI: Start failed"); } else { - active = true; + audio_output.active = true; } } } @@ -594,7 +782,8 @@ void AudioDriverWASAPI::finish() { thread = NULL; } - finish_device(); + finish_capture_device(); + finish_render_device(); if (mutex) { memdelete(mutex); @@ -602,30 +791,70 @@ void AudioDriverWASAPI::finish() { } } +Error AudioDriverWASAPI::capture_start() { + + Error err = init_capture_device(); + if (err != OK) { + ERR_PRINT("WASAPI: init_capture_device error"); + return err; + } + + if (audio_input.active == false) { + audio_input.audio_client->Start(); + audio_input.active = true; + + return OK; + } + + return FAILED; +} + +Error AudioDriverWASAPI::capture_stop() { + + if (audio_input.active == true) { + audio_input.audio_client->Stop(); + audio_input.active = false; + + return OK; + } + + return FAILED; +} + +void AudioDriverWASAPI::capture_set_device(const String &p_name) { + + lock(); + audio_input.new_device = p_name; + unlock(); +} + +Array AudioDriverWASAPI::capture_get_device_list() { + + return audio_device_get_list(true); +} + +String AudioDriverWASAPI::capture_get_device() { + + lock(); + String name = audio_input.device_name; + unlock(); + + return name; +} + AudioDriverWASAPI::AudioDriverWASAPI() { - audio_client = NULL; - render_client = NULL; mutex = NULL; thread = NULL; - format_tag = 0; - bits_per_sample = 0; - samples_in.clear(); - buffer_size = 0; channels = 0; - wasapi_channels = 0; mix_rate = 0; buffer_frames = 0; thread_exited = false; exit_thread = false; - active = false; - - device_name = "Default"; - new_device = "Default"; } #endif diff --git a/drivers/wasapi/audio_driver_wasapi.h b/drivers/wasapi/audio_driver_wasapi.h index f3ee5976eb..3d94f3ba49 100644 --- a/drivers/wasapi/audio_driver_wasapi.h +++ b/drivers/wasapi/audio_driver_wasapi.h @@ -43,35 +43,63 @@ class AudioDriverWASAPI : public AudioDriver { - HANDLE event; - IAudioClient *audio_client; - IAudioRenderClient *render_client; + class AudioDeviceWASAPI { + public: + IAudioClient *audio_client; + IAudioRenderClient *render_client; + IAudioCaptureClient *capture_client; + bool active; + + WORD format_tag; + WORD bits_per_sample; + unsigned int channels; + unsigned int frame_size; + + String device_name; + String new_device; + + AudioDeviceWASAPI() { + audio_client = NULL; + render_client = NULL; + capture_client = NULL; + active = false; + format_tag = 0; + 
bits_per_sample = 0; + channels = 0; + frame_size = 0; + device_name = "Default"; + new_device = "Default"; + } + }; + + AudioDeviceWASAPI audio_input; + AudioDeviceWASAPI audio_output; + Mutex *mutex; Thread *thread; - String device_name; - String new_device; - - WORD format_tag; - WORD bits_per_sample; - Vector<int32_t> samples_in; - unsigned int buffer_size; unsigned int channels; - unsigned int wasapi_channels; int mix_rate; int buffer_frames; bool thread_exited; mutable bool exit_thread; - bool active; - _FORCE_INLINE_ void write_sample(AudioDriverWASAPI *ad, BYTE *buffer, int i, int32_t sample); + static _FORCE_INLINE_ void write_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i, int32_t sample); + static _FORCE_INLINE_ int32_t read_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i); static void thread_func(void *p_udata); - Error init_device(bool reinit = false); - Error finish_device(); + Error init_render_device(bool reinit = false); + Error init_capture_device(bool reinit = false); + + Error finish_render_device(); + Error finish_capture_device(); + + Error audio_device_init(AudioDeviceWASAPI *p_device, bool p_capture, bool reinit); + Error audio_device_finish(AudioDeviceWASAPI *p_device); + Array audio_device_get_list(bool p_capture); public: virtual const char *get_name() const { @@ -89,6 +117,12 @@ public: virtual void unlock(); virtual void finish(); + virtual Error capture_start(); + virtual Error capture_stop(); + virtual Array capture_get_device_list(); + virtual void capture_set_device(const String &p_name); + virtual String capture_get_device(); + AudioDriverWASAPI(); }; |
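
For readers who want to see the capture path in isolation, below is a minimal, self-contained sketch of the shared-mode polling pattern the patch adds to thread_func() for audio_input: GetNextPacketSize(), GetBuffer(), ReleaseBuffer() in a loop, with a one-second capture buffer requested at Initialize() time. Godot-specific pieces (locking, the input ring buffer, default-device change handling, read_sample() conversion) are omitted, error checking is trimmed, and the fixed iteration count exists only to keep the example finite.

```cpp
// Standalone WASAPI shared-mode capture polling sketch (Windows, MSVC).
// Compile as C++ and link ole32.lib.
#include <windows.h>
#include <mmdeviceapi.h>
#include <audioclient.h>
#include <cstdio>

#define REFTIMES_PER_SEC 10000000 // 1 s in 100-ns units, as in the patch

int main() {
    CoInitializeEx(NULL, COINIT_MULTITHREADED);

    IMMDeviceEnumerator *enumerator = NULL;
    CoCreateInstance(__uuidof(MMDeviceEnumerator), NULL, CLSCTX_ALL,
                     __uuidof(IMMDeviceEnumerator), (void **)&enumerator);

    IMMDevice *device = NULL;
    enumerator->GetDefaultAudioEndpoint(eCapture, eConsole, &device);

    IAudioClient *client = NULL;
    device->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, (void **)&client);

    WAVEFORMATEX *wfex = NULL;
    client->GetMixFormat(&wfex); // shared mode: take whatever format the mixer uses

    // The patch passes a one-second buffer duration for capture streams.
    client->Initialize(AUDCLNT_SHAREMODE_SHARED, 0, REFTIMES_PER_SEC, 0, wfex, NULL);

    IAudioCaptureClient *capture = NULL;
    client->GetService(__uuidof(IAudioCaptureClient), (void **)&capture);
    client->Start();

    for (int iteration = 0; iteration < 1000; iteration++) {
        UINT32 packet_frames = 0;
        capture->GetNextPacketSize(&packet_frames);

        while (packet_frames != 0) {
            BYTE *data = NULL;
            UINT32 frames = 0;
            DWORD flags = 0;
            capture->GetBuffer(&data, &frames, &flags, NULL, NULL);

            if (!(flags & AUDCLNT_BUFFERFLAGS_SILENT)) {
                // The driver converts each frame with read_sample() and pushes
                // it into the input ring buffer; here we only count frames.
                printf("captured %u frames\n", frames);
            }

            capture->ReleaseBuffer(frames);
            capture->GetNextPacketSize(&packet_frames);
        }

        Sleep(1); // the driver sleeps ~1 ms when nothing was read or written
    }

    client->Stop();
    capture->Release();
    client->Release();
    CoTaskMemFree(wfex);
    device->Release();
    enumerator->Release();
    CoUninitialize();
    return 0;
}
```

In the real driver the equivalent loop runs on the mixer thread alongside the render path, and both sides share the same 1 ms back-off when neither produced nor consumed any frames.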