From 29343ec0ee0140a7cc4d95290e5ab74025cdba78 Mon Sep 17 00:00:00 2001 From: "tommi@chromium.org" Date: Wed, 5 Mar 2014 15:31:41 +0000 Subject: [PATCH] Fix audio ducking support for the output side on Windows. The input side is now correctly opening up the communication device and we're getting the appropriate ducking behavior whether or not the default communication device is also the default console device. What was missing was to also open up the render streams as a communication device for associated output streams (associated to the communication input streams that is). If we don't do that, the communication output streams would also get ducked, thus making the whole exercise futile. BUG=347531 NOTRY=True Review URL: https://codereview.chromium.org/185863002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@255042 0039d316-1c4b-4281-b951-d872f2087c98 --- content/renderer/media/webrtc_audio_renderer.cc | 4 ++-- .../renderer/media/webrtc_local_audio_renderer.cc | 26 +++++++++++----------- media/audio/win/audio_low_latency_output_win.cc | 3 ++- media/audio/win/audio_manager_win.cc | 13 +++++++++-- media/audio/win/core_audio_util_win.cc | 15 ++++++++++++- 5 files changed, 42 insertions(+), 19 deletions(-) diff --git a/content/renderer/media/webrtc_audio_renderer.cc b/content/renderer/media/webrtc_audio_renderer.cc index edc014a1cbb0..c341d0fb3c6b 100644 --- a/content/renderer/media/webrtc_audio_renderer.cc +++ b/content/renderer/media/webrtc_audio_renderer.cc @@ -201,8 +201,8 @@ WebRtcAudioRenderer::WebRtcAudioRenderer( audio_delay_milliseconds_(0), fifo_delay_milliseconds_(0), sink_params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, - media::CHANNEL_LAYOUT_STEREO, sample_rate, 16, - frames_per_buffer) { + media::CHANNEL_LAYOUT_STEREO, 0, sample_rate, 16, - frames_per_buffer) { + frames_per_buffer, media::AudioParameters::DUCKING) { WebRtcLogMessage(base::StringPrintf( "WAR::WAR. 
source_render_view_id=%d" ", session_id=%d, sample_rate=%d, frames_per_buffer=%d", diff --git a/content/renderer/media/webrtc_local_audio_renderer.cc b/content/renderer/media/webrtc_local_audio_renderer.cc index 6fee864a6d7c..53aad237f873 100644 --- a/content/renderer/media/webrtc_local_audio_renderer.cc +++ b/content/renderer/media/webrtc_local_audio_renderer.cc @@ -102,22 +102,22 @@ void WebRtcLocalAudioRenderer::OnSetFormat( source_params_ = params; - sink_params_.Reset(source_params_.format(), - source_params_.channel_layout(), - source_params_.channels(), - source_params_.input_channels(), - source_params_.sample_rate(), - source_params_.bits_per_sample(), + sink_params_ = media::AudioParameters(source_params_.format(), + source_params_.channel_layout(), source_params_.channels(), + source_params_.input_channels(), source_params_.sample_rate(), + source_params_.bits_per_sample(), #if defined(OS_ANDROID) - // On Android, input and output are using same sampling rate. In order to - // achieve low latency mode, we need use buffer size suggested by - // AudioManager for the sink paramters which will be used to decide - // buffer size for shared memory buffer. - frames_per_buffer_ + // On Android, input and output use the same sample rate. In order to + // use the low latency mode, we need to use the buffer size suggested by + // the AudioManager for the sink. It will later be used to decide + // the buffer size of the shared memory buffer. + frames_per_buffer_, #else - 2 * source_params_.frames_per_buffer() + 2 * source_params_.frames_per_buffer(), #endif - ); + // If DUCKING is enabled on the source, it needs to be enabled on the + // sink as well. + source_params_.effects()); // TODO(henrika): we could add a more dynamic solution here but I prefer // a fixed size combined with bad audio at overflow. 
The alternative is diff --git a/media/audio/win/audio_low_latency_output_win.cc b/media/audio/win/audio_low_latency_output_win.cc index cd53e1280526..5c3205c9f25f 100644 --- a/media/audio/win/audio_low_latency_output_win.cc +++ b/media/audio/win/audio_low_latency_output_win.cc @@ -130,7 +130,8 @@ bool WASAPIAudioOutputStream::Open() { // Create an IAudioClient interface for the default rendering IMMDevice. ScopedComPtr audio_client; - if (device_id_.empty()) { + if (device_id_.empty() || + CoreAudioUtil::DeviceIsDefault(eRender, device_role_, device_id_)) { audio_client = CoreAudioUtil::CreateDefaultClient(eRender, device_role_); } else { ScopedComPtr device(CoreAudioUtil::CreateDevice(device_id_)); diff --git a/media/audio/win/audio_manager_win.cc b/media/audio/win/audio_manager_win.cc index 3478a21e46c5..00e8b7ac44e1 100644 --- a/media/audio/win/audio_manager_win.cc +++ b/media/audio/win/audio_manager_win.cc @@ -363,7 +363,8 @@ AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream( return new WASAPIAudioOutputStream(this, device_id == AudioManagerBase::kDefaultDeviceId ? std::string() : device_id, - params, eConsole); + params, + params.effects() & AudioParameters::DUCKING ? 
eCommunications : eConsole); } // Factory for the implementations of AudioInputStream for AUDIO_PCM_LINEAR @@ -410,6 +411,7 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters( int buffer_size = kFallbackBufferSize; int bits_per_sample = 16; int input_channels = 0; + int effects = AudioParameters::NO_EFFECTS; bool use_input_params = !core_audio_supported(); if (core_audio_supported()) { if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) { @@ -433,7 +435,13 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters( buffer_size = params.frames_per_buffer(); channel_layout = params.channel_layout(); sample_rate = params.sample_rate(); + effects = params.effects(); } else { + // TODO(tommi): This should never happen really and I'm not sure that + // setting use_input_params is the right thing to do since WASAPI is + // definitely supported (see core_audio_supported() above) and + // |use_input_params| is only for cases when it isn't supported. + DLOG(ERROR) << "GetPreferredAudioParameters failed: " << std::hex << hr; use_input_params = true; } } @@ -468,6 +476,7 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters( } } input_channels = input_params.input_channels(); + effects |= input_params.effects(); if (use_input_params) { // If WASAPI isn't supported we'll fallback to WaveOut, which will take // care of resampling and bits per sample changes. 
By setting these @@ -487,7 +496,7 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters( return AudioParameters( AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels, - sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS); + sample_rate, bits_per_sample, buffer_size, effects); } AudioInputStream* AudioManagerWin::CreatePCMWaveInAudioInputStream( diff --git a/media/audio/win/core_audio_util_win.cc b/media/audio/win/core_audio_util_win.cc index cf2c51c2386a..51623955be18 100644 --- a/media/audio/win/core_audio_util_win.cc +++ b/media/audio/win/core_audio_util_win.cc @@ -672,7 +672,20 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters( // actual error code. The exact value is not important here. return AUDCLNT_E_ENDPOINT_CREATE_FAILED; } - return GetPreferredAudioParameters(client, params); + + HRESULT hr = GetPreferredAudioParameters(client, params); + if (FAILED(hr)) + return hr; + + if (role == eCommunications) { + // Raise the 'DUCKING' flag for default communication devices. + *params = AudioParameters(params->format(), params->channel_layout(), + params->channels(), params->input_channels(), params->sample_rate(), + params->bits_per_sample(), params->frames_per_buffer(), + params->effects() | AudioParameters::DUCKING); + } + + return hr; } HRESULT CoreAudioUtil::GetPreferredAudioParameters( -- 2.11.4.GIT