From 42979149ac67727de85b1e5fc5cf874cf709eb36 Mon Sep 17 00:00:00 2001 From: ajm Date: Mon, 14 Sep 2015 11:41:13 -0700 Subject: [PATCH] Add a virtual beamforming audio device on ChromeOS. AudioManagerCras checks with CrasAudioHandler for an internal mic device with a valid positions field. If present, it adds a virtual beamforming device. When this device is selected by a web app, the mic positions are plumbed up to MediaStreamAudioProcessor via AudioParameters. MSAP will enable beamforming when it sees valid mic positions. See the design doc for background: go/virtual-beamforming-device BUG=497001 TEST=Selecting the beamforming device on a swanky indeed enables beamforming for the internal mic and continues to work fine for an external USB mic. Selecting the non-beamforming device disables beamforming for the internal mic. Review URL: https://codereview.chromium.org/1275783003 Cr-Commit-Position: refs/heads/master@{#348668} --- .../media/audio_input_device_manager.cc | 1 + content/common/media/media_param_traits.cc | 7 +- content/common/media/media_stream_messages.h | 1 + content/public/common/DEPS | 3 + content/public/common/media_stream_request.cc | 14 ++ content/public/common/media_stream_request.h | 23 ++- .../renderer/media/media_stream_audio_processor.cc | 49 ++----- .../renderer/media/media_stream_audio_processor.h | 11 +- .../media/media_stream_audio_processor_options.cc | 58 ++++---- .../media/media_stream_audio_processor_options.h | 14 +- .../media/media_stream_audio_processor_unittest.cc | 135 +++++++++++------ .../media/mock_media_constraint_factory.cc | 2 +- .../renderer/media/mock_media_constraint_factory.h | 2 +- content/renderer/media/webrtc_audio_capturer.cc | 2 +- media/BUILD.gn | 3 + media/audio/BUILD.gn | 7 - media/audio/audio_parameters.cc | 14 +- media/audio/audio_parameters.h | 25 +++- media/audio/audio_parameters_unittest.cc | 5 + media/audio/cras/audio_manager_cras.cc | 87 ++++++++--- media/audio/cras/audio_manager_cras.h | 7 + 
media/audio/openbsd/audio_manager_openbsd.cc | 160 --------------------- media/audio/openbsd/audio_manager_openbsd.h | 55 ------- media/audio/point.cc | 61 ++++++++ media/audio/point.h | 31 ++++ media/audio/point_unittest.cc | 41 ++++++ media/audio/pulse/audio_manager_pulse.cc | 6 +- media/base/audio_buffer_unittest.cc | 9 +- media/media.gyp | 11 +- media/shared_memory_support.gypi | 2 + ui/gfx/gfx.gyp | 2 + ui/gfx/ipc/gfx_param_traits.cc | 44 ++++-- ui/gfx/ipc/gfx_param_traits.h | 9 ++ 33 files changed, 485 insertions(+), 416 deletions(-) delete mode 100644 media/audio/openbsd/audio_manager_openbsd.cc delete mode 100644 media/audio/openbsd/audio_manager_openbsd.h create mode 100644 media/audio/point.cc create mode 100644 media/audio/point.h create mode 100644 media/audio/point_unittest.cc diff --git a/content/browser/renderer_host/media/audio_input_device_manager.cc b/content/browser/renderer_host/media/audio_input_device_manager.cc index 111dc1e2952f..5b231b055555 100644 --- a/content/browser/renderer_host/media/audio_input_device_manager.cc +++ b/content/browser/renderer_host/media/audio_input_device_manager.cc @@ -213,6 +213,7 @@ void AudioInputDeviceManager::OpenOnDeviceThread( input_params.channel_layout = params.channel_layout(); input_params.frames_per_buffer = params.frames_per_buffer(); input_params.effects = params.effects(); + input_params.mic_positions = params.mic_positions(); // Add preferred output device information if a matching output device // exists. 
diff --git a/content/common/media/media_param_traits.cc b/content/common/media/media_param_traits.cc index 7b3edcd8528c..5d5020c96544 100644 --- a/content/common/media/media_param_traits.cc +++ b/content/common/media/media_param_traits.cc @@ -9,6 +9,7 @@ #include "content/common/media/video_capture_messages.h" #include "ipc/ipc_message_utils.h" #include "media/audio/audio_parameters.h" +#include "media/audio/point.h" #include "media/base/limits.h" #include "ui/gfx/ipc/gfx_param_traits.h" @@ -27,6 +28,7 @@ void ParamTraits::Write(Message* m, WriteParam(m, p.frames_per_buffer()); WriteParam(m, p.channels()); WriteParam(m, p.effects()); + WriteParam(m, p.mic_positions()); } bool ParamTraits::Read(const Message* m, @@ -35,12 +37,14 @@ bool ParamTraits::Read(const Message* m, AudioParameters::Format format; ChannelLayout channel_layout; int sample_rate, bits_per_sample, frames_per_buffer, channels, effects; + std::vector mic_positions; if (!ReadParam(m, iter, &format) || !ReadParam(m, iter, &channel_layout) || !ReadParam(m, iter, &sample_rate) || !ReadParam(m, iter, &bits_per_sample) || !ReadParam(m, iter, &frames_per_buffer) || - !ReadParam(m, iter, &channels) || !ReadParam(m, iter, &effects)) { + !ReadParam(m, iter, &channels) || !ReadParam(m, iter, &effects) || + !ReadParam(m, iter, &mic_positions)) { return false; } @@ -48,6 +52,7 @@ bool ParamTraits::Read(const Message* m, frames_per_buffer); params.set_channels_for_discrete(channels); params.set_effects(effects); + params.set_mic_positions(mic_positions); *r = params; return r->IsValid(); diff --git a/content/common/media/media_stream_messages.h b/content/common/media/media_stream_messages.h index b66d052a3015..897535530945 100644 --- a/content/common/media/media_stream_messages.h +++ b/content/common/media/media_stream_messages.h @@ -50,6 +50,7 @@ IPC_STRUCT_TRAITS_BEGIN(content::StreamDeviceInfo) IPC_STRUCT_TRAITS_MEMBER(device.input.channel_layout) IPC_STRUCT_TRAITS_MEMBER(device.input.frames_per_buffer) 
IPC_STRUCT_TRAITS_MEMBER(device.input.effects) + IPC_STRUCT_TRAITS_MEMBER(device.input.mic_positions) IPC_STRUCT_TRAITS_MEMBER(device.matched_output.sample_rate) IPC_STRUCT_TRAITS_MEMBER(device.matched_output.channel_layout) IPC_STRUCT_TRAITS_MEMBER(device.matched_output.frames_per_buffer) diff --git a/content/public/common/DEPS b/content/public/common/DEPS index 6f777b0a6068..55a793f00067 100644 --- a/content/public/common/DEPS +++ b/content/public/common/DEPS @@ -1,5 +1,8 @@ specific_include_rules = { ".*\.cc": [ "+content/common", + ], + "media_stream_request.h": [ + "+media/audio/audio_parameters.h", ] } diff --git a/content/public/common/media_stream_request.cc b/content/public/common/media_stream_request.cc index 0a35bd783bf7..7eadf9a6c133 100644 --- a/content/public/common/media_stream_request.cc +++ b/content/public/common/media_stream_request.cc @@ -83,6 +83,20 @@ const MediaStreamDevice* MediaStreamDevices::FindById( return NULL; } +MediaStreamDevice::AudioDeviceParameters::AudioDeviceParameters() + : sample_rate(), channel_layout(), frames_per_buffer(), effects() {} + +MediaStreamDevice::AudioDeviceParameters::AudioDeviceParameters( + int sample_rate, + int channel_layout, + int frames_per_buffer) + : sample_rate(sample_rate), + channel_layout(channel_layout), + frames_per_buffer(frames_per_buffer), + effects() {} + +MediaStreamDevice::AudioDeviceParameters::~AudioDeviceParameters() {} + MediaStreamRequest::MediaStreamRequest( int render_process_id, int render_frame_id, diff --git a/content/public/common/media_stream_request.h b/content/public/common/media_stream_request.h index b60835ee04b7..8ff838703418 100644 --- a/content/public/common/media_stream_request.h +++ b/content/public/common/media_stream_request.h @@ -13,6 +13,7 @@ #include "base/callback_forward.h" #include "base/memory/scoped_ptr.h" #include "content/common/content_export.h" +#include "media/audio/audio_parameters.h" #include "ui/gfx/native_widget_types.h" #include "url/gurl.h" @@ 
-125,18 +126,14 @@ struct CONTENT_EXPORT MediaStreamDevice { // Contains properties that match directly with those with the same name // in media::AudioParameters. - struct AudioDeviceParameters { - AudioDeviceParameters() - : sample_rate(), channel_layout(), frames_per_buffer(), effects() { - } - - AudioDeviceParameters(int sample_rate, int channel_layout, - int frames_per_buffer) - : sample_rate(sample_rate), - channel_layout(channel_layout), - frames_per_buffer(frames_per_buffer), - effects() { - } + // TODO(ajm): Remove this type and use media::AudioParameters directly. + struct CONTENT_EXPORT AudioDeviceParameters { + AudioDeviceParameters(); + AudioDeviceParameters(int sample_rate, + int channel_layout, + int frames_per_buffer); + + ~AudioDeviceParameters(); // Preferred sample rate in samples per second for the device. int sample_rate; @@ -154,6 +151,8 @@ struct CONTENT_EXPORT MediaStreamDevice { // See media::AudioParameters::PlatformEffectsMask. int effects; + + std::vector mic_positions; }; // These below two member variables are valid only when the type of device is diff --git a/content/renderer/media/media_stream_audio_processor.cc b/content/renderer/media/media_stream_audio_processor.cc index 168697e20b94..e15e13b143c5 100644 --- a/content/renderer/media/media_stream_audio_processor.cc +++ b/content/renderer/media/media_stream_audio_processor.cc @@ -20,10 +20,6 @@ #include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h" #include "third_party/webrtc/modules/audio_processing/typing_detection.h" -#if defined(OS_CHROMEOS) -#include "base/sys_info.h" -#endif - namespace content { namespace { @@ -93,31 +89,6 @@ bool IsBeamformingEnabled(const MediaAudioConstraints& audio_constraints) { audio_constraints.GetProperty(MediaAudioConstraints::kGoogBeamforming); } -void ConfigureBeamforming(webrtc::Config* config, - const std::string& geometry_str) { - std::vector geometry = ParseArrayGeometry(geometry_str); -#if defined(OS_CHROMEOS) - 
if (geometry.empty()) { - const std::string& board = base::SysInfo::GetLsbReleaseBoard(); - if (board.find("nyan_kitty") != std::string::npos) { - geometry.push_back(webrtc::Point(-0.03f, 0.f, 0.f)); - geometry.push_back(webrtc::Point(0.03f, 0.f, 0.f)); - } else if (board.find("peach_pi") != std::string::npos) { - geometry.push_back(webrtc::Point(-0.025f, 0.f, 0.f)); - geometry.push_back(webrtc::Point(0.025f, 0.f, 0.f)); - } else if (board.find("samus") != std::string::npos) { - geometry.push_back(webrtc::Point(-0.032f, 0.f, 0.f)); - geometry.push_back(webrtc::Point(0.032f, 0.f, 0.f)); - } else if (board.find("swanky") != std::string::npos) { - geometry.push_back(webrtc::Point(-0.026f, 0.f, 0.f)); - geometry.push_back(webrtc::Point(0.026f, 0.f, 0.f)); - } - } -#endif - config->Set( - new webrtc::Beamforming(geometry.size() > 1, geometry)); -} - } // namespace // Wraps AudioBus to provide access to the array of channel pointers, since this @@ -271,7 +242,7 @@ class MediaStreamAudioFifo { MediaStreamAudioProcessor::MediaStreamAudioProcessor( const blink::WebMediaConstraints& constraints, - int effects, + const MediaStreamDevice::AudioDeviceParameters& input_params, WebRtcPlayoutDataSource* playout_data_source) : render_delay_ms_(0), playout_data_source_(playout_data_source), @@ -280,7 +251,7 @@ MediaStreamAudioProcessor::MediaStreamAudioProcessor( stopped_(false) { capture_thread_checker_.DetachFromThread(); render_thread_checker_.DetachFromThread(); - InitializeAudioProcessingModule(constraints, effects); + InitializeAudioProcessingModule(constraints, input_params); aec_dump_message_filter_ = AecDumpMessageFilter::Get(); // In unit tests not creating a message filter, |aec_dump_message_filter_| @@ -455,11 +426,12 @@ void MediaStreamAudioProcessor::GetStats(AudioProcessorStats* stats) { } void MediaStreamAudioProcessor::InitializeAudioProcessingModule( - const blink::WebMediaConstraints& constraints, int effects) { + const blink::WebMediaConstraints& constraints, + 
const MediaStreamDevice::AudioDeviceParameters& input_params) { DCHECK(main_thread_checker_.CalledOnValidThread()); DCHECK(!audio_processing_); - MediaAudioConstraints audio_constraints(constraints, effects); + MediaAudioConstraints audio_constraints(constraints, input_params.effects); // Audio mirroring can be enabled even though audio processing is otherwise // disabled. @@ -511,9 +483,12 @@ void MediaStreamAudioProcessor::InitializeAudioProcessingModule( if (IsDelayAgnosticAecEnabled()) config.Set<webrtc::DelayAgnostic>(new webrtc::DelayAgnostic(true)); if (goog_beamforming) { - ConfigureBeamforming(&config, - audio_constraints.GetPropertyAsString( - MediaAudioConstraints::kGoogArrayGeometry)); + const auto& geometry = + GetArrayGeometryPreferringConstraints(audio_constraints, input_params); + + // Only enable beamforming if we have at least two mics. + config.Set<webrtc::Beamforming>( + new webrtc::Beamforming(geometry.size() > 1, geometry)); } // Create and configure the webrtc::AudioProcessing. @@ -603,7 +578,7 @@ void MediaStreamAudioProcessor::InitializeCaptureFifo( // 10 ms chunks regardless, while WebAudio sinks want less, and we're assuming // we can identify WebAudio sinks by the input chunk size. Less fragile would // be to have the sink actually tell us how much it wants (as in the above - // TODO). + // TODO). 
int processing_frames = input_format.sample_rate() / 100; int output_frames = output_sample_rate / 100; if (!audio_processing_ && input_format.frames_per_buffer() < output_frames) { diff --git a/content/renderer/media/media_stream_audio_processor.h b/content/renderer/media/media_stream_audio_processor.h index e2033d63ee7e..8d28008ae785 100644 --- a/content/renderer/media/media_stream_audio_processor.h +++ b/content/renderer/media/media_stream_audio_processor.h @@ -11,6 +11,7 @@ #include "base/threading/thread_checker.h" #include "base/time/time.h" #include "content/common/content_export.h" +#include "content/public/common/media_stream_request.h" #include "content/renderer/media/aec_dump_message_filter.h" #include "content/renderer/media/webrtc_audio_device_impl.h" #include "media/base/audio_converter.h" @@ -52,9 +53,10 @@ class CONTENT_EXPORT MediaStreamAudioProcessor : // |playout_data_source| is used to register this class as a sink to the // WebRtc playout data for processing AEC. If clients do not enable AEC, // |playout_data_source| won't be used. - MediaStreamAudioProcessor(const blink::WebMediaConstraints& constraints, - int effects, - WebRtcPlayoutDataSource* playout_data_source); + MediaStreamAudioProcessor( + const blink::WebMediaConstraints& constraints, + const MediaStreamDevice::AudioDeviceParameters& input_params, + WebRtcPlayoutDataSource* playout_data_source); // Called when the format of the capture data has changed. // Called on the main render thread. The caller is responsible for stopping @@ -125,7 +127,8 @@ class CONTENT_EXPORT MediaStreamAudioProcessor : // Helper to initialize the WebRtc AudioProcessing. void InitializeAudioProcessingModule( - const blink::WebMediaConstraints& constraints, int effects); + const blink::WebMediaConstraints& constraints, + const MediaStreamDevice::AudioDeviceParameters& input_params); // Helper to initialize the capture converter. 
void InitializeCaptureFifo(const media::AudioParameters& input_format); diff --git a/content/renderer/media/media_stream_audio_processor_options.cc b/content/renderer/media/media_stream_audio_processor_options.cc index 5c61a055cc47..8a7056ed3c91 100644 --- a/content/renderer/media/media_stream_audio_processor_options.cc +++ b/content/renderer/media/media_stream_audio_processor_options.cc @@ -65,6 +65,8 @@ struct { { MediaAudioConstraints::kGoogHighpassFilter, true }, { MediaAudioConstraints::kGoogTypingNoiseDetection, true }, { MediaAudioConstraints::kGoogExperimentalNoiseSuppression, false }, + // Beamforming will only be enabled if we are also provided with a + // multi-microphone geometry. { MediaAudioConstraints::kGoogBeamforming, false }, { kMediaStreamAudioHotword, false }, }; @@ -99,6 +101,19 @@ DelayBasedEchoQuality EchoDelayFrequencyToQuality(float delay_frequency) { return DELAY_BASED_ECHO_QUALITY_BAD; } +webrtc::Point WebrtcPointFromMediaPoint(const media::Point& point) { + return webrtc::Point(point.x(), point.y(), point.z()); +} + +std::vector<webrtc::Point> WebrtcPointsFromMediaPoints( + const std::vector<media::Point>& points) { + std::vector<webrtc::Point> webrtc_points; + webrtc_points.reserve(points.size()); + for (const auto& point : points) + webrtc_points.push_back(WebrtcPointFromMediaPoint(point)); + return webrtc_points; +} + } // namespace // TODO(xians): Remove this method after the APM in WebRtc is deprecated. 
@@ -377,36 +392,19 @@ void GetAecStats(webrtc::EchoCancellation* echo_cancellation, } } -CONTENT_EXPORT std::vector ParseArrayGeometry( - const std::string& geometry_string) { - const auto& tokens = - base::SplitString(geometry_string, base::kWhitespaceASCII, - base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY); - std::vector geometry; - if (tokens.size() < 3 || tokens.size() % 3 != 0) { - LOG(ERROR) << "Malformed geometry string: " << geometry_string; - return geometry; - } - - std::vector float_tokens; - float_tokens.reserve(tokens.size()); - for (const auto& token : tokens) { - double float_token; - if (!base::StringToDouble(token, &float_token)) { - LOG(ERROR) << "Unable to convert token=" << token - << " to double from geometry string: " << geometry_string; - return geometry; - } - float_tokens.push_back(float_token); - } - - geometry.reserve(float_tokens.size() / 3); - for (size_t i = 0; i < float_tokens.size(); i += 3) { - geometry.push_back(webrtc::Point(float_tokens[i + 0], float_tokens[i + 1], - float_tokens[i + 2])); - } - - return geometry; +std::vector GetArrayGeometryPreferringConstraints( + const MediaAudioConstraints& audio_constraints, + const MediaStreamDevice::AudioDeviceParameters& input_params) { + const std::string constraints_geometry = + audio_constraints.GetPropertyAsString( + MediaAudioConstraints::kGoogArrayGeometry); + + // Give preference to the audio constraint over the device-supplied mic + // positions. This is mainly for testing purposes. + return WebrtcPointsFromMediaPoints( + constraints_geometry.empty() + ? 
input_params.mic_positions + : media::ParsePointsFromString(constraints_geometry)); } } // namespace content diff --git a/content/renderer/media/media_stream_audio_processor_options.h b/content/renderer/media/media_stream_audio_processor_options.h index 1d806bf5c008..cf98b34f1887 100644 --- a/content/renderer/media/media_stream_audio_processor_options.h +++ b/content/renderer/media/media_stream_audio_processor_options.h @@ -9,13 +9,13 @@ #include "base/files/file.h" #include "content/common/content_export.h" +#include "content/public/common/media_stream_request.h" #include "third_party/WebKit/public/platform/WebMediaConstraints.h" #include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h" #include "third_party/webrtc/modules/audio_processing/include/audio_processing.h" namespace webrtc { -class AudioFrame; class EchoCancellation; class MediaConstraintsInterface; class TypingDetection; @@ -139,13 +139,11 @@ void EnableAutomaticGainControl(AudioProcessing* audio_processing); void GetAecStats(webrtc::EchoCancellation* echo_cancellation, webrtc::AudioProcessorInterface::AudioProcessorStats* stats); -// Parses the microphone array geometry from |geometry_string| formatted as -// "x1 y1 z1 ... xn yn zn" for an n-microphone array. See -// switches::kMicrophonePositions for more detail. -// -// Returns a zero-sized vector if |geometry_string| isn't a parseable geometry. -CONTENT_EXPORT std::vector ParseArrayGeometry( - const std::string& geometry_string); +// Returns the array geometry from the media constraints if existing and +// otherwise that provided by the input device. 
+CONTENT_EXPORT std::vector GetArrayGeometryPreferringConstraints( + const MediaAudioConstraints& audio_constraints, + const MediaStreamDevice::AudioDeviceParameters& input_params); } // namespace content diff --git a/content/renderer/media/media_stream_audio_processor_unittest.cc b/content/renderer/media/media_stream_audio_processor_unittest.cc index 0e40ece9e5c4..20eb18124020 100644 --- a/content/renderer/media/media_stream_audio_processor_unittest.cc +++ b/content/renderer/media/media_stream_audio_processor_unittest.cc @@ -26,6 +26,16 @@ using ::testing::AnyNumber; using ::testing::AtLeast; using ::testing::Return; +using media::AudioParameters; + +namespace webrtc { + +bool operator==(const webrtc::Point& lhs, const webrtc::Point& rhs) { + return lhs.x() == rhs.x() && lhs.y() == rhs.y() && lhs.z() == rhs.z(); +} + +} // namespace webrtc + namespace content { namespace { @@ -133,12 +143,12 @@ class MediaStreamAudioProcessorTest : public ::testing::Test { EXPECT_NEAR(input_capture_delay.InMillisecondsF(), capture_delay.InMillisecondsF(), output_buffer_duration.InMillisecondsF()); - EXPECT_EQ(audio_processor->OutputFormat().sample_rate(), - expected_output_sample_rate); - EXPECT_EQ(audio_processor->OutputFormat().channels(), - expected_output_channels); - EXPECT_EQ(audio_processor->OutputFormat().frames_per_buffer(), - expected_output_buffer_size); + EXPECT_EQ(expected_output_sample_rate, + audio_processor->OutputFormat().sample_rate()); + EXPECT_EQ(expected_output_channels, + audio_processor->OutputFormat().channels()); + EXPECT_EQ(expected_output_buffer_size, + audio_processor->OutputFormat().frames_per_buffer()); } data_ptr += params.frames_per_buffer() * params.channels(); @@ -181,6 +191,7 @@ class MediaStreamAudioProcessorTest : public ::testing::Test { } media::AudioParameters params_; + MediaStreamDevice::AudioDeviceParameters input_device_params_; }; // Test crashing with ASAN on Android. 
crbug.com/468762 @@ -195,7 +206,7 @@ TEST_F(MediaStreamAudioProcessorTest, MAYBE_WithAudioProcessing) { new WebRtcAudioDeviceImpl()); scoped_refptr audio_processor( new rtc::RefCountedObject( - constraint_factory.CreateWebMediaConstraints(), 0, + constraint_factory.CreateWebMediaConstraints(), input_device_params_, webrtc_audio_device.get())); EXPECT_TRUE(audio_processor->has_audio_processing()); audio_processor->OnCaptureFormatChanged(params_); @@ -220,8 +231,8 @@ TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) { tab_string); scoped_refptr audio_processor( new rtc::RefCountedObject( - tab_constraint_factory.CreateWebMediaConstraints(), 0, - webrtc_audio_device.get())); + tab_constraint_factory.CreateWebMediaConstraints(), + input_device_params_, webrtc_audio_device.get())); EXPECT_FALSE(audio_processor->has_audio_processing()); audio_processor->OnCaptureFormatChanged(params_); @@ -237,8 +248,8 @@ TEST_F(MediaStreamAudioProcessorTest, VerifyTabCaptureWithoutAudioProcessing) { system_constraint_factory.AddMandatory(kMediaStreamSource, system_string); audio_processor = new rtc::RefCountedObject( - system_constraint_factory.CreateWebMediaConstraints(), 0, - webrtc_audio_device.get()); + system_constraint_factory.CreateWebMediaConstraints(), + input_device_params_, webrtc_audio_device.get()); EXPECT_FALSE(audio_processor->has_audio_processing()); // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives @@ -254,7 +265,7 @@ TEST_F(MediaStreamAudioProcessorTest, TurnOffDefaultConstraints) { new WebRtcAudioDeviceImpl()); scoped_refptr audio_processor( new rtc::RefCountedObject( - constraint_factory.CreateWebMediaConstraints(), 0, + constraint_factory.CreateWebMediaConstraints(), input_device_params_, webrtc_audio_device.get())); EXPECT_FALSE(audio_processor->has_audio_processing()); audio_processor->OnCaptureFormatChanged(params_); @@ -367,6 +378,67 @@ TEST_F(MediaStreamAudioProcessorTest, ValidateConstraints) { 
EXPECT_FALSE(audio_constraints.IsValid()); } +MediaAudioConstraints MakeMediaAudioConstraints( + const MockMediaConstraintFactory& constraint_factory) { + return MediaAudioConstraints(constraint_factory.CreateWebMediaConstraints(), + AudioParameters::NO_EFFECTS); +} + +TEST_F(MediaStreamAudioProcessorTest, SelectsConstraintsArrayGeometryIfExists) { + std::vector constraints_geometry(1, + webrtc::Point(-0.02f, 0, 0)); + constraints_geometry.push_back(webrtc::Point(0.02f, 0, 0)); + + std::vector input_device_geometry(1, webrtc::Point(0, 0, 0)); + input_device_geometry.push_back(webrtc::Point(0, 0.05f, 0)); + + { + // Both geometries empty. + MockMediaConstraintFactory constraint_factory; + MediaStreamDevice::AudioDeviceParameters input_params; + + const auto& actual_geometry = GetArrayGeometryPreferringConstraints( + MakeMediaAudioConstraints(constraint_factory), input_params); + EXPECT_EQ(std::vector(), actual_geometry); + } + { + // Constraints geometry empty. + MockMediaConstraintFactory constraint_factory; + MediaStreamDevice::AudioDeviceParameters input_params; + input_params.mic_positions.push_back(media::Point(0, 0, 0)); + input_params.mic_positions.push_back(media::Point(0, 0.05f, 0)); + + const auto& actual_geometry = GetArrayGeometryPreferringConstraints( + MakeMediaAudioConstraints(constraint_factory), input_params); + EXPECT_EQ(input_device_geometry, actual_geometry); + } + { + // Input device geometry empty. + MockMediaConstraintFactory constraint_factory; + constraint_factory.AddOptional(MediaAudioConstraints::kGoogArrayGeometry, + std::string("-0.02 0 0 0.02 0 0")); + MediaStreamDevice::AudioDeviceParameters input_params; + + const auto& actual_geometry = GetArrayGeometryPreferringConstraints( + MakeMediaAudioConstraints(constraint_factory), input_params); + EXPECT_EQ(constraints_geometry, actual_geometry); + } + { + // Both geometries existing. 
+ MockMediaConstraintFactory constraint_factory; + constraint_factory.AddOptional(MediaAudioConstraints::kGoogArrayGeometry, + std::string("-0.02 0 0 0.02 0 0")); + MediaStreamDevice::AudioDeviceParameters input_params; + input_params.mic_positions.push_back(media::Point(0, 0, 0)); + input_params.mic_positions.push_back(media::Point(0, 0.05f, 0)); + + // Constraints geometry is preferred. + const auto& actual_geometry = GetArrayGeometryPreferringConstraints( + MakeMediaAudioConstraints(constraint_factory), input_params); + EXPECT_EQ(constraints_geometry, actual_geometry); + } +} + // Test crashing with ASAN on Android. crbug.com/468762 #if defined(OS_ANDROID) && defined(ADDRESS_SANITIZER) #define MAYBE_TestAllSampleRates DISABLED_TestAllSampleRates @@ -379,7 +451,7 @@ TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestAllSampleRates) { new WebRtcAudioDeviceImpl()); scoped_refptr audio_processor( new rtc::RefCountedObject( - constraint_factory.CreateWebMediaConstraints(), 0, + constraint_factory.CreateWebMediaConstraints(), input_device_params_, webrtc_audio_device.get())); EXPECT_TRUE(audio_processor->has_audio_processing()); @@ -420,7 +492,7 @@ TEST_F(MediaStreamAudioProcessorTest, GetAecDumpMessageFilter) { new WebRtcAudioDeviceImpl()); scoped_refptr audio_processor( new rtc::RefCountedObject( - constraint_factory.CreateWebMediaConstraints(), 0, + constraint_factory.CreateWebMediaConstraints(), input_device_params_, webrtc_audio_device.get())); EXPECT_TRUE(audio_processor->aec_dump_message_filter_.get()); @@ -440,7 +512,7 @@ TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) { new WebRtcAudioDeviceImpl()); scoped_refptr audio_processor( new rtc::RefCountedObject( - constraint_factory.CreateWebMediaConstraints(), 0, + constraint_factory.CreateWebMediaConstraints(), input_device_params_, webrtc_audio_device.get())); EXPECT_FALSE(audio_processor->has_audio_processing()); const media::AudioParameters source_params( @@ -504,7 +576,7 @@ 
TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestWithKeyboardMicChannel) { new WebRtcAudioDeviceImpl()); scoped_refptr audio_processor( new rtc::RefCountedObject( - constraint_factory.CreateWebMediaConstraints(), 0, + constraint_factory.CreateWebMediaConstraints(), input_device_params_, webrtc_audio_device.get())); EXPECT_TRUE(audio_processor->has_audio_processing()); @@ -522,35 +594,4 @@ TEST_F(MediaStreamAudioProcessorTest, MAYBE_TestWithKeyboardMicChannel) { audio_processor = NULL; } -using Point = webrtc::Point; -using PointVector = std::vector; - -void ExpectPointVectorEqual(const PointVector& expected, - const PointVector& actual) { - EXPECT_EQ(expected.size(), actual.size()); - for (size_t i = 0; i < actual.size(); ++i) { - EXPECT_EQ(expected[i].x(), actual[i].x()); - EXPECT_EQ(expected[i].y(), actual[i].y()); - EXPECT_EQ(expected[i].z(), actual[i].z()); - } -} - -TEST(MediaStreamAudioProcessorOptionsTest, ParseArrayGeometry) { - const PointVector expected_empty; - ExpectPointVectorEqual(expected_empty, ParseArrayGeometry("")); - ExpectPointVectorEqual(expected_empty, ParseArrayGeometry("0 0 a")); - ExpectPointVectorEqual(expected_empty, ParseArrayGeometry("1 2")); - ExpectPointVectorEqual(expected_empty, ParseArrayGeometry("1 2 3 4")); - - { - PointVector expected(1, Point(-0.02f, 0, 0)); - expected.push_back(Point(0.02f, 0, 0)); - ExpectPointVectorEqual(expected, ParseArrayGeometry("-0.02 0 0 0.02 0 0")); - } - { - PointVector expected(1, Point(1, 2, 3)); - ExpectPointVectorEqual(expected, ParseArrayGeometry("1 2 3")); - } -} - } // namespace content diff --git a/content/renderer/media/mock_media_constraint_factory.cc b/content/renderer/media/mock_media_constraint_factory.cc index 11894508cc71..7c5290a58d36 100644 --- a/content/renderer/media/mock_media_constraint_factory.cc +++ b/content/renderer/media/mock_media_constraint_factory.cc @@ -23,7 +23,7 @@ MockMediaConstraintFactory::~MockMediaConstraintFactory() { } blink::WebMediaConstraints 
-MockMediaConstraintFactory::CreateWebMediaConstraints() { +MockMediaConstraintFactory::CreateWebMediaConstraints() const { blink::WebVector mandatory(mandatory_); blink::WebVector optional(optional_); blink::WebMediaConstraints constraints; diff --git a/content/renderer/media/mock_media_constraint_factory.h b/content/renderer/media/mock_media_constraint_factory.h index 4632dacdfd9e..3ea20bcd0b36 100644 --- a/content/renderer/media/mock_media_constraint_factory.h +++ b/content/renderer/media/mock_media_constraint_factory.h @@ -17,7 +17,7 @@ class MockMediaConstraintFactory { MockMediaConstraintFactory(); ~MockMediaConstraintFactory(); - blink::WebMediaConstraints CreateWebMediaConstraints(); + blink::WebMediaConstraints CreateWebMediaConstraints() const; void AddMandatory(const std::string& key, int value); void AddMandatory(const std::string& key, double value); void AddMandatory(const std::string& key, const std::string& value); diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc_audio_capturer.cc index 9a63564eceb8..f6605ebfacf7 100644 --- a/content/renderer/media/webrtc_audio_capturer.cc +++ b/content/renderer/media/webrtc_audio_capturer.cc @@ -244,7 +244,7 @@ WebRtcAudioCapturer::WebRtcAudioCapturer( : constraints_(constraints), audio_processor_(new rtc::RefCountedObject( constraints, - device_info.device.input.effects, + device_info.device.input, audio_device)), running_(false), render_frame_id_(render_frame_id), diff --git a/media/BUILD.gn b/media/BUILD.gn index 51fb153a530c..78c53102b2c2 100644 --- a/media/BUILD.gn +++ b/media/BUILD.gn @@ -793,6 +793,8 @@ component("shared_memory_support") { sources = [ "audio/audio_parameters.cc", "audio/audio_parameters.h", + "audio/point.cc", + "audio/point.h", "base/audio_bus.cc", "base/audio_bus.h", "base/channel_layout.cc", @@ -808,6 +810,7 @@ component("shared_memory_support") { ] deps = [ "//base", + "//ui/gfx/geometry", ] } diff --git a/media/audio/BUILD.gn 
b/media/audio/BUILD.gn index 24ef1efefcef..369d7d0a1081 100644 --- a/media/audio/BUILD.gn +++ b/media/audio/BUILD.gn @@ -185,13 +185,6 @@ source_set("audio") { deps += [ "//media/base/android:media_jni_headers" ] } - if (is_openbsd) { - sources += [ - "openbsd/audio_manager_openbsd.cc", - "openbsd/audio_manager_openbsd.h", - ] - } - if (is_linux) { sources += [ "linux/audio_manager_linux.cc" ] } diff --git a/media/audio/audio_parameters.cc b/media/audio/audio_parameters.cc index a78eb84a4cd1..14c32147a4df 100644 --- a/media/audio/audio_parameters.cc +++ b/media/audio/audio_parameters.cc @@ -21,6 +21,11 @@ AudioParameters::AudioParameters(Format format, frames_per_buffer); } +AudioParameters::~AudioParameters() {} + +AudioParameters::AudioParameters(const AudioParameters&) = default; +AudioParameters& AudioParameters::operator=(const AudioParameters&) = default; + void AudioParameters::Reset(Format format, ChannelLayout channel_layout, int sample_rate, @@ -33,6 +38,7 @@ void AudioParameters::Reset(Format format, bits_per_sample_ = bits_per_sample; frames_per_buffer_ = frames_per_buffer; effects_ = NO_EFFECTS; + mic_positions_.clear(); } bool AudioParameters::IsValid() const { @@ -54,7 +60,8 @@ std::string AudioParameters::AsHumanReadableString() const { << " channels: " << channels() << " sample_rate: " << sample_rate() << " bits_per_sample: " << bits_per_sample() << " frames_per_buffer: " << frames_per_buffer() - << " effects: " << effects(); + << " effects: " << effects() + << " mic_positions: " << PointsToString(mic_positions_); return s.str(); } @@ -77,13 +84,12 @@ base::TimeDelta AudioParameters::GetBufferDuration() const { } bool AudioParameters::Equals(const AudioParameters& other) const { - return format_ == other.format() && - sample_rate_ == other.sample_rate() && + return format_ == other.format() && sample_rate_ == other.sample_rate() && channel_layout_ == other.channel_layout() && channels_ == other.channels() && bits_per_sample_ == 
other.bits_per_sample() && frames_per_buffer_ == other.frames_per_buffer() && - effects_ == other.effects(); + effects_ == other.effects() && mic_positions_ == other.mic_positions_; } } // namespace media diff --git a/media/audio/audio_parameters.h b/media/audio/audio_parameters.h index 57fb960e488f..61ca8121f9e9 100644 --- a/media/audio/audio_parameters.h +++ b/media/audio/audio_parameters.h @@ -11,6 +11,7 @@ #include "base/basictypes.h" #include "base/compiler_specific.h" #include "base/time/time.h" +#include "media/audio/point.h" #include "media/base/audio_bus.h" #include "media/base/channel_layout.h" #include "media/base/media_export.h" @@ -86,6 +87,8 @@ class MEDIA_EXPORT AudioParameters { int bits_per_sample, int frames_per_buffer); + ~AudioParameters(); + // Re-initializes all members. void Reset(Format format, ChannelLayout channel_layout, @@ -148,8 +151,13 @@ class MEDIA_EXPORT AudioParameters { void set_effects(int effects) { effects_ = effects; } int effects() const { return effects_; } - AudioParameters(const AudioParameters&) = default; - AudioParameters& operator=(const AudioParameters&) = default; + void set_mic_positions(const std::vector& mic_positions) { + mic_positions_ = mic_positions; + } + const std::vector& mic_positions() const { return mic_positions_; } + + AudioParameters(const AudioParameters&); + AudioParameters& operator=(const AudioParameters&); private: Format format_; // Format of the stream. @@ -160,6 +168,19 @@ class MEDIA_EXPORT AudioParameters { int bits_per_sample_; // Number of bits per sample. int frames_per_buffer_; // Number of frames in a buffer. int effects_; // Bitmask using PlatformEffectsMask. + + // Microphone positions using Cartesian coordinates: + // x: the horizontal dimension, with positive to the right from the camera's + // perspective. + // y: the depth dimension, with positive forward from the camera's + // perspective. + // z: the vertical dimension, with positive upwards. 
+ // + // Usually, the center of the microphone array will be treated as the origin + // (often the position of the camera). + // + // An empty vector indicates unknown positions. + std::vector mic_positions_; }; // Comparison is useful when AudioParameters is used with std structures. diff --git a/media/audio/audio_parameters_unittest.cc b/media/audio/audio_parameters_unittest.cc index 2cf541d14c38..c39e8d4915dc 100644 --- a/media/audio/audio_parameters_unittest.cc +++ b/media/audio/audio_parameters_unittest.cc @@ -16,6 +16,9 @@ TEST(AudioParameters, Constructor_Default) { ChannelLayout expected_channel_layout = CHANNEL_LAYOUT_NONE; int expected_rate = 0; int expected_samples = 0; + AudioParameters::PlatformEffectsMask expected_effects = + AudioParameters::NO_EFFECTS; + std::vector expected_mic_positions; AudioParameters params; @@ -25,6 +28,8 @@ TEST(AudioParameters, Constructor_Default) { EXPECT_EQ(expected_channel_layout, params.channel_layout()); EXPECT_EQ(expected_rate, params.sample_rate()); EXPECT_EQ(expected_samples, params.frames_per_buffer()); + EXPECT_EQ(expected_effects, params.effects()); + EXPECT_EQ(expected_mic_positions, params.mic_positions()); } TEST(AudioParameters, Constructor_ParameterValues) { diff --git a/media/audio/cras/audio_manager_cras.cc b/media/audio/cras/audio_manager_cras.cc index 31a178033009..2b47de40cc9f 100644 --- a/media/audio/cras/audio_manager_cras.cc +++ b/media/audio/cras/audio_manager_cras.cc @@ -23,28 +23,66 @@ #undef max namespace media { +namespace { -static void AddDefaultDevice(AudioDeviceNames* device_names) { - DCHECK(device_names->empty()); +// Maximum number of output streams that can be open simultaneously. +const int kMaxOutputStreams = 50; + +// Default sample rate for input and output streams. +const int kDefaultSampleRate = 48000; +// Define bounds for the output buffer size. +const int kMinimumOutputBufferSize = 512; +const int kMaximumOutputBufferSize = 8192; + +// Default input buffer size. 
+const int kDefaultInputBufferSize = 1024; + +void AddDefaultDevice(AudioDeviceNames* device_names) { // Cras will route audio from a proper physical device automatically. device_names->push_back( AudioDeviceName(AudioManagerBase::kDefaultDeviceName, AudioManagerBase::kDefaultDeviceId)); } -// Maximum number of output streams that can be open simultaneously. -static const int kMaxOutputStreams = 50; +// Returns the AudioDeviceName of the virtual device with beamforming on. +AudioDeviceName BeamformingOnDeviceName() { + // TODO(ajm): Replace these strings with properly localized ones. + // (crbug.com/497001) + static const char kBeamformingOnNameSuffix[] = " (pick up just one person)"; + static const char kBeamformingOnIdSuffix[] = "-beamforming"; -// Default sample rate for input and output streams. -static const int kDefaultSampleRate = 48000; + return AudioDeviceName( + std::string(AudioManagerBase::kDefaultDeviceName) + + kBeamformingOnNameSuffix, + std::string(AudioManagerBase::kDefaultDeviceId) + kBeamformingOnIdSuffix); +} -// Define bounds for the output buffer size. -static const int kMinimumOutputBufferSize = 512; -static const int kMaximumOutputBufferSize = 8192; +// Returns the AudioDeviceName of the virtual device with beamforming off. +AudioDeviceName BeamformingOffDeviceName() { + static const char kBeamformingOffNameSuffix[] = " (pick up everything)"; + return AudioDeviceName(std::string(AudioManagerBase::kDefaultDeviceName) + + kBeamformingOffNameSuffix, + AudioManagerBase::kDefaultDeviceId); +} -// Default input buffer size. -static const int kDefaultInputBufferSize = 1024; +// Returns a mic positions string if the machine has a beamforming capable +// internal mic and otherwise an empty string. +std::string MicPositions() { + // Get the list of devices from CRAS. An internal mic with a non-empty + // positions field indicates the machine has a beamforming capable mic array. 
+ chromeos::AudioDeviceList devices; + chromeos::CrasAudioHandler::Get()->GetAudioDevices(&devices); + for (const auto& device : devices) { + if (device.type == chromeos::AUDIO_TYPE_INTERNAL_MIC) { + // There should be only one internal mic device. + return device.mic_positions; + } + } + return ""; +} + +} // namespace bool AudioManagerCras::HasAudioOutputDevices() { return true; @@ -62,7 +100,9 @@ bool AudioManagerCras::HasAudioInputDevices() { AudioManagerCras::AudioManagerCras(AudioLogFactory* audio_log_factory) : AudioManagerBase(audio_log_factory), - has_keyboard_mic_(false) { + has_keyboard_mic_(false), + beamforming_on_device_name_(BeamformingOnDeviceName()), + beamforming_off_device_name_(BeamformingOffDeviceName()) { SetMaxOutputStreamsAllowed(kMaxOutputStreams); } @@ -76,11 +116,24 @@ void AudioManagerCras::ShowAudioInputSettings() { void AudioManagerCras::GetAudioInputDeviceNames( AudioDeviceNames* device_names) { - AddDefaultDevice(device_names); + DCHECK(device_names->empty()); + + mic_positions_ = ParsePointsFromString(MicPositions()); + // At least two mic positions indicates we have a beamforming capable mic + // array. Add the virtual beamforming device to the list. When this device is + // queried through GetInputStreamParameters, provide the cached mic positions. + if (mic_positions_.size() > 1) { + device_names->push_back(beamforming_on_device_name_); + device_names->push_back(beamforming_off_device_name_); + } else { + AddDefaultDevice(device_names); + } } void AudioManagerCras::GetAudioOutputDeviceNames( AudioDeviceNames* device_names) { + DCHECK(device_names->empty()); + AddDefaultDevice(device_names); } @@ -91,16 +144,16 @@ AudioParameters AudioManagerCras::GetInputStreamParameters( int user_buffer_size = GetUserBufferSize(); int buffer_size = user_buffer_size ? user_buffer_size : kDefaultInputBufferSize; - AudioParameters::PlatformEffectsMask effects = - has_keyboard_mic_ ? 
AudioParameters::KEYBOARD_MIC - : AudioParameters::NO_EFFECTS; // TODO(hshi): Fine-tune audio parameters based on |device_id|. The optimal // parameters for the loopback stream may differ from the default. AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO, kDefaultSampleRate, 16, buffer_size); - params.set_effects(effects); + if (has_keyboard_mic_) + params.set_effects(AudioParameters::KEYBOARD_MIC); + if (device_id == beamforming_on_device_name_.unique_id) + params.set_mic_positions(mic_positions_); return params; } diff --git a/media/audio/cras/audio_manager_cras.h b/media/audio/cras/audio_manager_cras.h index 4c8f992ee05a..d55756dc3f2a 100644 --- a/media/audio/cras/audio_manager_cras.h +++ b/media/audio/cras/audio_manager_cras.h @@ -61,6 +61,13 @@ class MEDIA_EXPORT AudioManagerCras : public AudioManagerBase { bool has_keyboard_mic_; + // Holds the name and ID of the virtual beamforming devices. + const AudioDeviceName beamforming_on_device_name_; + const AudioDeviceName beamforming_off_device_name_; + + // Stores the mic positions field from the device. + std::vector mic_positions_; + DISALLOW_COPY_AND_ASSIGN(AudioManagerCras); }; diff --git a/media/audio/openbsd/audio_manager_openbsd.cc b/media/audio/openbsd/audio_manager_openbsd.cc deleted file mode 100644 index 618fb0caab83..000000000000 --- a/media/audio/openbsd/audio_manager_openbsd.cc +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright (c) 2012 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#include "media/audio/openbsd/audio_manager_openbsd.h" - -#include - -#include "base/command_line.h" -#include "base/file_path.h" -#include "base/stl_util.h" -#include "media/audio/audio_output_dispatcher.h" -#include "media/audio/audio_parameters.h" -#include "media/audio/pulse/pulse_output.h" -#include "media/audio/pulse/pulse_stubs.h" -#include "media/base/channel_layout.h" -#include "media/base/limits.h" -#include "media/base/media_switches.h" - -using media_audio_pulse::kModulePulse; -using media_audio_pulse::InitializeStubs; -using media_audio_pulse::StubPathMap; - -namespace media { - -// Maximum number of output streams that can be open simultaneously. -static const int kMaxOutputStreams = 50; - -// Default sample rate for input and output streams. -static const int kDefaultSampleRate = 48000; - -static const base::FilePath::CharType kPulseLib[] = - FILE_PATH_LITERAL("libpulse.so.0"); - -// Implementation of AudioManager. -static bool HasAudioHardware() { - int fd; - const char *file; - - if ((file = getenv("AUDIOCTLDEVICE")) == 0 || *file == '\0') - file = "/dev/audioctl"; - - if ((fd = open(file, O_RDONLY)) < 0) - return false; - - close(fd); - return true; -} - -bool AudioManagerOpenBSD::HasAudioOutputDevices() { - return HasAudioHardware(); -} - -bool AudioManagerOpenBSD::HasAudioInputDevices() { - return HasAudioHardware(); -} - -AudioParameters AudioManagerOpenBSD::GetInputStreamParameters( - const std::string& device_id) { - static const int kDefaultInputBufferSize = 1024; - - int user_buffer_size = GetUserBufferSize(); - int buffer_size = user_buffer_size ? 
- user_buffer_size : kDefaultInputBufferSize; - - return AudioParameters( - AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO, - kDefaultSampleRate, 16, buffer_size); -} - -AudioManagerOpenBSD::AudioManagerOpenBSD(AudioLogFactory* audio_log_factory) - : AudioManagerBase(audio_log_factory), - pulse_library_is_initialized_(false) { - SetMaxOutputStreamsAllowed(kMaxOutputStreams); - StubPathMap paths; - - // Check if the pulse library is avialbale. - paths[kModulePulse].push_back(kPulseLib); - if (!InitializeStubs(paths)) { - DLOG(WARNING) << "Failed on loading the Pulse library and symbols"; - return; - } - - pulse_library_is_initialized_ = true; -} - -AudioManagerOpenBSD::~AudioManagerOpenBSD() { - Shutdown(); -} - -AudioOutputStream* AudioManagerOpenBSD::MakeLinearOutputStream( - const AudioParameters& params) { - DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format); - return MakeOutputStream(params); -} - -AudioOutputStream* AudioManagerOpenBSD::MakeLowLatencyOutputStream( - const AudioParameters& params, - const std::string& device_id) { - DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!"; - DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format); - return MakeOutputStream(params); -} - -AudioInputStream* AudioManagerOpenBSD::MakeLinearInputStream( - const AudioParameters& params, const std::string& device_id) { - DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format); - NOTIMPLEMENTED(); - return NULL; -} - -AudioInputStream* AudioManagerOpenBSD::MakeLowLatencyInputStream( - const AudioParameters& params, const std::string& device_id) { - DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format); - NOTIMPLEMENTED(); - return NULL; -} - -AudioParameters AudioManagerOpenBSD::GetPreferredOutputStreamParameters( - const std::string& output_device_id, - const AudioParameters& input_params) { - // TODO(tommi): Support |output_device_id|. 
- DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!"; - static const int kDefaultOutputBufferSize = 512; - - ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO; - int sample_rate = kDefaultSampleRate; - int buffer_size = kDefaultOutputBufferSize; - int bits_per_sample = 16; - if (input_params.IsValid()) { - sample_rate = input_params.sample_rate(); - bits_per_sample = input_params.bits_per_sample(); - channel_layout = input_params.channel_layout(); - buffer_size = std::min(buffer_size, input_params.frames_per_buffer()); - } - - int user_buffer_size = GetUserBufferSize(); - if (user_buffer_size) - buffer_size = user_buffer_size; - - return AudioParameters( - AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, - sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS); -} - -AudioOutputStream* AudioManagerOpenBSD::MakeOutputStream( - const AudioParameters& params) { - if (pulse_library_is_initialized_) - return new PulseAudioOutputStream(params, this); - - return NULL; -} - -// TODO(xians): Merge AudioManagerOpenBSD with AudioManagerPulse; -// static -AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) { - return new AudioManagerOpenBSD(audio_log_factory); -} - -} // namespace media diff --git a/media/audio/openbsd/audio_manager_openbsd.h b/media/audio/openbsd/audio_manager_openbsd.h deleted file mode 100644 index 3326952bb04b..000000000000 --- a/media/audio/openbsd/audio_manager_openbsd.h +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2012 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef MEDIA_AUDIO_OPENBSD_AUDIO_MANAGER_OPENBSD_H_ -#define MEDIA_AUDIO_OPENBSD_AUDIO_MANAGER_OPENBSD_H_ - -#include - -#include "base/compiler_specific.h" -#include "media/audio/audio_manager_base.h" - -namespace media { - -class MEDIA_EXPORT AudioManagerOpenBSD : public AudioManagerBase { - public: - AudioManagerOpenBSD(AudioLogFactory* audio_log_factory); - - // Implementation of AudioManager. - bool HasAudioOutputDevices() override; - bool HasAudioInputDevices() override; - AudioParameters GetInputStreamParameters( - const std::string& device_id) override; - - // Implementation of AudioManagerBase. - AudioOutputStream* MakeLinearOutputStream( - const AudioParameters& params) override; - AudioOutputStream* MakeLowLatencyOutputStream( - const AudioParameters& params, - const std::string& device_id) override; - AudioInputStream* MakeLinearInputStream( - const AudioParameters& params, const std::string& device_id) override; - AudioInputStream* MakeLowLatencyInputStream( - const AudioParameters& params, const std::string& device_id) override; - - protected: - ~AudioManagerOpenBSD() override; - - AudioParameters GetPreferredOutputStreamParameters( - const std::string& output_device_id, - const AudioParameters& input_params) override; - - private: - // Called by MakeLinearOutputStream and MakeLowLatencyOutputStream. - AudioOutputStream* MakeOutputStream(const AudioParameters& params); - - // Flag to indicate whether the pulse library has been initialized or not. - bool pulse_library_is_initialized_; - - DISALLOW_COPY_AND_ASSIGN(AudioManagerOpenBSD); -}; - -} // namespace media - -#endif // MEDIA_AUDIO_OPENBSD_AUDIO_MANAGER_OPENBSD_H_ diff --git a/media/audio/point.cc b/media/audio/point.cc new file mode 100644 index 000000000000..3246089fd137 --- /dev/null +++ b/media/audio/point.cc @@ -0,0 +1,61 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "media/audio/point.h" + +#include "base/logging.h" +#include "base/strings/string_number_conversions.h" +#include "base/strings/string_split.h" +#include "base/strings/string_util.h" +#include "base/strings/stringprintf.h" + +namespace media { + +std::string PointsToString(const std::vector& points) { + std::string points_string; + if (!points.empty()) { + for (size_t i = 0; i < points.size() - 1; ++i) { + points_string.append(points[i].ToString()); + points_string.append(", "); + } + points_string.append(points.back().ToString()); + } + return points_string; +} + +std::vector ParsePointsFromString(const std::string& points_string) { + std::vector points; + if (points_string.empty()) + return points; + + const auto& tokens = + base::SplitString(points_string, base::kWhitespaceASCII, + base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY); + if (tokens.size() < 3 || tokens.size() % 3 != 0) { + LOG(ERROR) << "Malformed points string: " << points_string; + return points; + } + + std::vector float_tokens; + float_tokens.reserve(tokens.size()); + for (const auto& token : tokens) { + double float_token; + if (!base::StringToDouble(token, &float_token)) { + LOG(ERROR) << "Unable to convert token=" << token + << " to double from points string: " << points_string; + return points; + } + float_tokens.push_back(float_token); + } + + points.reserve(float_tokens.size() / 3); + for (size_t i = 0; i < float_tokens.size(); i += 3) { + points.push_back( + Point(float_tokens[i + 0], float_tokens[i + 1], float_tokens[i + 2])); + } + + return points; +} + +} // namespace media diff --git a/media/audio/point.h b/media/audio/point.h new file mode 100644 index 000000000000..d215a5991195 --- /dev/null +++ b/media/audio/point.h @@ -0,0 +1,31 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef MEDIA_AUDIO_POINT_H_ +#define MEDIA_AUDIO_POINT_H_ + +#include +#include + +#include "media/base/media_export.h" +#include "ui/gfx/geometry/point3_f.h" + +namespace media { + +using Point = gfx::Point3F; + +// Returns a vector of points parsed from a whitespace-separated string +// formatted as: "x1 y1 z1 ... xn yn zn" for n points. +// +// Returns an empty vector if |points_string| is empty or isn't parseable. +MEDIA_EXPORT std::vector ParsePointsFromString( + const std::string& points_string); + +// Returns |points| as a human-readable string. (Not necessarily in the format +// required by ParsePointsFromString). +MEDIA_EXPORT std::string PointsToString(const std::vector& points); + +} // namespace media + +#endif  // MEDIA_AUDIO_POINT_H_ diff --git a/media/audio/point_unittest.cc b/media/audio/point_unittest.cc new file mode 100644 index 000000000000..98aec6493069 --- /dev/null +++ b/media/audio/point_unittest.cc @@ -0,0 +1,41 @@ +// Copyright (c) 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file.
+ +#include + +#include "media/audio/point.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace media { +namespace { + +TEST(PointTest, PointsToString) { + std::vector points(1, Point(1, 0, 0.01f)); + points.push_back(Point(0, 2, 0.02f)); + EXPECT_EQ("1.000000,0.000000,0.010000, 0.000000,2.000000,0.020000", + PointsToString(points)); + + EXPECT_EQ("", PointsToString(std::vector())); +} + +TEST(PointTest, ParsePointString) { + const std::vector expected_empty; + EXPECT_EQ(expected_empty, ParsePointsFromString("")); + EXPECT_EQ(expected_empty, ParsePointsFromString("0 0 a")); + EXPECT_EQ(expected_empty, ParsePointsFromString("1 2")); + EXPECT_EQ(expected_empty, ParsePointsFromString("1 2 3 4")); + + { + std::vector expected(1, Point(-0.02f, 0, 0)); + expected.push_back(Point(0.02f, 0, 0)); + EXPECT_EQ(expected, ParsePointsFromString("-0.02 0 0 0.02 0 0")); + } + { + std::vector expected(1, Point(1, 2, 3)); + EXPECT_EQ(expected, ParsePointsFromString("1 2 3")); + } +} + +} // namespace +} // namespace media diff --git a/media/audio/pulse/audio_manager_pulse.cc b/media/audio/pulse/audio_manager_pulse.cc index 3044f3c34c5b..f3d8832635b1 100644 --- a/media/audio/pulse/audio_manager_pulse.cc +++ b/media/audio/pulse/audio_manager_pulse.cc @@ -135,9 +135,9 @@ AudioParameters AudioManagerPulse::GetInputStreamParameters( user_buffer_size : kDefaultInputBufferSize; // TODO(xians): add support for querying native channel layout for pulse. 
- return AudioParameters( - AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO, - GetNativeSampleRate(), 16, buffer_size); + return AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, + CHANNEL_LAYOUT_STEREO, GetNativeSampleRate(), 16, + buffer_size); } AudioOutputStream* AudioManagerPulse::MakeLinearOutputStream( diff --git a/media/base/audio_buffer_unittest.cc b/media/base/audio_buffer_unittest.cc index 43c763e848f0..b48220df528d 100644 --- a/media/base/audio_buffer_unittest.cc +++ b/media/base/audio_buffer_unittest.cc @@ -202,13 +202,8 @@ TEST(AudioBufferTest, FrameSize) { kTimestamp); EXPECT_EQ(16, buffer->frame_count()); // 2 channels of 8-bit data - buffer = AudioBuffer::CopyFrom(kSampleFormatF32, - CHANNEL_LAYOUT_4_0, - 4, - kSampleRate, - 2, - data, - kTimestamp); + buffer = AudioBuffer::CopyFrom(kSampleFormatF32, CHANNEL_LAYOUT_4_0, 4, + kSampleRate, 2, data, kTimestamp); EXPECT_EQ(2, buffer->frame_count()); // now 4 channels of 32-bit data } diff --git a/media/media.gyp b/media/media.gyp index 102117e7039d..21b9ececc342 100644 --- a/media/media.gyp +++ b/media/media.gyp @@ -161,8 +161,6 @@ 'audio/mac/audio_manager_mac.h', 'audio/null_audio_sink.cc', 'audio/null_audio_sink.h', - 'audio/openbsd/audio_manager_openbsd.cc', - 'audio/openbsd/audio_manager_openbsd.h', 'audio/pulse/audio_manager_pulse.cc', 'audio/pulse/audio_manager_pulse.h', 'audio/pulse/pulse_input.cc', @@ -780,12 +778,7 @@ ['exclude', '_alsa\\.(h|cc)$'], ], }], - ['OS!="openbsd"', { - 'sources!': [ - 'audio/openbsd/audio_manager_openbsd.cc', - 'audio/openbsd/audio_manager_openbsd.h', - ], - }, { # else: openbsd==1 + ['OS=="openbsd"', { 'sources!': [ 'capture/video/linux/v4l2_capture_delegate_multi_plane.cc', 'capture/video/linux/v4l2_capture_delegate_multi_plane.h', @@ -1502,6 +1495,7 @@ 'audio/audio_parameters_unittest.cc', 'audio/audio_power_monitor_unittest.cc', 'audio/fake_audio_worker_unittest.cc', + 'audio/point_unittest.cc', 'audio/simple_sources_unittest.cc', 
'audio/virtual_audio_input_stream_unittest.cc', 'audio/virtual_audio_output_stream_unittest.cc', @@ -1651,6 +1645,7 @@ 'type': '<(component)', 'dependencies': [ '../base/base.gyp:base', + '../ui/gfx/gfx.gyp:gfx_geometry', ], 'defines': [ 'MEDIA_IMPLEMENTATION', diff --git a/media/shared_memory_support.gypi b/media/shared_memory_support.gypi index 65403f748d86..721f6aa4c497 100644 --- a/media/shared_memory_support.gypi +++ b/media/shared_memory_support.gypi @@ -10,6 +10,8 @@ 'shared_memory_support_sources': [ 'audio/audio_parameters.cc', 'audio/audio_parameters.h', + 'audio/point.cc', + 'audio/point.h', 'base/audio_bus.cc', 'base/audio_bus.h', 'base/channel_layout.cc', diff --git a/ui/gfx/gfx.gyp b/ui/gfx/gfx.gyp index c6bb8a5f9674..a9fa3f559bae 100644 --- a/ui/gfx/gfx.gyp +++ b/ui/gfx/gfx.gyp @@ -527,6 +527,8 @@ '../../base/base.gyp:base', '../../skia/skia.gyp:skia', '../../testing/gtest.gyp:gtest', + 'gfx', + 'gfx_geometry', ], 'conditions': [ ['OS == "mac"', { diff --git a/ui/gfx/ipc/gfx_param_traits.cc b/ui/gfx/ipc/gfx_param_traits.cc index 78c024f234e3..3013b0c8cc9b 100644 --- a/ui/gfx/ipc/gfx_param_traits.cc +++ b/ui/gfx/ipc/gfx_param_traits.cc @@ -7,6 +7,7 @@ #include #include "third_party/skia/include/core/SkBitmap.h" +#include "ui/gfx/geometry/point3_f.h" #include "ui/gfx/geometry/rect.h" #include "ui/gfx/geometry/rect_f.h" #include "ui/gfx/range/range.h" @@ -53,16 +54,15 @@ struct SkBitmap_Data { namespace IPC { void ParamTraits::Write(Message* m, const gfx::Point& p) { - m->WriteInt(p.x()); - m->WriteInt(p.y()); + WriteParam(m, p.x()); + WriteParam(m, p.y()); } bool ParamTraits::Read(const Message* m, base::PickleIterator* iter, gfx::Point* r) { int x, y; - if (!iter->ReadInt(&x) || - !iter->ReadInt(&y)) + if (!ReadParam(m, iter, &x) || !ReadParam(m, iter, &y)) return false; r->set_x(x); r->set_y(y); @@ -73,25 +73,47 @@ void ParamTraits::Log(const gfx::Point& p, std::string* l) { l->append(base::StringPrintf("(%d, %d)", p.x(), p.y())); } -void 
ParamTraits::Write(Message* m, const gfx::PointF& v) { - ParamTraits::Write(m, v.x()); - ParamTraits::Write(m, v.y()); +void ParamTraits::Write(Message* m, const gfx::PointF& p) { + WriteParam(m, p.x()); + WriteParam(m, p.y()); } bool ParamTraits::Read(const Message* m, base::PickleIterator* iter, gfx::PointF* r) { float x, y; - if (!ParamTraits::Read(m, iter, &x) || - !ParamTraits::Read(m, iter, &y)) + if (!ReadParam(m, iter, &x) || !ReadParam(m, iter, &y)) return false; r->set_x(x); r->set_y(y); return true; } -void ParamTraits::Log(const gfx::PointF& v, std::string* l) { - l->append(base::StringPrintf("(%f, %f)", v.x(), v.y())); +void ParamTraits::Log(const gfx::PointF& p, std::string* l) { + l->append(base::StringPrintf("(%f, %f)", p.x(), p.y())); +} + +void ParamTraits::Write(Message* m, const gfx::Point3F& p) { + WriteParam(m, p.x()); + WriteParam(m, p.y()); + WriteParam(m, p.z()); +} + +bool ParamTraits::Read(const Message* m, + base::PickleIterator* iter, + gfx::Point3F* r) { + float x, y, z; + if (!ReadParam(m, iter, &x) || !ReadParam(m, iter, &y) || + !ReadParam(m, iter, &z)) + return false; + r->set_x(x); + r->set_y(y); + r->set_z(z); + return true; +} + +void ParamTraits::Log(const gfx::Point3F& p, std::string* l) { + l->append(base::StringPrintf("(%f, %f, %f)", p.x(), p.y(), p.z())); } void ParamTraits::Write(Message* m, const gfx::Size& p) { diff --git a/ui/gfx/ipc/gfx_param_traits.h b/ui/gfx/ipc/gfx_param_traits.h index 5bdd1d37e2ab..77aa120600fb 100644 --- a/ui/gfx/ipc/gfx_param_traits.h +++ b/ui/gfx/ipc/gfx_param_traits.h @@ -18,6 +18,7 @@ class SkBitmap; namespace gfx { class Point; class PointF; +class Point3F; class Range; class Rect; class RectF; @@ -46,6 +47,14 @@ struct GFX_IPC_EXPORT ParamTraits { }; template <> +struct GFX_IPC_EXPORT ParamTraits { + typedef gfx::Point3F param_type; + static void Write(Message* m, const param_type& p); + static bool Read(const Message* m, base::PickleIterator* iter, param_type* r); + static void Log(const 
param_type& p, std::string* l); +}; + +template <> struct GFX_IPC_EXPORT ParamTraits { typedef gfx::Size param_type; static void Write(Message* m, const param_type& p); -- 2.11.4.GIT