/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_processing/audio_processing_impl.h"

#include <assert.h>

#include "webrtc/base/platform_file.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/agc/agc_manager_direct.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/modules/audio_processing/beamformer/beamformer.h"
#include "webrtc/modules/audio_processing/channel_buffer.h"
#include "webrtc/modules/audio_processing/common.h"
#include "webrtc/modules/audio_processing/echo_cancellation_impl.h"
#include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
#include "webrtc/modules/audio_processing/gain_control_impl.h"
#include "webrtc/modules/audio_processing/high_pass_filter_impl.h"
#include "webrtc/modules/audio_processing/level_estimator_impl.h"
#include "webrtc/modules/audio_processing/noise_suppression_impl.h"
#include "webrtc/modules/audio_processing/processing_component.h"
#include "webrtc/modules/audio_processing/transient/transient_suppressor.h"
#include "webrtc/modules/audio_processing/voice_detection_impl.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/compile_assert.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/file_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// Files generated at build-time by the protobuf compiler.
#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
#else
#include "webrtc/audio_processing/debug.pb.h"
#endif
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP

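// Evaluates |expr| and returns early with its error code if it is anything
// other than kNoError.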
#define RETURN_ON_ERR(expr)  \
  do {                       \
    int err = expr;          \
    if (err != kNoError) {   \
      return err;            \
    }                        \
  } while (0)

namespace webrtc {

// Throughout webrtc, it's assumed that success is represented by zero.
COMPILE_ASSERT(AudioProcessing::kNoError == 0, no_error_must_be_zero);

// This class has two main functionalities:
//
// 1) It is returned instead of the real GainControl after the new AGC has been
//    enabled in order to prevent an outside user from overriding compression
//    settings. It doesn't do anything in its implementation, except for
//    delegating the const methods and Enable calls to the real GainControl, so
//    AGC can still be disabled.
//
// 2) It is injected into AgcManagerDirect and implements volume callbacks for
//    getting and setting the volume level. It just caches this value to be used
//    in VoiceEngine later.
class GainControlForNewAgc : public GainControl, public VolumeCallbacks {
 public:
  explicit GainControlForNewAgc(GainControlImpl* gain_control)
      : real_gain_control_(gain_control),
        volume_(0) {
  }

  // GainControl implementation.
  virtual int Enable(bool enable) OVERRIDE {
    return real_gain_control_->Enable(enable);
  }
  virtual bool is_enabled() const OVERRIDE {
    return real_gain_control_->is_enabled();
  }
  virtual int set_stream_analog_level(int level) OVERRIDE {
    volume_ = level;
    return AudioProcessing::kNoError;
  }
  virtual int stream_analog_level() OVERRIDE {
    return volume_;
  }
  virtual int set_mode(Mode mode) OVERRIDE { return AudioProcessing::kNoError; }
  virtual Mode mode() const OVERRIDE { return GainControl::kAdaptiveAnalog; }
  virtual int set_target_level_dbfs(int level) OVERRIDE {
    return AudioProcessing::kNoError;
  }
  virtual int target_level_dbfs() const OVERRIDE {
    return real_gain_control_->target_level_dbfs();
  }
  virtual int set_compression_gain_db(int gain) OVERRIDE {
    return AudioProcessing::kNoError;
  }
  virtual int compression_gain_db() const OVERRIDE {
    return real_gain_control_->compression_gain_db();
  }
  virtual int enable_limiter(bool enable) OVERRIDE {
    return AudioProcessing::kNoError;
  }
  virtual bool is_limiter_enabled() const OVERRIDE {
    return real_gain_control_->is_limiter_enabled();
  }
  virtual int set_analog_level_limits(int minimum,
                                      int maximum) OVERRIDE {
    return AudioProcessing::kNoError;
  }
  virtual int analog_level_minimum() const OVERRIDE {
    return real_gain_control_->analog_level_minimum();
  }
  virtual int analog_level_maximum() const OVERRIDE {
    return real_gain_control_->analog_level_maximum();
  }
  virtual bool stream_is_saturated() const OVERRIDE {
    return real_gain_control_->stream_is_saturated();
  }

  // VolumeCallbacks implementation.
  virtual void SetMicVolume(int volume) OVERRIDE {
    volume_ = volume;
  }
  virtual int GetMicVolume() OVERRIDE {
    return volume_;
  }

 private:
  GainControl* real_gain_control_;
  int volume_;
};

AudioProcessing* AudioProcessing::Create(int id) {
  return Create();
}

AudioProcessing* AudioProcessing::Create() {
  Config config;
  return Create(config);
}

AudioProcessing* AudioProcessing::Create(const Config& config) {
  AudioProcessingImpl* apm = new AudioProcessingImpl(config);
  if (apm->Initialize() != kNoError) {
    delete apm;
    apm = NULL;
  }

  return apm;
}

AudioProcessingImpl::AudioProcessingImpl(const Config& config)
    : echo_cancellation_(NULL),
      echo_control_mobile_(NULL),
      gain_control_(NULL),
      high_pass_filter_(NULL),
      level_estimator_(NULL),
      noise_suppression_(NULL),
      voice_detection_(NULL),
      crit_(CriticalSectionWrapper::CreateCriticalSection()),
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
      debug_file_(FileWrapper::Create()),
      event_msg_(new audioproc::Event()),
#endif
      fwd_in_format_(kSampleRate16kHz, 1),
      fwd_proc_format_(kSampleRate16kHz),
      fwd_out_format_(kSampleRate16kHz, 1),
      rev_in_format_(kSampleRate16kHz, 1),
      rev_proc_format_(kSampleRate16kHz, 1),
      split_rate_(kSampleRate16kHz),
      stream_delay_ms_(0),
      delay_offset_ms_(0),
      was_stream_delay_set_(false),
      output_will_be_muted_(false),
      key_pressed_(false),
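// The experimental AGC is always disabled on Android and iOS, regardless of
// the ExperimentalAgc setting in |config|.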
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
      use_new_agc_(false),
#else
      use_new_agc_(config.Get<ExperimentalAgc>().enabled),
#endif
      transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled),
      beamformer_enabled_(config.Get<Beamforming>().enabled),
      array_geometry_(config.Get<Beamforming>().array_geometry) {
  echo_cancellation_ = new EchoCancellationImpl(this, crit_);
  component_list_.push_back(echo_cancellation_);

  echo_control_mobile_ = new EchoControlMobileImpl(this, crit_);
  component_list_.push_back(echo_control_mobile_);

  gain_control_ = new GainControlImpl(this, crit_);
  component_list_.push_back(gain_control_);

  high_pass_filter_ = new HighPassFilterImpl(this, crit_);
  component_list_.push_back(high_pass_filter_);

  level_estimator_ = new LevelEstimatorImpl(this, crit_);
  component_list_.push_back(level_estimator_);

  noise_suppression_ = new NoiseSuppressionImpl(this, crit_);
  component_list_.push_back(noise_suppression_);

  voice_detection_ = new VoiceDetectionImpl(this, crit_);
  component_list_.push_back(voice_detection_);

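  // Handed out in place of gain_control_ when the new AGC is active; see
  // GainControlForNewAgc above.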
  gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_));

  SetExtraOptions(config);
}

AudioProcessingImpl::~AudioProcessingImpl() {
  {
    CriticalSectionScoped crit_scoped(crit_);
    // Depends on gain_control_ and gain_control_for_new_agc_.
    agc_manager_.reset();
    // Depends on gain_control_.
    gain_control_for_new_agc_.reset();
    while (!component_list_.empty()) {
      ProcessingComponent* component = component_list_.front();
      component->Destroy();
      delete component;
      component_list_.pop_front();
    }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
    if (debug_file_->Open()) {
      debug_file_->CloseFile();
    }
#endif
  }
  delete crit_;
  crit_ = NULL;
}

int AudioProcessingImpl::Initialize() {
  CriticalSectionScoped crit_scoped(crit_);
  return InitializeLocked();
}

int AudioProcessingImpl::set_sample_rate_hz(int rate) {
  CriticalSectionScoped crit_scoped(crit_);
  return InitializeLocked(rate,
                          rate,
                          rev_in_format_.rate(),
                          fwd_in_format_.num_channels(),
                          fwd_out_format_.num_channels(),
                          rev_in_format_.num_channels());
}

int AudioProcessingImpl::Initialize(int input_sample_rate_hz,
                                    int output_sample_rate_hz,
                                    int reverse_sample_rate_hz,
                                    ChannelLayout input_layout,
                                    ChannelLayout output_layout,
                                    ChannelLayout reverse_layout) {
  CriticalSectionScoped crit_scoped(crit_);
  return InitializeLocked(input_sample_rate_hz,
                          output_sample_rate_hz,
                          reverse_sample_rate_hz,
                          ChannelsFromLayout(input_layout),
                          ChannelsFromLayout(output_layout),
                          ChannelsFromLayout(reverse_layout));
}

int AudioProcessingImpl::InitializeLocked() {
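  // When the beamformer is enabled it consumes all input channels and emits a
  // single channel, so the capture buffer must hold the full input channel
  // count.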
  const int fwd_audio_buffer_channels = beamformer_enabled_ ?
                                        fwd_in_format_.num_channels() :
                                        fwd_out_format_.num_channels();
  render_audio_.reset(new AudioBuffer(rev_in_format_.samples_per_channel(),
                                      rev_in_format_.num_channels(),
                                      rev_proc_format_.samples_per_channel(),
                                      rev_proc_format_.num_channels(),
                                      rev_proc_format_.samples_per_channel()));
  capture_audio_.reset(new AudioBuffer(fwd_in_format_.samples_per_channel(),
                                       fwd_in_format_.num_channels(),
                                       fwd_proc_format_.samples_per_channel(),
                                       fwd_audio_buffer_channels,
                                       fwd_out_format_.samples_per_channel()));

  // Initialize all components.
  std::list<ProcessingComponent*>::iterator it;
  for (it = component_list_.begin(); it != component_list_.end(); ++it) {
    int err = (*it)->Initialize();
    if (err != kNoError) {
      return err;
    }
  }

  int err = InitializeExperimentalAgc();
  if (err != kNoError) {
    return err;
  }

  err = InitializeTransient();
  if (err != kNoError) {
    return err;
  }

  InitializeBeamformer();

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    int err = WriteInitMessage();
    if (err != kNoError) {
      return err;
    }
  }
#endif

  return kNoError;
}

int AudioProcessingImpl::InitializeLocked(int input_sample_rate_hz,
                                          int output_sample_rate_hz,
                                          int reverse_sample_rate_hz,
                                          int num_input_channels,
                                          int num_output_channels,
                                          int num_reverse_channels) {
  if (input_sample_rate_hz <= 0 ||
      output_sample_rate_hz <= 0 ||
      reverse_sample_rate_hz <= 0) {
    return kBadSampleRateError;
  }
  if (num_output_channels > num_input_channels) {
    return kBadNumberChannelsError;
  }
  // Only mono and stereo supported currently.
  if (num_input_channels > 2 || num_input_channels < 1 ||
      num_output_channels > 2 || num_output_channels < 1 ||
      num_reverse_channels > 2 || num_reverse_channels < 1) {
    return kBadNumberChannelsError;
  }

  fwd_in_format_.set(input_sample_rate_hz, num_input_channels);
  fwd_out_format_.set(output_sample_rate_hz, num_output_channels);
  rev_in_format_.set(reverse_sample_rate_hz, num_reverse_channels);

  // We process at the closest native rate >= min(input rate, output rate)...
  int min_proc_rate = std::min(fwd_in_format_.rate(), fwd_out_format_.rate());
  int fwd_proc_rate;
  if (min_proc_rate > kSampleRate16kHz) {
    fwd_proc_rate = kSampleRate32kHz;
  } else if (min_proc_rate > kSampleRate8kHz) {
    fwd_proc_rate = kSampleRate16kHz;
  } else {
    fwd_proc_rate = kSampleRate8kHz;
  }
  // ...with one exception.
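  // AECM only operates at 8 and 16 kHz, so cap the processing rate when it is
  // enabled.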
  if (echo_control_mobile_->is_enabled() && min_proc_rate > kSampleRate16kHz) {
    fwd_proc_rate = kSampleRate16kHz;
  }

  fwd_proc_format_.set(fwd_proc_rate);

  // We normally process the reverse stream at 16 kHz. Unless...
  int rev_proc_rate = kSampleRate16kHz;
  if (fwd_proc_format_.rate() == kSampleRate8kHz) {
    // ...the forward stream is at 8 kHz.
    rev_proc_rate = kSampleRate8kHz;
  } else {
    if (rev_in_format_.rate() == kSampleRate32kHz) {
      // ...or the input is at 32 kHz, in which case we use the splitting
      // filter rather than the resampler.
      rev_proc_rate = kSampleRate32kHz;
    }
  }

  // Always downmix the reverse stream to mono for analysis. This has been
  // demonstrated to work well for AEC in most practical scenarios.
  rev_proc_format_.set(rev_proc_rate, 1);

  if (fwd_proc_format_.rate() == kSampleRate32kHz ||
      fwd_proc_format_.rate() == kSampleRate48kHz) {
    split_rate_ = kSampleRate16kHz;
  } else {
    split_rate_ = fwd_proc_format_.rate();
  }

  return InitializeLocked();
}

// Calls InitializeLocked() if any of the audio parameters have changed from
// their current values.
int AudioProcessingImpl::MaybeInitializeLocked(int input_sample_rate_hz,
                                               int output_sample_rate_hz,
                                               int reverse_sample_rate_hz,
                                               int num_input_channels,
                                               int num_output_channels,
                                               int num_reverse_channels) {
  if (input_sample_rate_hz == fwd_in_format_.rate() &&
      output_sample_rate_hz == fwd_out_format_.rate() &&
      reverse_sample_rate_hz == rev_in_format_.rate() &&
      num_input_channels == fwd_in_format_.num_channels() &&
      num_output_channels == fwd_out_format_.num_channels() &&
      num_reverse_channels == rev_in_format_.num_channels()) {
    return kNoError;
  }
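  // The beamformer requires one input channel per element of the configured
  // array geometry and a mono output.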
  if (beamformer_enabled_ &&
      (static_cast<size_t>(num_input_channels) != array_geometry_.size() ||
       num_output_channels > 1)) {
    return kBadNumberChannelsError;
  }
  return InitializeLocked(input_sample_rate_hz,
                          output_sample_rate_hz,
                          reverse_sample_rate_hz,
                          num_input_channels,
                          num_output_channels,
                          num_reverse_channels);
}

void AudioProcessingImpl::SetExtraOptions(const Config& config) {
  CriticalSectionScoped crit_scoped(crit_);
  std::list<ProcessingComponent*>::iterator it;
  for (it = component_list_.begin(); it != component_list_.end(); ++it)
    (*it)->SetExtraOptions(config);

  if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) {
    transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled;
    InitializeTransient();
  }
}

int AudioProcessingImpl::input_sample_rate_hz() const {
  CriticalSectionScoped crit_scoped(crit_);
  return fwd_in_format_.rate();
}

int AudioProcessingImpl::sample_rate_hz() const {
  CriticalSectionScoped crit_scoped(crit_);
  return fwd_in_format_.rate();
}

int AudioProcessingImpl::proc_sample_rate_hz() const {
  return fwd_proc_format_.rate();
}

int AudioProcessingImpl::proc_split_sample_rate_hz() const {
  return split_rate_;
}

int AudioProcessingImpl::num_reverse_channels() const {
  return rev_proc_format_.num_channels();
}

int AudioProcessingImpl::num_input_channels() const {
  return fwd_in_format_.num_channels();
}

int AudioProcessingImpl::num_output_channels() const {
  return fwd_out_format_.num_channels();
}

void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
  output_will_be_muted_ = muted;
  CriticalSectionScoped lock(crit_);
  if (agc_manager_.get()) {
    agc_manager_->SetCaptureMuted(output_will_be_muted_);
  }
}

bool AudioProcessingImpl::output_will_be_muted() const {
  return output_will_be_muted_;
}

int AudioProcessingImpl::ProcessStream(const float* const* src,
                                       int samples_per_channel,
                                       int input_sample_rate_hz,
                                       ChannelLayout input_layout,
                                       int output_sample_rate_hz,
                                       ChannelLayout output_layout,
                                       float* const* dest) {
  CriticalSectionScoped crit_scoped(crit_);
  if (!src || !dest) {
    return kNullPointerError;
  }

  RETURN_ON_ERR(MaybeInitializeLocked(input_sample_rate_hz,
                                      output_sample_rate_hz,
                                      rev_in_format_.rate(),
                                      ChannelsFromLayout(input_layout),
                                      ChannelsFromLayout(output_layout),
                                      rev_in_format_.num_channels()));
  if (samples_per_channel != fwd_in_format_.samples_per_channel()) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t channel_size =
        sizeof(float) * fwd_in_format_.samples_per_channel();
    for (int i = 0; i < fwd_in_format_.num_channels(); ++i)
      msg->add_input_channel(src[i], channel_size);
  }
#endif

  capture_audio_->CopyFrom(src, samples_per_channel, input_layout);
  RETURN_ON_ERR(ProcessStreamLocked());
  if (output_copy_needed(is_data_processed())) {
    capture_audio_->CopyTo(fwd_out_format_.samples_per_channel(),
                           output_layout,
                           dest);
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t channel_size =
        sizeof(float) * fwd_out_format_.samples_per_channel();
    for (int i = 0; i < fwd_out_format_.num_channels(); ++i)
      msg->add_output_channel(dest[i], channel_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  return kNoError;
}

int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  if (!frame) {
    return kNullPointerError;
  }
  // Must be a native rate.
  if (frame->sample_rate_hz_ != kSampleRate8kHz &&
      frame->sample_rate_hz_ != kSampleRate16kHz &&
      frame->sample_rate_hz_ != kSampleRate32kHz &&
      frame->sample_rate_hz_ != kSampleRate48kHz) {
    return kBadSampleRateError;
  }
  if (echo_control_mobile_->is_enabled() &&
      frame->sample_rate_hz_ > kSampleRate16kHz) {
    LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
    return kUnsupportedComponentError;
  }

  // TODO(ajm): The input and output rates and channels are currently
  // constrained to be identical in the int16 interface.
  RETURN_ON_ERR(MaybeInitializeLocked(frame->sample_rate_hz_,
                                      frame->sample_rate_hz_,
                                      rev_in_format_.rate(),
                                      frame->num_channels_,
                                      frame->num_channels_,
                                      rev_in_format_.num_channels()));
  if (frame->samples_per_channel_ != fwd_in_format_.samples_per_channel()) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_input_data(frame->data_, data_size);
  }
#endif

  capture_audio_->DeinterleaveFrom(frame);
  RETURN_ON_ERR(ProcessStreamLocked());
  capture_audio_->InterleaveTo(frame, output_copy_needed(is_data_processed()));

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_output_data(frame->data_, data_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  return kNoError;
}

int AudioProcessingImpl::ProcessStreamLocked() {
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    msg->set_delay(stream_delay_ms_);
    msg->set_drift(echo_cancellation_->stream_drift_samples());
    msg->set_level(gain_control_->stream_analog_level());
    msg->set_keypress(key_pressed_);
  }
#endif

  AudioBuffer* ca = capture_audio_.get();  // For brevity.
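  // The new AGC analyzes the capture signal before any component has modified
  // it.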
  if (use_new_agc_ && gain_control_->is_enabled()) {
    agc_manager_->AnalyzePreProcess(ca->data(0),
                                    ca->num_channels(),
                                    fwd_proc_format_.samples_per_channel());
  }

  bool data_processed = is_data_processed();
  if (analysis_needed(data_processed)) {
    ca->SplitIntoFrequencyBands();
  }

#ifdef WEBRTC_BEAMFORMER
  if (beamformer_enabled_) {
    beamformer_->ProcessChunk(ca->split_channels_const_f(kBand0To8kHz),
                              ca->split_channels_const_f(kBand8To16kHz),
                              ca->num_channels(),
                              ca->samples_per_split_channel(),
                              ca->split_channels_f(kBand0To8kHz),
                              ca->split_channels_f(kBand8To16kHz));
    ca->set_num_channels(1);
  }
#endif

  RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca));
  RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca));
  RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca));
  RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca));

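  // AECM uses the low-pass band both before and after noise suppression, so
  // keep an unsuppressed copy while both components are active.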
  if (echo_control_mobile_->is_enabled() && noise_suppression_->is_enabled()) {
    ca->CopyLowPassToReference();
  }
  RETURN_ON_ERR(noise_suppression_->ProcessCaptureAudio(ca));
  RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(ca));
  RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(ca));

  if (use_new_agc_ && gain_control_->is_enabled()) {
    agc_manager_->Process(ca->split_bands_const(0)[kBand0To8kHz],
                          ca->samples_per_split_channel(),
                          split_rate_);
  }
  RETURN_ON_ERR(gain_control_->ProcessCaptureAudio(ca));

  if (synthesis_needed(data_processed)) {
    ca->MergeFrequencyBands();
  }

  // TODO(aluebs): Investigate if the transient suppression placement should be
  // before or after the AGC.
  if (transient_suppressor_enabled_) {
    float voice_probability =
        agc_manager_.get() ? agc_manager_->voice_probability() : 1.f;

    transient_suppressor_->Suppress(ca->data_f(0),
                                    ca->samples_per_channel(),
                                    ca->num_channels(),
                                    ca->split_bands_const_f(0)[kBand0To8kHz],
                                    ca->samples_per_split_channel(),
                                    ca->keyboard_data(),
                                    ca->samples_per_keyboard_channel(),
                                    voice_probability,
                                    key_pressed_);
  }

  // The level estimator operates on the recombined data.
  RETURN_ON_ERR(level_estimator_->ProcessStream(ca));

  was_stream_delay_set_ = false;
  return kNoError;
}

int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data,
                                              int samples_per_channel,
                                              int sample_rate_hz,
                                              ChannelLayout layout) {
  CriticalSectionScoped crit_scoped(crit_);
  if (data == NULL) {
    return kNullPointerError;
  }

  const int num_channels = ChannelsFromLayout(layout);
  RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(),
                                      fwd_out_format_.rate(),
                                      sample_rate_hz,
                                      fwd_in_format_.num_channels(),
                                      fwd_out_format_.num_channels(),
                                      num_channels));
  if (samples_per_channel != rev_in_format_.samples_per_channel()) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t channel_size =
        sizeof(float) * rev_in_format_.samples_per_channel();
    for (int i = 0; i < num_channels; ++i)
      msg->add_channel(data[i], channel_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  render_audio_->CopyFrom(data, samples_per_channel, layout);
  return AnalyzeReverseStreamLocked();
}

int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  if (frame == NULL) {
    return kNullPointerError;
  }
  // Must be a native rate.
  if (frame->sample_rate_hz_ != kSampleRate8kHz &&
      frame->sample_rate_hz_ != kSampleRate16kHz &&
      frame->sample_rate_hz_ != kSampleRate32kHz &&
      frame->sample_rate_hz_ != kSampleRate48kHz) {
    return kBadSampleRateError;
  }
  // This interface does not tolerate different forward and reverse rates.
  if (frame->sample_rate_hz_ != fwd_in_format_.rate()) {
    return kBadSampleRateError;
  }

  RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(),
                                      fwd_out_format_.rate(),
                                      frame->sample_rate_hz_,
                                      fwd_in_format_.num_channels(),
                                      fwd_in_format_.num_channels(),
                                      frame->num_channels_));
  if (frame->samples_per_channel_ != rev_in_format_.samples_per_channel()) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_data(frame->data_, data_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  render_audio_->DeinterleaveFrom(frame);
  return AnalyzeReverseStreamLocked();
}

int AudioProcessingImpl::AnalyzeReverseStreamLocked() {
  AudioBuffer* ra = render_audio_.get();  // For brevity.
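  // The reverse stream is only analyzed, never modified, so the bands are not
  // merged back afterwards.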
  if (rev_proc_format_.rate() == kSampleRate32kHz) {
    ra->SplitIntoFrequencyBands();
  }

  RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra));
  RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra));
  if (!use_new_agc_) {
    RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra));
  }

  return kNoError;
}

int AudioProcessingImpl::set_stream_delay_ms(int delay) {
  Error retval = kNoError;
  was_stream_delay_set_ = true;
  delay += delay_offset_ms_;

  if (delay < 0) {
    delay = 0;
    retval = kBadStreamParameterWarning;
  }

  // TODO(ajm): the max is rather arbitrarily chosen; investigate.
  if (delay > 500) {
    delay = 500;
    retval = kBadStreamParameterWarning;
  }

  stream_delay_ms_ = delay;
  return retval;
}

int AudioProcessingImpl::stream_delay_ms() const {
  return stream_delay_ms_;
}

bool AudioProcessingImpl::was_stream_delay_set() const {
  return was_stream_delay_set_;
}

void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
  key_pressed_ = key_pressed;
}

bool AudioProcessingImpl::stream_key_pressed() const {
  return key_pressed_;
}

void AudioProcessingImpl::set_delay_offset_ms(int offset) {
  CriticalSectionScoped crit_scoped(crit_);
  delay_offset_ms_ = offset;
}

int AudioProcessingImpl::delay_offset_ms() const {
  return delay_offset_ms_;
}

int AudioProcessingImpl::StartDebugRecording(
    const char filename[AudioProcessing::kMaxFilenameSize]) {
  CriticalSectionScoped crit_scoped(crit_);
  assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize);

  if (filename == NULL) {
    return kNullPointerError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stop any ongoing recording.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }

  if (debug_file_->OpenFile(filename, false) == -1) {
    debug_file_->CloseFile();
    return kFileError;
  }

  int err = WriteInitMessage();
  if (err != kNoError) {
    return err;
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}

int AudioProcessingImpl::StartDebugRecording(FILE* handle) {
  CriticalSectionScoped crit_scoped(crit_);

  if (handle == NULL) {
    return kNullPointerError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stop any ongoing recording.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }

  if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) {
    return kFileError;
  }

  int err = WriteInitMessage();
  if (err != kNoError) {
    return err;
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}

int AudioProcessingImpl::StartDebugRecordingForPlatformFile(
    rtc::PlatformFile handle) {
  FILE* stream = rtc::FdopenPlatformFileForWriting(handle);
  return StartDebugRecording(stream);
}

int AudioProcessingImpl::StopDebugRecording() {
  CriticalSectionScoped crit_scoped(crit_);

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // We just return if recording hasn't started.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}

EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
  return echo_cancellation_;
}

EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
  return echo_control_mobile_;
}

GainControl* AudioProcessingImpl::gain_control() const {
  if (use_new_agc_) {
    return gain_control_for_new_agc_.get();
  }
  return gain_control_;
}

HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
  return high_pass_filter_;
}

LevelEstimator* AudioProcessingImpl::level_estimator() const {
  return level_estimator_;
}

NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
  return noise_suppression_;
}

VoiceDetection* AudioProcessingImpl::voice_detection() const {
  return voice_detection_;
}

bool AudioProcessingImpl::is_data_processed() const {
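  // The beamformer always rewrites the capture data when enabled.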
  if (beamformer_enabled_) {
    return true;
  }

  int enabled_count = 0;
  std::list<ProcessingComponent*>::const_iterator it;
  for (it = component_list_.begin(); it != component_list_.end(); it++) {
    if ((*it)->is_component_enabled()) {
      enabled_count++;
    }
  }

  // Data is unchanged if no components are enabled, or if only
  // level_estimator_ or voice_detection_ is enabled.
  if (enabled_count == 0) {
    return false;
  } else if (enabled_count == 1) {
    if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
      return false;
    }
  } else if (enabled_count == 2) {
    if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
      return false;
    }
  }
  return true;
}

bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const {
  // Check if we've upmixed or downmixed the audio.
  return ((fwd_out_format_.num_channels() != fwd_in_format_.num_channels()) ||
          is_data_processed || transient_suppressor_enabled_);
}

bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
  return (is_data_processed && (fwd_proc_format_.rate() == kSampleRate32kHz ||
                                fwd_proc_format_.rate() == kSampleRate48kHz));
}

bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
  if (!is_data_processed && !voice_detection_->is_enabled() &&
      !transient_suppressor_enabled_) {
    // Only level_estimator_ is enabled.
    return false;
  } else if (fwd_proc_format_.rate() == kSampleRate32kHz ||
             fwd_proc_format_.rate() == kSampleRate48kHz) {
    // Something besides level_estimator_ is enabled, and we have super-wb.
    return true;
  }
  return false;
}

int AudioProcessingImpl::InitializeExperimentalAgc() {
  if (use_new_agc_) {
    if (!agc_manager_.get()) {
      agc_manager_.reset(
          new AgcManagerDirect(gain_control_, gain_control_for_new_agc_.get()));
    }
    agc_manager_->Initialize();
    agc_manager_->SetCaptureMuted(output_will_be_muted_);
  }
  return kNoError;
}

int AudioProcessingImpl::InitializeTransient() {
  if (transient_suppressor_enabled_) {
    if (!transient_suppressor_.get()) {
      transient_suppressor_.reset(new TransientSuppressor());
    }
    transient_suppressor_->Initialize(fwd_proc_format_.rate(),
                                      split_rate_,
                                      fwd_out_format_.num_channels());
  }
  return kNoError;
}

void AudioProcessingImpl::InitializeBeamformer() {
  if (beamformer_enabled_) {
#ifdef WEBRTC_BEAMFORMER
    beamformer_.reset(new Beamformer(kChunkSizeMs,
                                     split_rate_,
                                     array_geometry_));
#else
    assert(false);
#endif
  }
}

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
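// Debug events are written length-prefixed: a 32-bit byte count followed by
// the serialized protobuf message.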
int AudioProcessingImpl::WriteMessageToDebugFile() {
  int32_t size = event_msg_->ByteSize();
  if (size <= 0) {
    return kUnspecifiedError;
  }
#if defined(WEBRTC_ARCH_BIG_ENDIAN)
  // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
  // pretty safe in assuming little-endian.
#endif

  if (!event_msg_->SerializeToString(&event_str_)) {
    return kUnspecifiedError;
  }

  // Write message preceded by its size.
  if (!debug_file_->Write(&size, sizeof(int32_t))) {
    return kFileError;
  }
  if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
    return kFileError;
  }

  event_msg_->Clear();

  return kNoError;
}

int AudioProcessingImpl::WriteInitMessage() {
  event_msg_->set_type(audioproc::Event::INIT);
  audioproc::Init* msg = event_msg_->mutable_init();
  msg->set_sample_rate(fwd_in_format_.rate());
  msg->set_num_input_channels(fwd_in_format_.num_channels());
  msg->set_num_output_channels(fwd_out_format_.num_channels());
  msg->set_num_reverse_channels(rev_in_format_.num_channels());
  msg->set_reverse_sample_rate(rev_in_format_.rate());
  msg->set_output_sample_rate(fwd_out_format_.rate());

  int err = WriteMessageToDebugFile();
  if (err != kNoError) {
    return err;
  }

  return kNoError;
}
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP

}  // namespace webrtc