/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_processing/audio_processing_impl.h"

#include <assert.h>

#include "webrtc/base/compile_assert.h"
#include "webrtc/base/platform_file.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/agc/agc_manager_direct.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/modules/audio_processing/beamformer/beamformer.h"
#include "webrtc/modules/audio_processing/channel_buffer.h"
#include "webrtc/modules/audio_processing/common.h"
#include "webrtc/modules/audio_processing/echo_cancellation_impl.h"
#include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
#include "webrtc/modules/audio_processing/gain_control_impl.h"
#include "webrtc/modules/audio_processing/high_pass_filter_impl.h"
#include "webrtc/modules/audio_processing/level_estimator_impl.h"
#include "webrtc/modules/audio_processing/noise_suppression_impl.h"
#include "webrtc/modules/audio_processing/processing_component.h"
#include "webrtc/modules/audio_processing/transient/transient_suppressor.h"
#include "webrtc/modules/audio_processing/voice_detection_impl.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/file_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// Files generated at build-time by the protobuf compiler.
#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
#else
#include "webrtc/audio_processing/debug.pb.h"
#endif
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP

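// Convenience macro: evaluates |expr| and returns its error code from the
// enclosing function if it is anything other than kNoError. The do/while (0)
// wrapper makes the expansion a single statement, so the macro composes safely
// with if/else.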
#define RETURN_ON_ERR(expr)  \
  do {                       \
    int err = expr;          \
    if (err != kNoError) {   \
      return err;            \
    }                        \
  } while (0)

namespace webrtc {

// Throughout webrtc, it's assumed that success is represented by zero.
COMPILE_ASSERT(AudioProcessing::kNoError == 0, no_error_must_be_zero);

// This class has two main functionalities:
//
// 1) It is returned instead of the real GainControl after the new AGC has been
//    enabled in order to prevent an outside user from overriding compression
//    settings. It doesn't do anything in its implementation, except for
//    delegating the const methods and Enable calls to the real GainControl, so
//    AGC can still be disabled.
//
// 2) It is injected into AgcManagerDirect and implements volume callbacks for
//    getting and setting the volume level. It just caches this value to be
//    used in VoiceEngine later.
class GainControlForNewAgc : public GainControl, public VolumeCallbacks {
 public:
  explicit GainControlForNewAgc(GainControlImpl* gain_control)
      : real_gain_control_(gain_control),
        volume_(0) {
  }

  // GainControl implementation.
  virtual int Enable(bool enable) OVERRIDE {
    return real_gain_control_->Enable(enable);
  }
  virtual bool is_enabled() const OVERRIDE {
    return real_gain_control_->is_enabled();
  }
  virtual int set_stream_analog_level(int level) OVERRIDE {
    volume_ = level;
    return AudioProcessing::kNoError;
  }
  virtual int stream_analog_level() OVERRIDE {
    return volume_;
  }
  virtual int set_mode(Mode mode) OVERRIDE { return AudioProcessing::kNoError; }
  virtual Mode mode() const OVERRIDE { return GainControl::kAdaptiveAnalog; }
  virtual int set_target_level_dbfs(int level) OVERRIDE {
    return AudioProcessing::kNoError;
  }
  virtual int target_level_dbfs() const OVERRIDE {
    return real_gain_control_->target_level_dbfs();
  }
  virtual int set_compression_gain_db(int gain) OVERRIDE {
    return AudioProcessing::kNoError;
  }
  virtual int compression_gain_db() const OVERRIDE {
    return real_gain_control_->compression_gain_db();
  }
  virtual int enable_limiter(bool enable) OVERRIDE {
    return AudioProcessing::kNoError;
  }
  virtual bool is_limiter_enabled() const OVERRIDE {
    return real_gain_control_->is_limiter_enabled();
  }
  virtual int set_analog_level_limits(int minimum,
                                      int maximum) OVERRIDE {
    return AudioProcessing::kNoError;
  }
  virtual int analog_level_minimum() const OVERRIDE {
    return real_gain_control_->analog_level_minimum();
  }
  virtual int analog_level_maximum() const OVERRIDE {
    return real_gain_control_->analog_level_maximum();
  }
  virtual bool stream_is_saturated() const OVERRIDE {
    return real_gain_control_->stream_is_saturated();
  }

  // VolumeCallbacks implementation.
  virtual void SetMicVolume(int volume) OVERRIDE {
    volume_ = volume;
  }
  virtual int GetMicVolume() OVERRIDE {
    return volume_;
  }

 private:
  GainControl* real_gain_control_;
  int volume_;
};

AudioProcessing* AudioProcessing::Create() {
  Config config;
  return Create(config);
}

AudioProcessing* AudioProcessing::Create(const Config& config) {
  AudioProcessingImpl* apm = new AudioProcessingImpl(config);
  if (apm->Initialize() != kNoError) {
    delete apm;
    apm = NULL;
  }

  return apm;
}
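
// Typical client-side call sequence for the interface implemented in this
// file (illustrative sketch only; |render_frame|, |capture_frame| and
// |estimated_delay_ms| are hypothetical caller-owned values):
//
//   AudioProcessing* apm = AudioProcessing::Create(config);
//   apm->AnalyzeReverseStream(&render_frame);      // Far-end audio.
//   apm->set_stream_delay_ms(estimated_delay_ms);  // Render-to-capture delay.
//   apm->ProcessStream(&capture_frame);            // Near-end audio, in place.
//   ...
//   delete apm;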

AudioProcessingImpl::AudioProcessingImpl(const Config& config)
    : echo_cancellation_(NULL),
      echo_control_mobile_(NULL),
      gain_control_(NULL),
      high_pass_filter_(NULL),
      level_estimator_(NULL),
      noise_suppression_(NULL),
      voice_detection_(NULL),
      crit_(CriticalSectionWrapper::CreateCriticalSection()),
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
      debug_file_(FileWrapper::Create()),
      event_msg_(new audioproc::Event()),
#endif
      fwd_in_format_(kSampleRate16kHz, 1),
      fwd_proc_format_(kSampleRate16kHz),
      fwd_out_format_(kSampleRate16kHz, 1),
      rev_in_format_(kSampleRate16kHz, 1),
      rev_proc_format_(kSampleRate16kHz, 1),
      split_rate_(kSampleRate16kHz),
      stream_delay_ms_(0),
      delay_offset_ms_(0),
      was_stream_delay_set_(false),
      output_will_be_muted_(false),
      key_pressed_(false),
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
      use_new_agc_(false),
#else
      use_new_agc_(config.Get<ExperimentalAgc>().enabled),
#endif
      transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled),
      beamformer_enabled_(config.Get<Beamforming>().enabled),
      array_geometry_(config.Get<Beamforming>().array_geometry) {
  echo_cancellation_ = new EchoCancellationImpl(this, crit_);
  component_list_.push_back(echo_cancellation_);

  echo_control_mobile_ = new EchoControlMobileImpl(this, crit_);
  component_list_.push_back(echo_control_mobile_);

  gain_control_ = new GainControlImpl(this, crit_);
  component_list_.push_back(gain_control_);

  high_pass_filter_ = new HighPassFilterImpl(this, crit_);
  component_list_.push_back(high_pass_filter_);

  level_estimator_ = new LevelEstimatorImpl(this, crit_);
  component_list_.push_back(level_estimator_);

  noise_suppression_ = new NoiseSuppressionImpl(this, crit_);
  component_list_.push_back(noise_suppression_);

  voice_detection_ = new VoiceDetectionImpl(this, crit_);
  component_list_.push_back(voice_detection_);

  gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_));

  SetExtraOptions(config);
}

AudioProcessingImpl::~AudioProcessingImpl() {
  {
    CriticalSectionScoped crit_scoped(crit_);
    // Depends on gain_control_ and gain_control_for_new_agc_.
    agc_manager_.reset();
    // Depends on gain_control_.
    gain_control_for_new_agc_.reset();
    while (!component_list_.empty()) {
      ProcessingComponent* component = component_list_.front();
      component->Destroy();
      delete component;
      component_list_.pop_front();
    }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
    if (debug_file_->Open()) {
      debug_file_->CloseFile();
    }
#endif
  }
  delete crit_;
  crit_ = NULL;
}

int AudioProcessingImpl::Initialize() {
  CriticalSectionScoped crit_scoped(crit_);
  return InitializeLocked();
}

int AudioProcessingImpl::set_sample_rate_hz(int rate) {
  CriticalSectionScoped crit_scoped(crit_);
  return InitializeLocked(rate,
                          rate,
                          rev_in_format_.rate(),
                          fwd_in_format_.num_channels(),
                          fwd_out_format_.num_channels(),
                          rev_in_format_.num_channels());
}

int AudioProcessingImpl::Initialize(int input_sample_rate_hz,
                                    int output_sample_rate_hz,
                                    int reverse_sample_rate_hz,
                                    ChannelLayout input_layout,
                                    ChannelLayout output_layout,
                                    ChannelLayout reverse_layout) {
  CriticalSectionScoped crit_scoped(crit_);
  return InitializeLocked(input_sample_rate_hz,
                          output_sample_rate_hz,
                          reverse_sample_rate_hz,
                          ChannelsFromLayout(input_layout),
                          ChannelsFromLayout(output_layout),
                          ChannelsFromLayout(reverse_layout));
}

int AudioProcessingImpl::InitializeLocked() {
  const int fwd_audio_buffer_channels = beamformer_enabled_ ?
                                        fwd_in_format_.num_channels() :
                                        fwd_out_format_.num_channels();
  render_audio_.reset(new AudioBuffer(rev_in_format_.samples_per_channel(),
                                      rev_in_format_.num_channels(),
                                      rev_proc_format_.samples_per_channel(),
                                      rev_proc_format_.num_channels(),
                                      rev_proc_format_.samples_per_channel()));
  capture_audio_.reset(new AudioBuffer(fwd_in_format_.samples_per_channel(),
                                       fwd_in_format_.num_channels(),
                                       fwd_proc_format_.samples_per_channel(),
                                       fwd_audio_buffer_channels,
                                       fwd_out_format_.samples_per_channel()));

  // Initialize all components.
  std::list<ProcessingComponent*>::iterator it;
  for (it = component_list_.begin(); it != component_list_.end(); ++it) {
    int err = (*it)->Initialize();
    if (err != kNoError) {
      return err;
    }
  }

  int err = InitializeExperimentalAgc();
  if (err != kNoError) {
    return err;
  }

  err = InitializeTransient();
  if (err != kNoError) {
    return err;
  }

  InitializeBeamformer();

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    int err = WriteInitMessage();
    if (err != kNoError) {
      return err;
    }
  }
#endif

  return kNoError;
}

int AudioProcessingImpl::InitializeLocked(int input_sample_rate_hz,
                                          int output_sample_rate_hz,
                                          int reverse_sample_rate_hz,
                                          int num_input_channels,
                                          int num_output_channels,
                                          int num_reverse_channels) {
  if (input_sample_rate_hz <= 0 ||
      output_sample_rate_hz <= 0 ||
      reverse_sample_rate_hz <= 0) {
    return kBadSampleRateError;
  }
  if (num_output_channels > num_input_channels) {
    return kBadNumberChannelsError;
  }
  // Only mono and stereo supported currently.
  if (num_input_channels > 2 || num_input_channels < 1 ||
      num_output_channels > 2 || num_output_channels < 1 ||
      num_reverse_channels > 2 || num_reverse_channels < 1) {
    return kBadNumberChannelsError;
  }

  fwd_in_format_.set(input_sample_rate_hz, num_input_channels);
  fwd_out_format_.set(output_sample_rate_hz, num_output_channels);
  rev_in_format_.set(reverse_sample_rate_hz, num_reverse_channels);

  // We process at the closest native rate >= min(input rate, output rate)...
  int min_proc_rate = std::min(fwd_in_format_.rate(), fwd_out_format_.rate());
  int fwd_proc_rate;
  if (min_proc_rate > kSampleRate16kHz) {
    fwd_proc_rate = kSampleRate32kHz;
  } else if (min_proc_rate > kSampleRate8kHz) {
    fwd_proc_rate = kSampleRate16kHz;
  } else {
    fwd_proc_rate = kSampleRate8kHz;
  }
  // ...with one exception.
  if (echo_control_mobile_->is_enabled() && min_proc_rate > kSampleRate16kHz) {
    fwd_proc_rate = kSampleRate16kHz;
  }
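  // For example: a 48 kHz input with a 48 kHz output is processed at 32 kHz
  // (16 kHz when AECM is enabled), while a 16 kHz input with an 8 kHz output
  // is processed at 8 kHz.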

  fwd_proc_format_.set(fwd_proc_rate);

  // We normally process the reverse stream at 16 kHz. Unless...
  int rev_proc_rate = kSampleRate16kHz;
  if (fwd_proc_format_.rate() == kSampleRate8kHz) {
    // ...the forward stream is at 8 kHz.
    rev_proc_rate = kSampleRate8kHz;
  } else {
    if (rev_in_format_.rate() == kSampleRate32kHz) {
      // ...or the input is at 32 kHz, in which case we use the splitting
      // filter rather than the resampler.
      rev_proc_rate = kSampleRate32kHz;
    }
  }

  // Always downmix the reverse stream to mono for analysis. This has been
  // demonstrated to work well for AEC in most practical scenarios.
  rev_proc_format_.set(rev_proc_rate, 1);

  if (fwd_proc_format_.rate() == kSampleRate32kHz ||
      fwd_proc_format_.rate() == kSampleRate48kHz) {
    split_rate_ = kSampleRate16kHz;
  } else {
    split_rate_ = fwd_proc_format_.rate();
  }

  return InitializeLocked();
}

// Calls InitializeLocked() if any of the audio parameters have changed from
// their current values.
int AudioProcessingImpl::MaybeInitializeLocked(int input_sample_rate_hz,
                                               int output_sample_rate_hz,
                                               int reverse_sample_rate_hz,
                                               int num_input_channels,
                                               int num_output_channels,
                                               int num_reverse_channels) {
  if (input_sample_rate_hz == fwd_in_format_.rate() &&
      output_sample_rate_hz == fwd_out_format_.rate() &&
      reverse_sample_rate_hz == rev_in_format_.rate() &&
      num_input_channels == fwd_in_format_.num_channels() &&
      num_output_channels == fwd_out_format_.num_channels() &&
      num_reverse_channels == rev_in_format_.num_channels()) {
    return kNoError;
  }
  if (beamformer_enabled_ &&
      (static_cast<size_t>(num_input_channels) != array_geometry_.size() ||
       num_output_channels > 1)) {
    return kBadNumberChannelsError;
  }
  return InitializeLocked(input_sample_rate_hz,
                          output_sample_rate_hz,
                          reverse_sample_rate_hz,
                          num_input_channels,
                          num_output_channels,
                          num_reverse_channels);
}

void AudioProcessingImpl::SetExtraOptions(const Config& config) {
  CriticalSectionScoped crit_scoped(crit_);
  std::list<ProcessingComponent*>::iterator it;
  for (it = component_list_.begin(); it != component_list_.end(); ++it)
    (*it)->SetExtraOptions(config);

  if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) {
    transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled;
    InitializeTransient();
  }
}

int AudioProcessingImpl::input_sample_rate_hz() const {
  CriticalSectionScoped crit_scoped(crit_);
  return fwd_in_format_.rate();
}

int AudioProcessingImpl::sample_rate_hz() const {
  CriticalSectionScoped crit_scoped(crit_);
  return fwd_in_format_.rate();
}

int AudioProcessingImpl::proc_sample_rate_hz() const {
  return fwd_proc_format_.rate();
}

int AudioProcessingImpl::proc_split_sample_rate_hz() const {
  return split_rate_;
}

int AudioProcessingImpl::num_reverse_channels() const {
  return rev_proc_format_.num_channels();
}

int AudioProcessingImpl::num_input_channels() const {
  return fwd_in_format_.num_channels();
}

int AudioProcessingImpl::num_output_channels() const {
  return fwd_out_format_.num_channels();
}

void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
  output_will_be_muted_ = muted;
  CriticalSectionScoped lock(crit_);
  if (agc_manager_.get()) {
    agc_manager_->SetCaptureMuted(output_will_be_muted_);
  }
}

bool AudioProcessingImpl::output_will_be_muted() const {
  return output_will_be_muted_;
}

int AudioProcessingImpl::ProcessStream(const float* const* src,
                                       int samples_per_channel,
                                       int input_sample_rate_hz,
                                       ChannelLayout input_layout,
                                       int output_sample_rate_hz,
                                       ChannelLayout output_layout,
                                       float* const* dest) {
  CriticalSectionScoped crit_scoped(crit_);
  if (!src || !dest) {
    return kNullPointerError;
  }

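  // Re-initialize the processing pipeline if the stream format has changed
  // since the previous call.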
  RETURN_ON_ERR(MaybeInitializeLocked(input_sample_rate_hz,
                                      output_sample_rate_hz,
                                      rev_in_format_.rate(),
                                      ChannelsFromLayout(input_layout),
                                      ChannelsFromLayout(output_layout),
                                      rev_in_format_.num_channels()));
  if (samples_per_channel != fwd_in_format_.samples_per_channel()) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t channel_size =
        sizeof(float) * fwd_in_format_.samples_per_channel();
    for (int i = 0; i < fwd_in_format_.num_channels(); ++i)
      msg->add_input_channel(src[i], channel_size);
  }
#endif

  capture_audio_->CopyFrom(src, samples_per_channel, input_layout);
  RETURN_ON_ERR(ProcessStreamLocked());
  capture_audio_->CopyTo(fwd_out_format_.samples_per_channel(),
                         output_layout,
                         dest);

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t channel_size =
        sizeof(float) * fwd_out_format_.samples_per_channel();
    for (int i = 0; i < fwd_out_format_.num_channels(); ++i)
      msg->add_output_channel(dest[i], channel_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  return kNoError;
}

int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  if (!frame) {
    return kNullPointerError;
  }
  // Must be a native rate.
  if (frame->sample_rate_hz_ != kSampleRate8kHz &&
      frame->sample_rate_hz_ != kSampleRate16kHz &&
      frame->sample_rate_hz_ != kSampleRate32kHz &&
      frame->sample_rate_hz_ != kSampleRate48kHz) {
    return kBadSampleRateError;
  }
  if (echo_control_mobile_->is_enabled() &&
      frame->sample_rate_hz_ > kSampleRate16kHz) {
    LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
    return kUnsupportedComponentError;
  }

  // TODO(ajm): The input and output rates and channels are currently
  // constrained to be identical in the int16 interface.
  RETURN_ON_ERR(MaybeInitializeLocked(frame->sample_rate_hz_,
                                      frame->sample_rate_hz_,
                                      rev_in_format_.rate(),
                                      frame->num_channels_,
                                      frame->num_channels_,
                                      rev_in_format_.num_channels()));
  if (frame->samples_per_channel_ != fwd_in_format_.samples_per_channel()) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_input_data(frame->data_, data_size);
  }
#endif

  capture_audio_->DeinterleaveFrom(frame);
  RETURN_ON_ERR(ProcessStreamLocked());
  capture_audio_->InterleaveTo(frame, output_copy_needed(is_data_processed()));

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_output_data(frame->data_, data_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  return kNoError;
}


int AudioProcessingImpl::ProcessStreamLocked() {
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    msg->set_delay(stream_delay_ms_);
    msg->set_drift(echo_cancellation_->stream_drift_samples());
    msg->set_level(gain_control_->stream_analog_level());
    msg->set_keypress(key_pressed_);
  }
#endif

  AudioBuffer* ca = capture_audio_.get();  // For brevity.
  if (use_new_agc_ && gain_control_->is_enabled()) {
    agc_manager_->AnalyzePreProcess(ca->data(0),
                                    ca->num_channels(),
                                    fwd_proc_format_.samples_per_channel());
  }

  bool data_processed = is_data_processed();
  if (analysis_needed(data_processed)) {
    ca->SplitIntoFrequencyBands();
  }

#ifdef WEBRTC_BEAMFORMER
  if (beamformer_enabled_) {
    beamformer_->ProcessChunk(ca->split_channels_const_f(kBand0To8kHz),
                              ca->split_channels_const_f(kBand8To16kHz),
                              ca->num_channels(),
                              ca->samples_per_split_channel(),
                              ca->split_channels_f(kBand0To8kHz),
                              ca->split_channels_f(kBand8To16kHz));
    ca->set_num_channels(1);
  }
#endif

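  // Run the capture-side components on the (possibly band-split) audio in a
  // fixed order; each call returns early on error via RETURN_ON_ERR.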
  RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca));
  RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca));
  RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca));
  RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca));

  if (echo_control_mobile_->is_enabled() && noise_suppression_->is_enabled()) {
    ca->CopyLowPassToReference();
  }
  RETURN_ON_ERR(noise_suppression_->ProcessCaptureAudio(ca));
  RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(ca));
  RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(ca));

  if (use_new_agc_ && gain_control_->is_enabled()) {
    agc_manager_->Process(ca->split_bands_const(0)[kBand0To8kHz],
                          ca->samples_per_split_channel(),
                          split_rate_);
  }
  RETURN_ON_ERR(gain_control_->ProcessCaptureAudio(ca));

  if (synthesis_needed(data_processed)) {
    ca->MergeFrequencyBands();
  }

  // TODO(aluebs): Investigate if the transient suppression placement should be
  // before or after the AGC.
  if (transient_suppressor_enabled_) {
    float voice_probability =
        agc_manager_.get() ? agc_manager_->voice_probability() : 1.f;

    transient_suppressor_->Suppress(ca->data_f(0),
                                    ca->samples_per_channel(),
                                    ca->num_channels(),
                                    ca->split_bands_const_f(0)[kBand0To8kHz],
                                    ca->samples_per_split_channel(),
                                    ca->keyboard_data(),
                                    ca->samples_per_keyboard_channel(),
                                    voice_probability,
                                    key_pressed_);
  }

  // The level estimator operates on the recombined data.
  RETURN_ON_ERR(level_estimator_->ProcessStream(ca));

  was_stream_delay_set_ = false;
  return kNoError;
}

int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data,
                                              int samples_per_channel,
                                              int sample_rate_hz,
                                              ChannelLayout layout) {
  CriticalSectionScoped crit_scoped(crit_);
  if (data == NULL) {
    return kNullPointerError;
  }

  const int num_channels = ChannelsFromLayout(layout);
  RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(),
                                      fwd_out_format_.rate(),
                                      sample_rate_hz,
                                      fwd_in_format_.num_channels(),
                                      fwd_out_format_.num_channels(),
                                      num_channels));
  if (samples_per_channel != rev_in_format_.samples_per_channel()) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t channel_size =
        sizeof(float) * rev_in_format_.samples_per_channel();
    for (int i = 0; i < num_channels; ++i)
      msg->add_channel(data[i], channel_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  render_audio_->CopyFrom(data, samples_per_channel, layout);
  return AnalyzeReverseStreamLocked();
}

int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  if (frame == NULL) {
    return kNullPointerError;
  }
  // Must be a native rate.
  if (frame->sample_rate_hz_ != kSampleRate8kHz &&
      frame->sample_rate_hz_ != kSampleRate16kHz &&
      frame->sample_rate_hz_ != kSampleRate32kHz &&
      frame->sample_rate_hz_ != kSampleRate48kHz) {
    return kBadSampleRateError;
  }
  // This interface does not tolerate different forward and reverse rates.
  if (frame->sample_rate_hz_ != fwd_in_format_.rate()) {
    return kBadSampleRateError;
  }

  RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(),
                                      fwd_out_format_.rate(),
                                      frame->sample_rate_hz_,
                                      fwd_in_format_.num_channels(),
                                      fwd_in_format_.num_channels(),
                                      frame->num_channels_));
  if (frame->samples_per_channel_ != rev_in_format_.samples_per_channel()) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_data(frame->data_, data_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  render_audio_->DeinterleaveFrom(frame);
  return AnalyzeReverseStreamLocked();
}

int AudioProcessingImpl::AnalyzeReverseStreamLocked() {
  AudioBuffer* ra = render_audio_.get();  // For brevity.
  if (rev_proc_format_.rate() == kSampleRate32kHz) {
    ra->SplitIntoFrequencyBands();
  }

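  // Feed the render (far-end) audio to the components that use it as a
  // reference signal.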
  RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra));
  RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra));
  if (!use_new_agc_) {
    RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra));
  }

  return kNoError;
}

int AudioProcessingImpl::set_stream_delay_ms(int delay) {
  Error retval = kNoError;
  was_stream_delay_set_ = true;
  delay += delay_offset_ms_;

  if (delay < 0) {
    delay = 0;
    retval = kBadStreamParameterWarning;
  }

  // TODO(ajm): the max is rather arbitrarily chosen; investigate.
  if (delay > 500) {
    delay = 500;
    retval = kBadStreamParameterWarning;
  }

  stream_delay_ms_ = delay;
  return retval;
}

int AudioProcessingImpl::stream_delay_ms() const {
  return stream_delay_ms_;
}

bool AudioProcessingImpl::was_stream_delay_set() const {
  return was_stream_delay_set_;
}

void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
  key_pressed_ = key_pressed;
}

bool AudioProcessingImpl::stream_key_pressed() const {
  return key_pressed_;
}

void AudioProcessingImpl::set_delay_offset_ms(int offset) {
  CriticalSectionScoped crit_scoped(crit_);
  delay_offset_ms_ = offset;
}

int AudioProcessingImpl::delay_offset_ms() const {
  return delay_offset_ms_;
}

int AudioProcessingImpl::StartDebugRecording(
    const char filename[AudioProcessing::kMaxFilenameSize]) {
  CriticalSectionScoped crit_scoped(crit_);
  assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize);

  if (filename == NULL) {
    return kNullPointerError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stop any ongoing recording.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }

  if (debug_file_->OpenFile(filename, false) == -1) {
    debug_file_->CloseFile();
    return kFileError;
  }

  int err = WriteInitMessage();
  if (err != kNoError) {
    return err;
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}

int AudioProcessingImpl::StartDebugRecording(FILE* handle) {
  CriticalSectionScoped crit_scoped(crit_);

  if (handle == NULL) {
    return kNullPointerError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stop any ongoing recording.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }

  if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) {
    return kFileError;
  }

  int err = WriteInitMessage();
  if (err != kNoError) {
    return err;
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}

int AudioProcessingImpl::StartDebugRecordingForPlatformFile(
    rtc::PlatformFile handle) {
  FILE* stream = rtc::FdopenPlatformFileForWriting(handle);
  return StartDebugRecording(stream);
}

int AudioProcessingImpl::StopDebugRecording() {
  CriticalSectionScoped crit_scoped(crit_);

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // We just return if recording hasn't started.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}

EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
  return echo_cancellation_;
}

EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
  return echo_control_mobile_;
}

GainControl* AudioProcessingImpl::gain_control() const {
  if (use_new_agc_) {
    return gain_control_for_new_agc_.get();
  }
  return gain_control_;
}

HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
  return high_pass_filter_;
}

LevelEstimator* AudioProcessingImpl::level_estimator() const {
  return level_estimator_;
}

NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
  return noise_suppression_;
}

VoiceDetection* AudioProcessingImpl::voice_detection() const {
  return voice_detection_;
}

bool AudioProcessingImpl::is_data_processed() const {
  if (beamformer_enabled_) {
    return true;
  }

  int enabled_count = 0;
  std::list<ProcessingComponent*>::const_iterator it;
  for (it = component_list_.begin(); it != component_list_.end(); it++) {
    if ((*it)->is_component_enabled()) {
      enabled_count++;
    }
  }

  // Data is unchanged if no components are enabled, or if only
  // level_estimator_ or voice_detection_ is enabled.
  if (enabled_count == 0) {
    return false;
  } else if (enabled_count == 1) {
    if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
      return false;
    }
  } else if (enabled_count == 2) {
    if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
      return false;
    }
  }
  return true;
}

bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const {
  // Check if we've upmixed or downmixed the audio.
  return ((fwd_out_format_.num_channels() != fwd_in_format_.num_channels()) ||
          is_data_processed || transient_suppressor_enabled_);
}

bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
  return (is_data_processed && (fwd_proc_format_.rate() == kSampleRate32kHz ||
                                fwd_proc_format_.rate() == kSampleRate48kHz));
}

bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
  if (!is_data_processed && !voice_detection_->is_enabled() &&
      !transient_suppressor_enabled_) {
    // Only level_estimator_ is enabled.
    return false;
  } else if (fwd_proc_format_.rate() == kSampleRate32kHz ||
             fwd_proc_format_.rate() == kSampleRate48kHz) {
    // Something besides level_estimator_ is enabled, and we have super-wb.
    return true;
  }
  return false;
}

int AudioProcessingImpl::InitializeExperimentalAgc() {
  if (use_new_agc_) {
    if (!agc_manager_.get()) {
      agc_manager_.reset(
          new AgcManagerDirect(gain_control_, gain_control_for_new_agc_.get()));
    }
    agc_manager_->Initialize();
    agc_manager_->SetCaptureMuted(output_will_be_muted_);
  }
  return kNoError;
}

int AudioProcessingImpl::InitializeTransient() {
  if (transient_suppressor_enabled_) {
    if (!transient_suppressor_.get()) {
      transient_suppressor_.reset(new TransientSuppressor());
    }
    transient_suppressor_->Initialize(fwd_proc_format_.rate(),
                                      split_rate_,
                                      fwd_out_format_.num_channels());
  }
  return kNoError;
}

void AudioProcessingImpl::InitializeBeamformer() {
  if (beamformer_enabled_) {
#ifdef WEBRTC_BEAMFORMER
    beamformer_.reset(new Beamformer(kChunkSizeMs,
                                     split_rate_,
                                     array_geometry_));
#else
    assert(false);
#endif
  }
}

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
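// A debug recording is written as a sequence of audioproc::Event protobuf
// messages, each preceded by its serialized size as an int32_t (see
// WriteMessageToDebugFile below).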
int AudioProcessingImpl::WriteMessageToDebugFile() {
  int32_t size = event_msg_->ByteSize();
  if (size <= 0) {
    return kUnspecifiedError;
  }
#if defined(WEBRTC_ARCH_BIG_ENDIAN)
  // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
  // pretty safe in assuming little-endian.
#endif

  if (!event_msg_->SerializeToString(&event_str_)) {
    return kUnspecifiedError;
  }

  // Write message preceded by its size.
  if (!debug_file_->Write(&size, sizeof(int32_t))) {
    return kFileError;
  }
  if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
    return kFileError;
  }

  event_msg_->Clear();

  return kNoError;
}

int AudioProcessingImpl::WriteInitMessage() {
  event_msg_->set_type(audioproc::Event::INIT);
  audioproc::Init* msg = event_msg_->mutable_init();
  msg->set_sample_rate(fwd_in_format_.rate());
  msg->set_num_input_channels(fwd_in_format_.num_channels());
  msg->set_num_output_channels(fwd_out_format_.num_channels());
  msg->set_num_reverse_channels(rev_in_format_.num_channels());
  msg->set_reverse_sample_rate(rev_in_format_.rate());
  msg->set_output_sample_rate(fwd_out_format_.rate());

  int err = WriteMessageToDebugFile();
  if (err != kNoError) {
    return err;
  }

  return kNoError;
}
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP

}  // namespace webrtc