/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_processing/audio_processing_impl.h"

#include <assert.h>

#include "webrtc/base/platform_file.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/agc/agc_manager_direct.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/modules/audio_processing/beamformer/beamformer.h"
#include "webrtc/common_audio/channel_buffer.h"
#include "webrtc/modules/audio_processing/common.h"
#include "webrtc/modules/audio_processing/echo_cancellation_impl.h"
#include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
#include "webrtc/modules/audio_processing/gain_control_impl.h"
#include "webrtc/modules/audio_processing/high_pass_filter_impl.h"
#include "webrtc/modules/audio_processing/level_estimator_impl.h"
#include "webrtc/modules/audio_processing/noise_suppression_impl.h"
#include "webrtc/modules/audio_processing/processing_component.h"
#include "webrtc/modules/audio_processing/transient/transient_suppressor.h"
#include "webrtc/modules/audio_processing/voice_detection_impl.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/file_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// Files generated at build-time by the protobuf compiler.
#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
#else
#include "webrtc/audio_processing/debug.pb.h"
#endif
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP

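// Evaluates |expr| and returns from the enclosing function if the result is
// an AudioProcessing error code other than kNoError.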
#define RETURN_ON_ERR(expr) \
  do {                      \
    int err = expr;         \
    if (err != kNoError) {  \
      return err;           \
    }                       \
  } while (0)

namespace webrtc {

// Throughout webrtc, it's assumed that success is represented by zero.
static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero");

// This class has two main functionalities:
//
// 1) It is returned instead of the real GainControl after the new AGC has been
//    enabled in order to prevent an outside user from overriding compression
//    settings. It doesn't do anything in its implementation, except for
//    delegating the const methods and Enable calls to the real GainControl, so
//    AGC can still be disabled.
//
// 2) It is injected into AgcManagerDirect and implements volume callbacks for
//    getting and setting the volume level. It just caches this value to be
//    used in VoiceEngine later.
class GainControlForNewAgc : public GainControl, public VolumeCallbacks {
 public:
  explicit GainControlForNewAgc(GainControlImpl* gain_control)
      : real_gain_control_(gain_control),
        volume_(0) {
  }

  // GainControl implementation.
  int Enable(bool enable) override {
    return real_gain_control_->Enable(enable);
  }
  bool is_enabled() const override { return real_gain_control_->is_enabled(); }
  int set_stream_analog_level(int level) override {
    volume_ = level;
    return AudioProcessing::kNoError;
  }
  int stream_analog_level() override { return volume_; }
  int set_mode(Mode mode) override { return AudioProcessing::kNoError; }
  Mode mode() const override { return GainControl::kAdaptiveAnalog; }
  int set_target_level_dbfs(int level) override {
    return AudioProcessing::kNoError;
  }
  int target_level_dbfs() const override {
    return real_gain_control_->target_level_dbfs();
  }
  int set_compression_gain_db(int gain) override {
    return AudioProcessing::kNoError;
  }
  int compression_gain_db() const override {
    return real_gain_control_->compression_gain_db();
  }
  int enable_limiter(bool enable) override {
    return AudioProcessing::kNoError;
  }
  bool is_limiter_enabled() const override {
    return real_gain_control_->is_limiter_enabled();
  }
  int set_analog_level_limits(int minimum, int maximum) override {
    return AudioProcessing::kNoError;
  }
  int analog_level_minimum() const override {
    return real_gain_control_->analog_level_minimum();
  }
  int analog_level_maximum() const override {
    return real_gain_control_->analog_level_maximum();
  }
  bool stream_is_saturated() const override {
    return real_gain_control_->stream_is_saturated();
  }

  // VolumeCallbacks implementation.
  void SetMicVolume(int volume) override { volume_ = volume; }
  int GetMicVolume() override { return volume_; }

 private:
  GainControl* real_gain_control_;
  int volume_;
};

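// Factory methods. The parameterless and Config-only overloads forward to the
// Config-plus-Beamformer overload with a null beamformer.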
AudioProcessing* AudioProcessing::Create() {
  Config config;
  return Create(config, nullptr);
}

AudioProcessing* AudioProcessing::Create(const Config& config) {
  return Create(config, nullptr);
}

AudioProcessing* AudioProcessing::Create(const Config& config,
                                         Beamformer* beamformer) {
  AudioProcessingImpl* apm = new AudioProcessingImpl(config, beamformer);
  if (apm->Initialize() != kNoError) {
    delete apm;
    apm = NULL;
  }

  return apm;
}

AudioProcessingImpl::AudioProcessingImpl(const Config& config)
    : AudioProcessingImpl(config, nullptr) {}

AudioProcessingImpl::AudioProcessingImpl(const Config& config,
                                         Beamformer* beamformer)
    : echo_cancellation_(NULL),
      echo_control_mobile_(NULL),
      gain_control_(NULL),
      high_pass_filter_(NULL),
      level_estimator_(NULL),
      noise_suppression_(NULL),
      voice_detection_(NULL),
      crit_(CriticalSectionWrapper::CreateCriticalSection()),
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
      debug_file_(FileWrapper::Create()),
      event_msg_(new audioproc::Event()),
#endif
      fwd_in_format_(kSampleRate16kHz, 1),
      fwd_proc_format_(kSampleRate16kHz),
      fwd_out_format_(kSampleRate16kHz, 1),
      rev_in_format_(kSampleRate16kHz, 1),
      rev_proc_format_(kSampleRate16kHz, 1),
      split_rate_(kSampleRate16kHz),
      stream_delay_ms_(0),
      delay_offset_ms_(0),
      was_stream_delay_set_(false),
      output_will_be_muted_(false),
      key_pressed_(false),
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
      use_new_agc_(false),
#else
      use_new_agc_(config.Get<ExperimentalAgc>().enabled),
#endif
      transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled),
      beamformer_enabled_(config.Get<Beamforming>().enabled),
      beamformer_(beamformer),
      array_geometry_(config.Get<Beamforming>().array_geometry),
      supports_48kHz_(config.Get<AudioProcessing48kHzSupport>().enabled) {
  echo_cancellation_ = new EchoCancellationImpl(this, crit_);
  component_list_.push_back(echo_cancellation_);

  echo_control_mobile_ = new EchoControlMobileImpl(this, crit_);
  component_list_.push_back(echo_control_mobile_);

  gain_control_ = new GainControlImpl(this, crit_);
  component_list_.push_back(gain_control_);

  high_pass_filter_ = new HighPassFilterImpl(this, crit_);
  component_list_.push_back(high_pass_filter_);

  level_estimator_ = new LevelEstimatorImpl(this, crit_);
  component_list_.push_back(level_estimator_);

  noise_suppression_ = new NoiseSuppressionImpl(this, crit_);
  component_list_.push_back(noise_suppression_);

  voice_detection_ = new VoiceDetectionImpl(this, crit_);
  component_list_.push_back(voice_detection_);

  gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_));

  SetExtraOptions(config);
}

AudioProcessingImpl::~AudioProcessingImpl() {
  {
    CriticalSectionScoped crit_scoped(crit_);
    // Depends on gain_control_ and gain_control_for_new_agc_.
    agc_manager_.reset();
    // Depends on gain_control_.
    gain_control_for_new_agc_.reset();
    while (!component_list_.empty()) {
      ProcessingComponent* component = component_list_.front();
      component->Destroy();
      delete component;
      component_list_.pop_front();
    }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
    if (debug_file_->Open()) {
      debug_file_->CloseFile();
    }
#endif
  }
  delete crit_;
  crit_ = NULL;
}

int AudioProcessingImpl::Initialize() {
  CriticalSectionScoped crit_scoped(crit_);
  return InitializeLocked();
}

int AudioProcessingImpl::set_sample_rate_hz(int rate) {
  CriticalSectionScoped crit_scoped(crit_);
  return InitializeLocked(rate,
                          rate,
                          rev_in_format_.rate(),
                          fwd_in_format_.num_channels(),
                          fwd_out_format_.num_channels(),
                          rev_in_format_.num_channels());
}

int AudioProcessingImpl::Initialize(int input_sample_rate_hz,
                                    int output_sample_rate_hz,
                                    int reverse_sample_rate_hz,
                                    ChannelLayout input_layout,
                                    ChannelLayout output_layout,
                                    ChannelLayout reverse_layout) {
  CriticalSectionScoped crit_scoped(crit_);
  return InitializeLocked(input_sample_rate_hz,
                          output_sample_rate_hz,
                          reverse_sample_rate_hz,
                          ChannelsFromLayout(input_layout),
                          ChannelsFromLayout(output_layout),
                          ChannelsFromLayout(reverse_layout));
}

int AudioProcessingImpl::InitializeLocked() {
  const int fwd_audio_buffer_channels = beamformer_enabled_ ?
                                        fwd_in_format_.num_channels() :
                                        fwd_out_format_.num_channels();
  render_audio_.reset(new AudioBuffer(rev_in_format_.samples_per_channel(),
                                      rev_in_format_.num_channels(),
                                      rev_proc_format_.samples_per_channel(),
                                      rev_proc_format_.num_channels(),
                                      rev_proc_format_.samples_per_channel()));
  capture_audio_.reset(new AudioBuffer(fwd_in_format_.samples_per_channel(),
                                       fwd_in_format_.num_channels(),
                                       fwd_proc_format_.samples_per_channel(),
                                       fwd_audio_buffer_channels,
                                       fwd_out_format_.samples_per_channel()));

  // Initialize all components.
  std::list<ProcessingComponent*>::iterator it;
  for (it = component_list_.begin(); it != component_list_.end(); ++it) {
    int err = (*it)->Initialize();
    if (err != kNoError) {
      return err;
    }
  }

  int err = InitializeExperimentalAgc();
  if (err != kNoError) {
    return err;
  }

  err = InitializeTransient();
  if (err != kNoError) {
    return err;
  }

  InitializeBeamformer();

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    int err = WriteInitMessage();
    if (err != kNoError) {
      return err;
    }
  }
#endif

  return kNoError;
}

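// Validates the requested stream formats, selects the native forward and
// reverse processing rates, and then delegates to InitializeLocked().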
int AudioProcessingImpl::InitializeLocked(int input_sample_rate_hz,
                                          int output_sample_rate_hz,
                                          int reverse_sample_rate_hz,
                                          int num_input_channels,
                                          int num_output_channels,
                                          int num_reverse_channels) {
  if (input_sample_rate_hz <= 0 ||
      output_sample_rate_hz <= 0 ||
      reverse_sample_rate_hz <= 0) {
    return kBadSampleRateError;
  }
  if (num_output_channels > num_input_channels) {
    return kBadNumberChannelsError;
  }
  // Only mono and stereo supported currently.
  if (num_input_channels > 2 || num_input_channels < 1 ||
      num_output_channels > 2 || num_output_channels < 1 ||
      num_reverse_channels > 2 || num_reverse_channels < 1) {
    return kBadNumberChannelsError;
  }
  if (beamformer_enabled_ &&
      (static_cast<size_t>(num_input_channels) != array_geometry_.size() ||
       num_output_channels > 1)) {
    return kBadNumberChannelsError;
  }

  fwd_in_format_.set(input_sample_rate_hz, num_input_channels);
  fwd_out_format_.set(output_sample_rate_hz, num_output_channels);
  rev_in_format_.set(reverse_sample_rate_hz, num_reverse_channels);

  // We process at the closest native rate >= min(input rate, output rate)...
  int min_proc_rate = std::min(fwd_in_format_.rate(), fwd_out_format_.rate());
  int fwd_proc_rate;
  if (supports_48kHz_ && min_proc_rate > kSampleRate32kHz) {
    fwd_proc_rate = kSampleRate48kHz;
  } else if (min_proc_rate > kSampleRate16kHz) {
    fwd_proc_rate = kSampleRate32kHz;
  } else if (min_proc_rate > kSampleRate8kHz) {
    fwd_proc_rate = kSampleRate16kHz;
  } else {
    fwd_proc_rate = kSampleRate8kHz;
  }
  // ...with one exception.
  if (echo_control_mobile_->is_enabled() && min_proc_rate > kSampleRate16kHz) {
    fwd_proc_rate = kSampleRate16kHz;
  }

  fwd_proc_format_.set(fwd_proc_rate);

  // We normally process the reverse stream at 16 kHz. Unless...
  int rev_proc_rate = kSampleRate16kHz;
  if (fwd_proc_format_.rate() == kSampleRate8kHz) {
    // ...the forward stream is at 8 kHz.
    rev_proc_rate = kSampleRate8kHz;
  } else {
    if (rev_in_format_.rate() == kSampleRate32kHz) {
      // ...or the input is at 32 kHz, in which case we use the splitting
      // filter rather than the resampler.
      rev_proc_rate = kSampleRate32kHz;
    }
  }

  // Always downmix the reverse stream to mono for analysis. This has been
  // demonstrated to work well for AEC in most practical scenarios.
  rev_proc_format_.set(rev_proc_rate, 1);

  if (fwd_proc_format_.rate() == kSampleRate32kHz ||
      fwd_proc_format_.rate() == kSampleRate48kHz) {
    split_rate_ = kSampleRate16kHz;
  } else {
    split_rate_ = fwd_proc_format_.rate();
  }

  return InitializeLocked();
}

// Calls InitializeLocked() if any of the audio parameters have changed from
// their current values.
int AudioProcessingImpl::MaybeInitializeLocked(int input_sample_rate_hz,
                                               int output_sample_rate_hz,
                                               int reverse_sample_rate_hz,
                                               int num_input_channels,
                                               int num_output_channels,
                                               int num_reverse_channels) {
  if (input_sample_rate_hz == fwd_in_format_.rate() &&
      output_sample_rate_hz == fwd_out_format_.rate() &&
      reverse_sample_rate_hz == rev_in_format_.rate() &&
      num_input_channels == fwd_in_format_.num_channels() &&
      num_output_channels == fwd_out_format_.num_channels() &&
      num_reverse_channels == rev_in_format_.num_channels()) {
    return kNoError;
  }
  return InitializeLocked(input_sample_rate_hz,
                          output_sample_rate_hz,
                          reverse_sample_rate_hz,
                          num_input_channels,
                          num_output_channels,
                          num_reverse_channels);
}

void AudioProcessingImpl::SetExtraOptions(const Config& config) {
  CriticalSectionScoped crit_scoped(crit_);
  std::list<ProcessingComponent*>::iterator it;
  for (it = component_list_.begin(); it != component_list_.end(); ++it)
    (*it)->SetExtraOptions(config);

  if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) {
    transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled;
    InitializeTransient();
  }
}

int AudioProcessingImpl::input_sample_rate_hz() const {
  CriticalSectionScoped crit_scoped(crit_);
  return fwd_in_format_.rate();
}

int AudioProcessingImpl::sample_rate_hz() const {
  CriticalSectionScoped crit_scoped(crit_);
  return fwd_in_format_.rate();
}

int AudioProcessingImpl::proc_sample_rate_hz() const {
  return fwd_proc_format_.rate();
}

int AudioProcessingImpl::proc_split_sample_rate_hz() const {
  return split_rate_;
}

int AudioProcessingImpl::num_reverse_channels() const {
  return rev_proc_format_.num_channels();
}

int AudioProcessingImpl::num_input_channels() const {
  return fwd_in_format_.num_channels();
}

int AudioProcessingImpl::num_output_channels() const {
  return fwd_out_format_.num_channels();
}

void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
  output_will_be_muted_ = muted;
  CriticalSectionScoped lock(crit_);
  if (agc_manager_.get()) {
    agc_manager_->SetCaptureMuted(output_will_be_muted_);
  }
}

bool AudioProcessingImpl::output_will_be_muted() const {
  return output_will_be_muted_;
}

int AudioProcessingImpl::ProcessStream(const float* const* src,
                                       int samples_per_channel,
                                       int input_sample_rate_hz,
                                       ChannelLayout input_layout,
                                       int output_sample_rate_hz,
                                       ChannelLayout output_layout,
                                       float* const* dest) {
  CriticalSectionScoped crit_scoped(crit_);
  if (!src || !dest) {
    return kNullPointerError;
  }

  RETURN_ON_ERR(MaybeInitializeLocked(input_sample_rate_hz,
                                      output_sample_rate_hz,
                                      rev_in_format_.rate(),
                                      ChannelsFromLayout(input_layout),
                                      ChannelsFromLayout(output_layout),
                                      rev_in_format_.num_channels()));
  if (samples_per_channel != fwd_in_format_.samples_per_channel()) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t channel_size =
        sizeof(float) * fwd_in_format_.samples_per_channel();
    for (int i = 0; i < fwd_in_format_.num_channels(); ++i)
      msg->add_input_channel(src[i], channel_size);
  }
#endif

  capture_audio_->CopyFrom(src, samples_per_channel, input_layout);
  RETURN_ON_ERR(ProcessStreamLocked());
  capture_audio_->CopyTo(fwd_out_format_.samples_per_channel(),
                         output_layout,
                         dest);

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t channel_size =
        sizeof(float) * fwd_out_format_.samples_per_channel();
    for (int i = 0; i < fwd_out_format_.num_channels(); ++i)
      msg->add_output_channel(dest[i], channel_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  return kNoError;
}

int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  if (!frame) {
    return kNullPointerError;
  }
  // Must be a native rate.
  if (frame->sample_rate_hz_ != kSampleRate8kHz &&
      frame->sample_rate_hz_ != kSampleRate16kHz &&
      frame->sample_rate_hz_ != kSampleRate32kHz &&
      frame->sample_rate_hz_ != kSampleRate48kHz) {
    return kBadSampleRateError;
  }
  if (echo_control_mobile_->is_enabled() &&
      frame->sample_rate_hz_ > kSampleRate16kHz) {
    LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
    return kUnsupportedComponentError;
  }

  // TODO(ajm): The input and output rates and channels are currently
  // constrained to be identical in the int16 interface.
  RETURN_ON_ERR(MaybeInitializeLocked(frame->sample_rate_hz_,
                                      frame->sample_rate_hz_,
                                      rev_in_format_.rate(),
                                      frame->num_channels_,
                                      frame->num_channels_,
                                      rev_in_format_.num_channels()));
  if (frame->samples_per_channel_ != fwd_in_format_.samples_per_channel()) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_input_data(frame->data_, data_size);
  }
#endif

  capture_audio_->DeinterleaveFrom(frame);
  RETURN_ON_ERR(ProcessStreamLocked());
  capture_audio_->InterleaveTo(frame, output_copy_needed(is_data_processed()));

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_output_data(frame->data_, data_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  return kNoError;
}

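// Runs the capture-side (near-end) processing chain on capture_audio_.
// Assumes crit_ is held and that the buffers match the current stream formats.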
int AudioProcessingImpl::ProcessStreamLocked() {
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    msg->set_delay(stream_delay_ms_);
    msg->set_drift(echo_cancellation_->stream_drift_samples());
    msg->set_level(gain_control()->stream_analog_level());
    msg->set_keypress(key_pressed_);
  }
#endif

  AudioBuffer* ca = capture_audio_.get();  // For brevity.
  if (use_new_agc_ && gain_control_->is_enabled()) {
    agc_manager_->AnalyzePreProcess(ca->channels()[0],
                                    ca->num_channels(),
                                    fwd_proc_format_.samples_per_channel());
  }

  bool data_processed = is_data_processed();
  if (analysis_needed(data_processed)) {
    ca->SplitIntoFrequencyBands();
  }

#ifdef WEBRTC_BEAMFORMER
  if (beamformer_enabled_) {
    beamformer_->ProcessChunk(ca->split_data_f(), ca->split_data_f());
    ca->set_num_channels(1);
  }
#endif

  RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca));
  RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca));
  RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca));
  RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca));

  if (echo_control_mobile_->is_enabled() && noise_suppression_->is_enabled()) {
    ca->CopyLowPassToReference();
  }
  RETURN_ON_ERR(noise_suppression_->ProcessCaptureAudio(ca));
  RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(ca));
  RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(ca));

  if (use_new_agc_ &&
      gain_control_->is_enabled() &&
      (!beamformer_enabled_ || beamformer_->is_target_present())) {
    agc_manager_->Process(ca->split_bands_const(0)[kBand0To8kHz],
                          ca->num_frames_per_band(),
                          split_rate_);
  }
  RETURN_ON_ERR(gain_control_->ProcessCaptureAudio(ca));

  if (synthesis_needed(data_processed)) {
    ca->MergeFrequencyBands();
  }

  // TODO(aluebs): Investigate if the transient suppression placement should be
  // before or after the AGC.
  if (transient_suppressor_enabled_) {
    float voice_probability =
        agc_manager_.get() ? agc_manager_->voice_probability() : 1.f;

    transient_suppressor_->Suppress(ca->channels_f()[0],
                                    ca->num_frames(),
                                    ca->num_channels(),
                                    ca->split_bands_const_f(0)[kBand0To8kHz],
                                    ca->num_frames_per_band(),
                                    ca->keyboard_data(),
                                    ca->num_keyboard_frames(),
                                    voice_probability,
                                    key_pressed_);
  }

  // The level estimator operates on the recombined data.
  RETURN_ON_ERR(level_estimator_->ProcessStream(ca));

  was_stream_delay_set_ = false;
  return kNoError;
}

int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data,
                                              int samples_per_channel,
                                              int sample_rate_hz,
                                              ChannelLayout layout) {
  CriticalSectionScoped crit_scoped(crit_);
  if (data == NULL) {
    return kNullPointerError;
  }

  const int num_channels = ChannelsFromLayout(layout);
  RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(),
                                      fwd_out_format_.rate(),
                                      sample_rate_hz,
                                      fwd_in_format_.num_channels(),
                                      fwd_out_format_.num_channels(),
                                      num_channels));
  if (samples_per_channel != rev_in_format_.samples_per_channel()) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t channel_size =
        sizeof(float) * rev_in_format_.samples_per_channel();
    for (int i = 0; i < num_channels; ++i)
      msg->add_channel(data[i], channel_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  render_audio_->CopyFrom(data, samples_per_channel, layout);
  return AnalyzeReverseStreamLocked();
}

int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  if (frame == NULL) {
    return kNullPointerError;
  }
  // Must be a native rate.
  if (frame->sample_rate_hz_ != kSampleRate8kHz &&
      frame->sample_rate_hz_ != kSampleRate16kHz &&
      frame->sample_rate_hz_ != kSampleRate32kHz &&
      frame->sample_rate_hz_ != kSampleRate48kHz) {
    return kBadSampleRateError;
  }
  // This interface does not tolerate different forward and reverse rates.
  if (frame->sample_rate_hz_ != fwd_in_format_.rate()) {
    return kBadSampleRateError;
  }

  RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(),
                                      fwd_out_format_.rate(),
                                      frame->sample_rate_hz_,
                                      fwd_in_format_.num_channels(),
                                      fwd_in_format_.num_channels(),
                                      frame->num_channels_));
  if (frame->samples_per_channel_ != rev_in_format_.samples_per_channel()) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_data(frame->data_, data_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  render_audio_->DeinterleaveFrom(frame);
  return AnalyzeReverseStreamLocked();
}

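// Runs the render-side (far-end) analysis on render_audio_: band-splitting
// when required, followed by the AEC, AECM and (legacy) AGC render paths.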
int AudioProcessingImpl::AnalyzeReverseStreamLocked() {
  AudioBuffer* ra = render_audio_.get();  // For brevity.
  if (rev_proc_format_.rate() == kSampleRate32kHz) {
    ra->SplitIntoFrequencyBands();
  }

  RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra));
  RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra));
  if (!use_new_agc_) {
    RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra));
  }

  return kNoError;
}

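// Applies delay_offset_ms_ and clamps the result to [0, 500] ms, returning
// kBadStreamParameterWarning if clamping was necessary.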
int AudioProcessingImpl::set_stream_delay_ms(int delay) {
  Error retval = kNoError;
  was_stream_delay_set_ = true;
  delay += delay_offset_ms_;

  if (delay < 0) {
    delay = 0;
    retval = kBadStreamParameterWarning;
  }

  // TODO(ajm): the max is rather arbitrarily chosen; investigate.
  if (delay > 500) {
    delay = 500;
    retval = kBadStreamParameterWarning;
  }

  stream_delay_ms_ = delay;
  return retval;
}

int AudioProcessingImpl::stream_delay_ms() const {
  return stream_delay_ms_;
}

bool AudioProcessingImpl::was_stream_delay_set() const {
  return was_stream_delay_set_;
}

void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
  key_pressed_ = key_pressed;
}

bool AudioProcessingImpl::stream_key_pressed() const {
  return key_pressed_;
}

void AudioProcessingImpl::set_delay_offset_ms(int offset) {
  CriticalSectionScoped crit_scoped(crit_);
  delay_offset_ms_ = offset;
}

int AudioProcessingImpl::delay_offset_ms() const {
  return delay_offset_ms_;
}

int AudioProcessingImpl::StartDebugRecording(
    const char filename[AudioProcessing::kMaxFilenameSize]) {
  CriticalSectionScoped crit_scoped(crit_);
  assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize);

  if (filename == NULL) {
    return kNullPointerError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stop any ongoing recording.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }

  if (debug_file_->OpenFile(filename, false) == -1) {
    debug_file_->CloseFile();
    return kFileError;
  }

  int err = WriteInitMessage();
  if (err != kNoError) {
    return err;
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}

int AudioProcessingImpl::StartDebugRecording(FILE* handle) {
  CriticalSectionScoped crit_scoped(crit_);

  if (handle == NULL) {
    return kNullPointerError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stop any ongoing recording.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }

  if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) {
    return kFileError;
  }

  int err = WriteInitMessage();
  if (err != kNoError) {
    return err;
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}

int AudioProcessingImpl::StartDebugRecordingForPlatformFile(
    rtc::PlatformFile handle) {
  FILE* stream = rtc::FdopenPlatformFileForWriting(handle);
  return StartDebugRecording(stream);
}

int AudioProcessingImpl::StopDebugRecording() {
  CriticalSectionScoped crit_scoped(crit_);

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // We just return if recording hasn't started.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}

EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
  return echo_cancellation_;
}

EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
  return echo_control_mobile_;
}

GainControl* AudioProcessingImpl::gain_control() const {
  if (use_new_agc_) {
    return gain_control_for_new_agc_.get();
  }
  return gain_control_;
}

HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
  return high_pass_filter_;
}

LevelEstimator* AudioProcessingImpl::level_estimator() const {
  return level_estimator_;
}

NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
  return noise_suppression_;
}

VoiceDetection* AudioProcessingImpl::voice_detection() const {
  return voice_detection_;
}

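// Returns true if any enabled component may modify the audio; level estimation
// and voice detection alone leave the data untouched.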
bool AudioProcessingImpl::is_data_processed() const {
  if (beamformer_enabled_) {
    return true;
  }

  int enabled_count = 0;
  std::list<ProcessingComponent*>::const_iterator it;
  for (it = component_list_.begin(); it != component_list_.end(); it++) {
    if ((*it)->is_component_enabled()) {
      enabled_count++;
    }
  }

  // Data is unchanged if no components are enabled, or if only
  // level_estimator_ or voice_detection_ is enabled.
  if (enabled_count == 0) {
    return false;
  } else if (enabled_count == 1) {
    if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
      return false;
    }
  } else if (enabled_count == 2) {
    if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
      return false;
    }
  }
  return true;
}

bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const {
  // Check if we've upmixed or downmixed the audio.
  return ((fwd_out_format_.num_channels() != fwd_in_format_.num_channels()) ||
          is_data_processed || transient_suppressor_enabled_);
}

bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
  return (is_data_processed && (fwd_proc_format_.rate() == kSampleRate32kHz ||
                                fwd_proc_format_.rate() == kSampleRate48kHz));
}

bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
  if (!is_data_processed && !voice_detection_->is_enabled() &&
      !transient_suppressor_enabled_) {
    // Only level_estimator_ is enabled.
    return false;
  } else if (fwd_proc_format_.rate() == kSampleRate32kHz ||
             fwd_proc_format_.rate() == kSampleRate48kHz) {
    // Something besides level_estimator_ is enabled, and we have super-wb.
    return true;
  }
  return false;
}

int AudioProcessingImpl::InitializeExperimentalAgc() {
  if (use_new_agc_) {
    if (!agc_manager_.get()) {
      agc_manager_.reset(
          new AgcManagerDirect(gain_control_, gain_control_for_new_agc_.get()));
    }
    agc_manager_->Initialize();
    agc_manager_->SetCaptureMuted(output_will_be_muted_);
  }
  return kNoError;
}

int AudioProcessingImpl::InitializeTransient() {
  if (transient_suppressor_enabled_) {
    if (!transient_suppressor_.get()) {
      transient_suppressor_.reset(new TransientSuppressor());
    }
    transient_suppressor_->Initialize(fwd_proc_format_.rate(),
                                      split_rate_,
                                      fwd_out_format_.num_channels());
  }
  return kNoError;
}

void AudioProcessingImpl::InitializeBeamformer() {
  if (beamformer_enabled_) {
#ifdef WEBRTC_BEAMFORMER
    if (!beamformer_) {
      beamformer_.reset(new Beamformer(array_geometry_));
    }
    beamformer_->Initialize(kChunkSizeMs, split_rate_);
#else
    assert(false);
#endif
  }
}

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
int AudioProcessingImpl::WriteMessageToDebugFile() {
  int32_t size = event_msg_->ByteSize();
  if (size <= 0) {
    return kUnspecifiedError;
  }
#if defined(WEBRTC_ARCH_BIG_ENDIAN)
  // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
  // pretty safe in assuming little-endian.
#endif

  if (!event_msg_->SerializeToString(&event_str_)) {
    return kUnspecifiedError;
  }

  // Write message preceded by its size.
  if (!debug_file_->Write(&size, sizeof(int32_t))) {
    return kFileError;
  }
  if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
    return kFileError;
  }

  event_msg_->Clear();

  return kNoError;
}

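// Records the current stream formats in an INIT event at the start of a debug
// recording.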
int AudioProcessingImpl::WriteInitMessage() {
  event_msg_->set_type(audioproc::Event::INIT);
  audioproc::Init* msg = event_msg_->mutable_init();
  msg->set_sample_rate(fwd_in_format_.rate());
  msg->set_num_input_channels(fwd_in_format_.num_channels());
  msg->set_num_output_channels(fwd_out_format_.num_channels());
  msg->set_num_reverse_channels(rev_in_format_.num_channels());
  msg->set_reverse_sample_rate(rev_in_format_.rate());
  msg->set_output_sample_rate(fwd_out_format_.rate());

  int err = WriteMessageToDebugFile();
  if (err != kNoError) {
    return err;
  }

  return kNoError;
}
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP

}  // namespace webrtc