blob: 4d36ff7e7b427993957a28157e670ff05f4a8b98 [file] [log] [blame]
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
10
andrew@webrtc.org78693fe2013-03-01 16:36:19 +000011#include "webrtc/modules/audio_processing/audio_processing_impl.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000012
ajm@google.com808e0e02011-08-03 21:08:51 +000013#include <assert.h>
niklase@google.com470e71d2011-07-07 08:21:25 +000014
andrew@webrtc.org78693fe2013-03-01 16:36:19 +000015#include "webrtc/modules/audio_processing/audio_buffer.h"
andrew@webrtc.org61e596f2013-07-25 18:28:29 +000016#include "webrtc/modules/audio_processing/echo_cancellation_impl_wrapper.h"
andrew@webrtc.org78693fe2013-03-01 16:36:19 +000017#include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
18#include "webrtc/modules/audio_processing/gain_control_impl.h"
19#include "webrtc/modules/audio_processing/high_pass_filter_impl.h"
20#include "webrtc/modules/audio_processing/level_estimator_impl.h"
21#include "webrtc/modules/audio_processing/noise_suppression_impl.h"
22#include "webrtc/modules/audio_processing/processing_component.h"
23#include "webrtc/modules/audio_processing/splitting_filter.h"
24#include "webrtc/modules/audio_processing/voice_detection_impl.h"
25#include "webrtc/modules/interface/module_common_types.h"
26#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
27#include "webrtc/system_wrappers/interface/file_wrapper.h"
28#include "webrtc/system_wrappers/interface/logging.h"
andrew@webrtc.org7bf26462011-12-03 00:03:31 +000029
30#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
31// Files generated at build-time by the protobuf compiler.
leozwang@webrtc.orga3736342012-03-16 21:36:00 +000032#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
leozwang@webrtc.org534e4952012-10-22 21:21:52 +000033#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000034#else
ajm@google.com808e0e02011-08-03 21:08:51 +000035#include "webrtc/audio_processing/debug.pb.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000036#endif
andrew@webrtc.org7bf26462011-12-03 00:03:31 +000037#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +000038
39namespace webrtc {
niklase@google.com470e71d2011-07-07 08:21:25 +000040AudioProcessing* AudioProcessing::Create(int id) {
niklase@google.com470e71d2011-07-07 08:21:25 +000041 AudioProcessingImpl* apm = new AudioProcessingImpl(id);
42 if (apm->Initialize() != kNoError) {
43 delete apm;
44 apm = NULL;
45 }
46
47 return apm;
48}
49
pbos@webrtc.org91620802013-08-02 11:44:11 +000050int32_t AudioProcessing::TimeUntilNextProcess() { return -1; }
51int32_t AudioProcessing::Process() { return -1; }
52
// Constructs the APM with defaults: 16 kHz rate, mono on all streams, no
// stream delay. All sub-components are allocated here and registered in
// component_list_, which both drives iteration (init, options) and owns them
// for destruction. Audio buffers are allocated later, in InitializeLocked().
AudioProcessingImpl::AudioProcessingImpl(int id)
    : id_(id),
      echo_cancellation_(NULL),
      echo_control_mobile_(NULL),
      gain_control_(NULL),
      high_pass_filter_(NULL),
      level_estimator_(NULL),
      noise_suppression_(NULL),
      voice_detection_(NULL),
      crit_(CriticalSectionWrapper::CreateCriticalSection()),
      render_audio_(NULL),
      capture_audio_(NULL),
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
      debug_file_(FileWrapper::Create()),
      event_msg_(new audioproc::Event()),
#endif
      sample_rate_hz_(kSampleRate16kHz),
      split_sample_rate_hz_(kSampleRate16kHz),
      // 10 ms frames: rate / 100 samples per channel.
      samples_per_channel_(sample_rate_hz_ / 100),
      stream_delay_ms_(0),
      delay_offset_ms_(0),
      was_stream_delay_set_(false),
      num_reverse_channels_(1),
      num_input_channels_(1),
      num_output_channels_(1) {
  // AEC goes through a wrapper factory (unlike the other components).
  echo_cancellation_ = EchoCancellationImplWrapper::Create(this);
  component_list_.push_back(echo_cancellation_);

  echo_control_mobile_ = new EchoControlMobileImpl(this);
  component_list_.push_back(echo_control_mobile_);

  gain_control_ = new GainControlImpl(this);
  component_list_.push_back(gain_control_);

  high_pass_filter_ = new HighPassFilterImpl(this);
  component_list_.push_back(high_pass_filter_);

  level_estimator_ = new LevelEstimatorImpl(this);
  component_list_.push_back(level_estimator_);

  noise_suppression_ = new NoiseSuppressionImpl(this);
  component_list_.push_back(noise_suppression_);

  voice_detection_ = new VoiceDetectionImpl(this);
  component_list_.push_back(voice_detection_);
}
99
100AudioProcessingImpl::~AudioProcessingImpl() {
andrew@webrtc.org81865342012-10-27 00:28:27 +0000101 {
102 CriticalSectionScoped crit_scoped(crit_);
103 while (!component_list_.empty()) {
104 ProcessingComponent* component = component_list_.front();
105 component->Destroy();
106 delete component;
107 component_list_.pop_front();
108 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000109
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000110#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
andrew@webrtc.org81865342012-10-27 00:28:27 +0000111 if (debug_file_->Open()) {
112 debug_file_->CloseFile();
113 }
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000114#endif
ajm@google.com808e0e02011-08-03 21:08:51 +0000115
andrew@webrtc.org81865342012-10-27 00:28:27 +0000116 if (render_audio_) {
117 delete render_audio_;
118 render_audio_ = NULL;
119 }
120
121 if (capture_audio_) {
122 delete capture_audio_;
123 capture_audio_ = NULL;
124 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000125 }
126
andrew@webrtc.org16cfbe22012-08-29 16:58:25 +0000127 delete crit_;
128 crit_ = NULL;
niklase@google.com470e71d2011-07-07 08:21:25 +0000129}
130
131CriticalSectionWrapper* AudioProcessingImpl::crit() const {
132 return crit_;
133}
134
135int AudioProcessingImpl::split_sample_rate_hz() const {
136 return split_sample_rate_hz_;
137}
138
139int AudioProcessingImpl::Initialize() {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000140 CriticalSectionScoped crit_scoped(crit_);
niklase@google.com470e71d2011-07-07 08:21:25 +0000141 return InitializeLocked();
142}
143
144int AudioProcessingImpl::InitializeLocked() {
145 if (render_audio_ != NULL) {
146 delete render_audio_;
147 render_audio_ = NULL;
148 }
149
150 if (capture_audio_ != NULL) {
151 delete capture_audio_;
152 capture_audio_ = NULL;
153 }
154
ajm@google.com808e0e02011-08-03 21:08:51 +0000155 render_audio_ = new AudioBuffer(num_reverse_channels_,
niklase@google.com470e71d2011-07-07 08:21:25 +0000156 samples_per_channel_);
ajm@google.com808e0e02011-08-03 21:08:51 +0000157 capture_audio_ = new AudioBuffer(num_input_channels_,
niklase@google.com470e71d2011-07-07 08:21:25 +0000158 samples_per_channel_);
159
160 was_stream_delay_set_ = false;
161
162 // Initialize all components.
163 std::list<ProcessingComponent*>::iterator it;
andrew@webrtc.org81865342012-10-27 00:28:27 +0000164 for (it = component_list_.begin(); it != component_list_.end(); ++it) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000165 int err = (*it)->Initialize();
166 if (err != kNoError) {
167 return err;
168 }
169 }
170
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000171#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
ajm@google.com808e0e02011-08-03 21:08:51 +0000172 if (debug_file_->Open()) {
173 int err = WriteInitMessage();
174 if (err != kNoError) {
175 return err;
176 }
177 }
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000178#endif
ajm@google.com808e0e02011-08-03 21:08:51 +0000179
niklase@google.com470e71d2011-07-07 08:21:25 +0000180 return kNoError;
181}
182
andrew@webrtc.org61e596f2013-07-25 18:28:29 +0000183void AudioProcessingImpl::SetExtraOptions(const Config& config) {
184 std::list<ProcessingComponent*>::iterator it;
185 for (it = component_list_.begin(); it != component_list_.end(); ++it)
186 (*it)->SetExtraOptions(config);
187}
188
// Intentional no-op stub: the experimental-NS switch is ignored here and the
// call always reports success. The |enable| argument is unused.
int AudioProcessingImpl::EnableExperimentalNs(bool enable) {
  return kNoError;
}
192
// Sets the full-band sample rate for both streams and re-initializes.
// Accepts only 8, 16 or 32 kHz, and rejects rates above 16 kHz while AECM is
// enabled. Setting the current rate again is a cheap no-op (note: that early
// return also skips the validation below, which is safe because the stored
// rate was validated when it was set).
int AudioProcessingImpl::set_sample_rate_hz(int rate) {
  CriticalSectionScoped crit_scoped(crit_);
  if (rate == sample_rate_hz_) {
    return kNoError;
  }
  if (rate != kSampleRate8kHz &&
      rate != kSampleRate16kHz &&
      rate != kSampleRate32kHz) {
    return kBadParameterError;
  }
  if (echo_control_mobile_->is_enabled() && rate > kSampleRate16kHz) {
    LOG(LS_ERROR) << "AECM only supports 16 kHz or lower sample rates";
    return kUnsupportedComponentError;
  }

  sample_rate_hz_ = rate;
  // 10 ms frames.
  samples_per_channel_ = rate / 100;

  // At 32 kHz the components operate on a two-band split signal, each band
  // at 16 kHz; otherwise they see the full-band signal.
  if (sample_rate_hz_ == kSampleRate32kHz) {
    split_sample_rate_hz_ = kSampleRate16kHz;
  } else {
    split_sample_rate_hz_ = sample_rate_hz_;
  }

  return InitializeLocked();
}
219
220int AudioProcessingImpl::sample_rate_hz() const {
henrika@webrtc.org19da7192013-04-05 14:34:57 +0000221 CriticalSectionScoped crit_scoped(crit_);
niklase@google.com470e71d2011-07-07 08:21:25 +0000222 return sample_rate_hz_;
223}
224
225int AudioProcessingImpl::set_num_reverse_channels(int channels) {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000226 CriticalSectionScoped crit_scoped(crit_);
andrew@webrtc.org81865342012-10-27 00:28:27 +0000227 if (channels == num_reverse_channels_) {
228 return kNoError;
229 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000230 // Only stereo supported currently.
231 if (channels > 2 || channels < 1) {
232 return kBadParameterError;
233 }
234
ajm@google.com808e0e02011-08-03 21:08:51 +0000235 num_reverse_channels_ = channels;
niklase@google.com470e71d2011-07-07 08:21:25 +0000236
237 return InitializeLocked();
238}
239
240int AudioProcessingImpl::num_reverse_channels() const {
ajm@google.com808e0e02011-08-03 21:08:51 +0000241 return num_reverse_channels_;
niklase@google.com470e71d2011-07-07 08:21:25 +0000242}
243
244int AudioProcessingImpl::set_num_channels(
245 int input_channels,
246 int output_channels) {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000247 CriticalSectionScoped crit_scoped(crit_);
andrew@webrtc.org81865342012-10-27 00:28:27 +0000248 if (input_channels == num_input_channels_ &&
249 output_channels == num_output_channels_) {
250 return kNoError;
251 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000252 if (output_channels > input_channels) {
253 return kBadParameterError;
254 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000255 // Only stereo supported currently.
andrew@webrtc.org81865342012-10-27 00:28:27 +0000256 if (input_channels > 2 || input_channels < 1 ||
257 output_channels > 2 || output_channels < 1) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000258 return kBadParameterError;
259 }
260
ajm@google.com808e0e02011-08-03 21:08:51 +0000261 num_input_channels_ = input_channels;
262 num_output_channels_ = output_channels;
niklase@google.com470e71d2011-07-07 08:21:25 +0000263
264 return InitializeLocked();
265}
266
267int AudioProcessingImpl::num_input_channels() const {
ajm@google.com808e0e02011-08-03 21:08:51 +0000268 return num_input_channels_;
niklase@google.com470e71d2011-07-07 08:21:25 +0000269}
270
271int AudioProcessingImpl::num_output_channels() const {
ajm@google.com808e0e02011-08-03 21:08:51 +0000272 return num_output_channels_;
niklase@google.com470e71d2011-07-07 08:21:25 +0000273}
274
// Runs the full capture-side pipeline, in place, on one 10 ms frame:
// validate -> (dump input) -> deinterleave -> downmix -> band-split ->
// HPF -> AGC analysis -> AEC -> NS -> AECM -> VAD -> AGC -> band-merge ->
// level estimate -> interleave -> (dump output). The component order is
// deliberate (e.g. AGC analyzes before AEC but applies gain after all
// echo/noise processing); do not reorder. Returns the first component error,
// or kNoError.
int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  int err = kNoError;

  if (frame == NULL) {
    return kNullPointerError;
  }

  // The frame must match the configured rate, channel count and 10 ms size.
  if (frame->sample_rate_hz_ != sample_rate_hz_) {
    return kBadSampleRateError;
  }

  if (frame->num_channels_ != num_input_channels_) {
    return kBadNumberChannelsError;
  }

  if (frame->samples_per_channel_ != samples_per_channel_) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Record the unprocessed input plus the stream parameters (delay, drift,
  // analog level) in the pending STREAM event.
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_input_data(frame->data_, data_size);
    msg->set_delay(stream_delay_ms_);
    msg->set_drift(echo_cancellation_->stream_drift_samples());
    msg->set_level(gain_control_->stream_analog_level());
  }
#endif

  capture_audio_->DeinterleaveFrom(frame);

  // TODO(ajm): experiment with mixing and AEC placement.
  // Downmix before processing; the frame's channel count is updated so the
  // output frame reflects the mixed signal.
  if (num_output_channels_ < num_input_channels_) {
    capture_audio_->Mix(num_output_channels_);
    frame->num_channels_ = num_output_channels_;
  }

  bool data_processed = is_data_processed();
  if (analysis_needed(data_processed)) {
    for (int i = 0; i < num_output_channels_; i++) {
      // Split into a low and high band.
      SplittingFilterAnalysis(capture_audio_->data(i),
                              capture_audio_->low_pass_split_data(i),
                              capture_audio_->high_pass_split_data(i),
                              capture_audio_->analysis_filter_state1(i),
                              capture_audio_->analysis_filter_state2(i));
    }
  }

  err = high_pass_filter_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->AnalyzeCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_cancellation_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  // When both AECM and NS run, AECM needs a copy of the signal as it is
  // before NS modifies it.
  if (echo_control_mobile_->is_enabled() &&
      noise_suppression_->is_enabled()) {
    capture_audio_->CopyLowPassToReference();
  }

  err = noise_suppression_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_control_mobile_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = voice_detection_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  if (synthesis_needed(data_processed)) {
    for (int i = 0; i < num_output_channels_; i++) {
      // Recombine low and high bands.
      SplittingFilterSynthesis(capture_audio_->low_pass_split_data(i),
                               capture_audio_->high_pass_split_data(i),
                               capture_audio_->data(i),
                               capture_audio_->synthesis_filter_state1(i),
                               capture_audio_->synthesis_filter_state2(i));
    }
  }

  // The level estimator operates on the recombined data.
  err = level_estimator_->ProcessStream(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  capture_audio_->InterleaveTo(frame, interleave_needed(data_processed));

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Record the processed output and flush the STREAM event to disk.
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_output_data(frame->data_, data_size);
    err = WriteMessageToDebugFile();
    if (err != kNoError) {
      return err;
    }
  }
#endif

  // The delay must be reported anew before each ProcessStream() call.
  was_stream_delay_set_ = false;
  return kNoError;
}
405
// Feeds one 10 ms render-side (far-end) frame to the components that need a
// reference signal: AEC, AECM and AGC. The frame itself is not modified.
// Returns the first component error, or kNoError.
int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  int err = kNoError;

  if (frame == NULL) {
    return kNullPointerError;
  }

  // The frame must match the configured rate, reverse channel count and
  // 10 ms size.
  if (frame->sample_rate_hz_ != sample_rate_hz_) {
    return kBadSampleRateError;
  }

  if (frame->num_channels_ != num_reverse_channels_) {
    return kBadNumberChannelsError;
  }

  if (frame->samples_per_channel_ != samples_per_channel_) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Dump the reverse-stream data as its own event; unlike the capture path,
  // the message is written immediately since there is no output to append.
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_data(frame->data_, data_size);
    err = WriteMessageToDebugFile();
    if (err != kNoError) {
      return err;
    }
  }
#endif

  render_audio_->DeinterleaveFrom(frame);

  // TODO(ajm): turn the splitting filter into a component?
  if (sample_rate_hz_ == kSampleRate32kHz) {
    for (int i = 0; i < num_reverse_channels_; i++) {
      // Split into low and high band.
      SplittingFilterAnalysis(render_audio_->data(i),
                              render_audio_->low_pass_split_data(i),
                              render_audio_->high_pass_split_data(i),
                              render_audio_->analysis_filter_state1(i),
                              render_audio_->analysis_filter_state2(i));
    }
  }

  // TODO(ajm): warnings possible from components?
  err = echo_cancellation_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_control_mobile_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  return err;  // TODO(ajm): this is for returning warnings; necessary?
}
473
474int AudioProcessingImpl::set_stream_delay_ms(int delay) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000475 Error retval = kNoError;
niklase@google.com470e71d2011-07-07 08:21:25 +0000476 was_stream_delay_set_ = true;
andrew@webrtc.org6f9f8172012-03-06 19:03:39 +0000477 delay += delay_offset_ms_;
478
niklase@google.com470e71d2011-07-07 08:21:25 +0000479 if (delay < 0) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000480 delay = 0;
481 retval = kBadStreamParameterWarning;
niklase@google.com470e71d2011-07-07 08:21:25 +0000482 }
483
484 // TODO(ajm): the max is rather arbitrarily chosen; investigate.
485 if (delay > 500) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000486 delay = 500;
487 retval = kBadStreamParameterWarning;
niklase@google.com470e71d2011-07-07 08:21:25 +0000488 }
489
490 stream_delay_ms_ = delay;
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000491 return retval;
niklase@google.com470e71d2011-07-07 08:21:25 +0000492}
493
494int AudioProcessingImpl::stream_delay_ms() const {
495 return stream_delay_ms_;
496}
497
498bool AudioProcessingImpl::was_stream_delay_set() const {
499 return was_stream_delay_set_;
500}
501
andrew@webrtc.org6f9f8172012-03-06 19:03:39 +0000502void AudioProcessingImpl::set_delay_offset_ms(int offset) {
503 CriticalSectionScoped crit_scoped(crit_);
504 delay_offset_ms_ = offset;
505}
506
507int AudioProcessingImpl::delay_offset_ms() const {
508 return delay_offset_ms_;
509}
510
// Starts recording debug-dump protobuf events to |filename|, replacing any
// recording already in progress, and immediately writes an INIT message
// describing the current configuration. Returns kUnsupportedFunctionError
// when the dump support is compiled out.
int AudioProcessingImpl::StartDebugRecording(
    const char filename[AudioProcessing::kMaxFilenameSize]) {
  CriticalSectionScoped crit_scoped(crit_);
  assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize);

  if (filename == NULL) {
    return kNullPointerError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stop any ongoing recording.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }

  if (debug_file_->OpenFile(filename, false) == -1) {
    debug_file_->CloseFile();
    return kFileError;
  }

  int err = WriteInitMessage();
  if (err != kNoError) {
    return err;
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}
542
// Like StartDebugRecording(filename) but writes to an already-open FILE
// handle supplied by the caller; any prior recording is stopped first and an
// INIT message is written. Returns kUnsupportedFunctionError when the dump
// support is compiled out.
int AudioProcessingImpl::StartDebugRecording(FILE* handle) {
  CriticalSectionScoped crit_scoped(crit_);

  if (handle == NULL) {
    return kNullPointerError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stop any ongoing recording.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }

  if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) {
    return kFileError;
  }

  int err = WriteInitMessage();
  if (err != kNoError) {
    return err;
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}
571
niklase@google.com470e71d2011-07-07 08:21:25 +0000572int AudioProcessingImpl::StopDebugRecording() {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000573 CriticalSectionScoped crit_scoped(crit_);
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000574
575#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +0000576 // We just return if recording hasn't started.
577 if (debug_file_->Open()) {
578 if (debug_file_->CloseFile() == -1) {
579 return kFileError;
580 }
581 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000582 return kNoError;
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000583#else
584 return kUnsupportedFunctionError;
585#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +0000586}
587
// Component accessors. The returned pointers are owned by this object (they
// are created in the constructor and destroyed via component_list_ in the
// destructor), so they remain valid for the lifetime of the APM instance.
EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
  return echo_cancellation_;
}

EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
  return echo_control_mobile_;
}

GainControl* AudioProcessingImpl::gain_control() const {
  return gain_control_;
}

HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
  return high_pass_filter_;
}

LevelEstimator* AudioProcessingImpl::level_estimator() const {
  return level_estimator_;
}

NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
  return noise_suppression_;
}

VoiceDetection* AudioProcessingImpl::voice_detection() const {
  return voice_detection_;
}
615
// Replaces the module identifier under the lock. Always succeeds.
int32_t AudioProcessingImpl::ChangeUniqueId(const int32_t id) {
  CriticalSectionScoped crit_scoped(crit_);
  id_ = id;

  return kNoError;
}
ajm@google.com808e0e02011-08-03 21:08:51 +0000622
andrew@webrtc.org369166a2012-04-24 18:38:03 +0000623bool AudioProcessingImpl::is_data_processed() const {
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000624 int enabled_count = 0;
625 std::list<ProcessingComponent*>::const_iterator it;
626 for (it = component_list_.begin(); it != component_list_.end(); it++) {
627 if ((*it)->is_component_enabled()) {
628 enabled_count++;
629 }
630 }
631
632 // Data is unchanged if no components are enabled, or if only level_estimator_
633 // or voice_detection_ is enabled.
634 if (enabled_count == 0) {
635 return false;
636 } else if (enabled_count == 1) {
637 if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
638 return false;
639 }
640 } else if (enabled_count == 2) {
641 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
642 return false;
643 }
644 }
645 return true;
646}
647
andrew@webrtc.org369166a2012-04-24 18:38:03 +0000648bool AudioProcessingImpl::interleave_needed(bool is_data_processed) const {
649 // Check if we've upmixed or downmixed the audio.
650 return (num_output_channels_ != num_input_channels_ || is_data_processed);
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000651}
652
andrew@webrtc.org369166a2012-04-24 18:38:03 +0000653bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
654 return (is_data_processed && sample_rate_hz_ == kSampleRate32kHz);
655}
656
// Decides whether the capture signal must be band-split. Splitting only ever
// happens at 32 kHz, and only when some component will consume the split
// data — the condition includes voice detection because it reads the split
// signal even though it does not modify it (see ProcessStream()).
bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
  if (!is_data_processed && !voice_detection_->is_enabled()) {
    // No component (other than possibly the level estimator, which uses the
    // full-band signal) needs the split data.
    return false;
  } else if (sample_rate_hz_ == kSampleRate32kHz) {
    // Something besides level_estimator_ is enabled, and we have super-wb.
    return true;
  }
  return false;
}
667
668#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
ajm@google.com808e0e02011-08-03 21:08:51 +0000669int AudioProcessingImpl::WriteMessageToDebugFile() {
670 int32_t size = event_msg_->ByteSize();
671 if (size <= 0) {
672 return kUnspecifiedError;
673 }
andrew@webrtc.org621df672013-10-22 10:27:23 +0000674#if defined(WEBRTC_ARCH_BIG_ENDIAN)
ajm@google.com808e0e02011-08-03 21:08:51 +0000675 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
676 // pretty safe in assuming little-endian.
677#endif
678
679 if (!event_msg_->SerializeToString(&event_str_)) {
680 return kUnspecifiedError;
681 }
682
683 // Write message preceded by its size.
684 if (!debug_file_->Write(&size, sizeof(int32_t))) {
685 return kFileError;
686 }
687 if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
688 return kFileError;
689 }
690
691 event_msg_->Clear();
692
693 return 0;
694}
695
// Emits an INIT event to the debug file describing the current configuration
// (sample rates and channel counts), so a dump reader can interpret the
// subsequent STREAM / REVERSE_STREAM records. Returns the write error, if
// any, otherwise kNoError.
int AudioProcessingImpl::WriteInitMessage() {
  event_msg_->set_type(audioproc::Event::INIT);
  audioproc::Init* msg = event_msg_->mutable_init();
  msg->set_sample_rate(sample_rate_hz_);
  msg->set_device_sample_rate(echo_cancellation_->device_sample_rate_hz());
  msg->set_num_input_channels(num_input_channels_);
  msg->set_num_output_channels(num_output_channels_);
  msg->set_num_reverse_channels(num_reverse_channels_);

  int err = WriteMessageToDebugFile();
  if (err != kNoError) {
    return err;
  }

  return kNoError;
}
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000712#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +0000713} // namespace webrtc