blob: 143da44cdfcff5e9d04c3ccc3bb729f80307ecd5 [file] [log] [blame]
niklase@google.com470e71d2011-07-07 08:21:25 +00001/*
andrew@webrtc.org40654032012-01-30 20:51:15 +00002 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
niklase@google.com470e71d2011-07-07 08:21:25 +00003 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
andrew@webrtc.org78693fe2013-03-01 16:36:19 +000011#include "webrtc/modules/audio_processing/audio_processing_impl.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000012
ajm@google.com808e0e02011-08-03 21:08:51 +000013#include <assert.h>
niklase@google.com470e71d2011-07-07 08:21:25 +000014
andrew@webrtc.org78693fe2013-03-01 16:36:19 +000015#include "webrtc/modules/audio_processing/audio_buffer.h"
andrew@webrtc.org61e596f2013-07-25 18:28:29 +000016#include "webrtc/modules/audio_processing/echo_cancellation_impl_wrapper.h"
andrew@webrtc.org78693fe2013-03-01 16:36:19 +000017#include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
18#include "webrtc/modules/audio_processing/gain_control_impl.h"
19#include "webrtc/modules/audio_processing/high_pass_filter_impl.h"
20#include "webrtc/modules/audio_processing/level_estimator_impl.h"
21#include "webrtc/modules/audio_processing/noise_suppression_impl.h"
22#include "webrtc/modules/audio_processing/processing_component.h"
23#include "webrtc/modules/audio_processing/splitting_filter.h"
24#include "webrtc/modules/audio_processing/voice_detection_impl.h"
25#include "webrtc/modules/interface/module_common_types.h"
26#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
27#include "webrtc/system_wrappers/interface/file_wrapper.h"
28#include "webrtc/system_wrappers/interface/logging.h"
andrew@webrtc.org7bf26462011-12-03 00:03:31 +000029
30#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
31// Files generated at build-time by the protobuf compiler.
leozwang@webrtc.orga3736342012-03-16 21:36:00 +000032#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
leozwang@webrtc.org534e4952012-10-22 21:21:52 +000033#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000034#else
ajm@google.com808e0e02011-08-03 21:08:51 +000035#include "webrtc/audio_processing/debug.pb.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000036#endif
andrew@webrtc.org7bf26462011-12-03 00:03:31 +000037#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +000038
39namespace webrtc {
niklase@google.com470e71d2011-07-07 08:21:25 +000040AudioProcessing* AudioProcessing::Create(int id) {
niklase@google.com470e71d2011-07-07 08:21:25 +000041 AudioProcessingImpl* apm = new AudioProcessingImpl(id);
42 if (apm->Initialize() != kNoError) {
43 delete apm;
44 apm = NULL;
45 }
46
47 return apm;
48}
49
// Destroys an instance obtained from Create(). The cast to the concrete type
// ensures the derived destructor runs — NOTE(review): this suggests the base
// destructor is not (or may not be) virtual; confirm against the header.
void AudioProcessing::Destroy(AudioProcessing* apm) {
  delete static_cast<AudioProcessingImpl*>(apm);
}
53
// Module-interface stub: returns -1, presumably meaning "no periodic work
// scheduled" — APM is driven by the stream-processing calls instead. Confirm
// against the Module interface contract.
int32_t AudioProcessing::TimeUntilNextProcess() { return -1; }
// Module-interface stub: nothing to do on the process thread; -1 mirrors
// TimeUntilNextProcess() above.
int32_t AudioProcessing::Process() { return -1; }
56
// Constructs the implementation with default settings: 16 kHz sample rate,
// mono on all streams, no stream delay. Audio buffers are created later in
// Initialize(); here only the processing components are built and registered.
// NOTE(review): the initializer-list order must match the member declaration
// order in the header; do not reorder without checking it.
AudioProcessingImpl::AudioProcessingImpl(int id)
    : id_(id),
      echo_cancellation_(NULL),
      echo_control_mobile_(NULL),
      gain_control_(NULL),
      high_pass_filter_(NULL),
      level_estimator_(NULL),
      noise_suppression_(NULL),
      voice_detection_(NULL),
      crit_(CriticalSectionWrapper::CreateCriticalSection()),
      render_audio_(NULL),
      capture_audio_(NULL),
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
      debug_file_(FileWrapper::Create()),
      event_msg_(new audioproc::Event()),
#endif
      sample_rate_hz_(kSampleRate16kHz),
      split_sample_rate_hz_(kSampleRate16kHz),
      samples_per_channel_(sample_rate_hz_ / 100),  // 10 ms frames.
      stream_delay_ms_(0),
      delay_offset_ms_(0),
      was_stream_delay_set_(false),
      num_reverse_channels_(1),
      num_input_channels_(1),
      num_output_channels_(1) {
  // Each component is appended to component_list_, which owns them; the
  // destructor and InitializeLocked() iterate this list.
  echo_cancellation_ = EchoCancellationImplWrapper::Create(this);
  component_list_.push_back(echo_cancellation_);

  echo_control_mobile_ = new EchoControlMobileImpl(this);
  component_list_.push_back(echo_control_mobile_);

  gain_control_ = new GainControlImpl(this);
  component_list_.push_back(gain_control_);

  high_pass_filter_ = new HighPassFilterImpl(this);
  component_list_.push_back(high_pass_filter_);

  level_estimator_ = new LevelEstimatorImpl(this);
  component_list_.push_back(level_estimator_);

  noise_suppression_ = new NoiseSuppressionImpl(this);
  component_list_.push_back(noise_suppression_);

  voice_detection_ = new VoiceDetectionImpl(this);
  component_list_.push_back(voice_detection_);
}
103
// Tears down all owned state. The inner scope holds crit_ while the
// components, debug file and audio buffers are destroyed; the lock must be
// released (scope exit) before crit_ itself can be deleted below.
AudioProcessingImpl::~AudioProcessingImpl() {
  {
    CriticalSectionScoped crit_scoped(crit_);
    // component_list_ owns the components registered in the constructor.
    while (!component_list_.empty()) {
      ProcessingComponent* component = component_list_.front();
      component->Destroy();
      delete component;
      component_list_.pop_front();
    }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
    // Flush/close any active debug recording.
    if (debug_file_->Open()) {
      debug_file_->CloseFile();
    }
#endif

    if (render_audio_) {
      delete render_audio_;
      render_audio_ = NULL;
    }

    if (capture_audio_) {
      delete capture_audio_;
      capture_audio_ = NULL;
    }
  }

  // Safe only after the scoped lock above has been released.
  delete crit_;
  crit_ = NULL;
}
134
// Exposes the internal lock so the processing components (which hold a back
// pointer to this object) can serialize with the public API.
CriticalSectionWrapper* AudioProcessingImpl::crit() const {
  return crit_;
}
138
// Per-band rate used by the components: 16 kHz when running band-split
// (32 kHz mode), otherwise equal to sample_rate_hz_ (see set_sample_rate_hz).
int AudioProcessingImpl::split_sample_rate_hz() const {
  return split_sample_rate_hz_;
}
142
// Public entry point: takes the lock and delegates to InitializeLocked().
int AudioProcessingImpl::Initialize() {
  CriticalSectionScoped crit_scoped(crit_);
  return InitializeLocked();
}
147
148int AudioProcessingImpl::InitializeLocked() {
149 if (render_audio_ != NULL) {
150 delete render_audio_;
151 render_audio_ = NULL;
152 }
153
154 if (capture_audio_ != NULL) {
155 delete capture_audio_;
156 capture_audio_ = NULL;
157 }
158
ajm@google.com808e0e02011-08-03 21:08:51 +0000159 render_audio_ = new AudioBuffer(num_reverse_channels_,
niklase@google.com470e71d2011-07-07 08:21:25 +0000160 samples_per_channel_);
ajm@google.com808e0e02011-08-03 21:08:51 +0000161 capture_audio_ = new AudioBuffer(num_input_channels_,
niklase@google.com470e71d2011-07-07 08:21:25 +0000162 samples_per_channel_);
163
164 was_stream_delay_set_ = false;
165
166 // Initialize all components.
167 std::list<ProcessingComponent*>::iterator it;
andrew@webrtc.org81865342012-10-27 00:28:27 +0000168 for (it = component_list_.begin(); it != component_list_.end(); ++it) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000169 int err = (*it)->Initialize();
170 if (err != kNoError) {
171 return err;
172 }
173 }
174
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000175#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
ajm@google.com808e0e02011-08-03 21:08:51 +0000176 if (debug_file_->Open()) {
177 int err = WriteInitMessage();
178 if (err != kNoError) {
179 return err;
180 }
181 }
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000182#endif
ajm@google.com808e0e02011-08-03 21:08:51 +0000183
niklase@google.com470e71d2011-07-07 08:21:25 +0000184 return kNoError;
185}
186
// Forwards the config to every registered component; each component picks
// out the options it understands.
// NOTE(review): unlike the other public mutators, no crit_ lock is taken
// here — confirm callers serialize this with the processing calls.
void AudioProcessingImpl::SetExtraOptions(const Config& config) {
  std::list<ProcessingComponent*>::iterator it;
  for (it = component_list_.begin(); it != component_list_.end(); ++it)
    (*it)->SetExtraOptions(config);
}
192
niklase@google.com470e71d2011-07-07 08:21:25 +0000193int AudioProcessingImpl::set_sample_rate_hz(int rate) {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000194 CriticalSectionScoped crit_scoped(crit_);
andrew@webrtc.org81865342012-10-27 00:28:27 +0000195 if (rate == sample_rate_hz_) {
196 return kNoError;
197 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000198 if (rate != kSampleRate8kHz &&
199 rate != kSampleRate16kHz &&
200 rate != kSampleRate32kHz) {
201 return kBadParameterError;
202 }
andrew@webrtc.org78693fe2013-03-01 16:36:19 +0000203 if (echo_control_mobile_->is_enabled() && rate > kSampleRate16kHz) {
204 LOG(LS_ERROR) << "AECM only supports 16 kHz or lower sample rates";
205 return kUnsupportedComponentError;
206 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000207
208 sample_rate_hz_ = rate;
209 samples_per_channel_ = rate / 100;
210
211 if (sample_rate_hz_ == kSampleRate32kHz) {
212 split_sample_rate_hz_ = kSampleRate16kHz;
213 } else {
214 split_sample_rate_hz_ = sample_rate_hz_;
215 }
216
217 return InitializeLocked();
218}
219
// Returns the current full-band sample rate (Hz), under the lock.
int AudioProcessingImpl::sample_rate_hz() const {
  CriticalSectionScoped crit_scoped(crit_);
  return sample_rate_hz_;
}
224
225int AudioProcessingImpl::set_num_reverse_channels(int channels) {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000226 CriticalSectionScoped crit_scoped(crit_);
andrew@webrtc.org81865342012-10-27 00:28:27 +0000227 if (channels == num_reverse_channels_) {
228 return kNoError;
229 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000230 // Only stereo supported currently.
231 if (channels > 2 || channels < 1) {
232 return kBadParameterError;
233 }
234
ajm@google.com808e0e02011-08-03 21:08:51 +0000235 num_reverse_channels_ = channels;
niklase@google.com470e71d2011-07-07 08:21:25 +0000236
237 return InitializeLocked();
238}
239
// Returns the render-stream channel count.
// NOTE(review): read without taking crit_, unlike sample_rate_hz() — confirm
// this is intentionally lock-free.
int AudioProcessingImpl::num_reverse_channels() const {
  return num_reverse_channels_;
}
243
244int AudioProcessingImpl::set_num_channels(
245 int input_channels,
246 int output_channels) {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000247 CriticalSectionScoped crit_scoped(crit_);
andrew@webrtc.org81865342012-10-27 00:28:27 +0000248 if (input_channels == num_input_channels_ &&
249 output_channels == num_output_channels_) {
250 return kNoError;
251 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000252 if (output_channels > input_channels) {
253 return kBadParameterError;
254 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000255 // Only stereo supported currently.
andrew@webrtc.org81865342012-10-27 00:28:27 +0000256 if (input_channels > 2 || input_channels < 1 ||
257 output_channels > 2 || output_channels < 1) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000258 return kBadParameterError;
259 }
260
ajm@google.com808e0e02011-08-03 21:08:51 +0000261 num_input_channels_ = input_channels;
262 num_output_channels_ = output_channels;
niklase@google.com470e71d2011-07-07 08:21:25 +0000263
264 return InitializeLocked();
265}
266
// Returns the capture-stream input channel count (read without the lock).
int AudioProcessingImpl::num_input_channels() const {
  return num_input_channels_;
}
270
// Returns the capture-stream output channel count (read without the lock).
int AudioProcessingImpl::num_output_channels() const {
  return num_output_channels_;
}
274
// Runs the full capture-side (near-end) processing chain on one 10 ms frame,
// in place. The frame must match the configured rate, input channel count
// and frame size. Component order is significant: HPF -> AGC analysis ->
// AEC -> NS -> AECM -> VAD -> AGC, with band splitting/recombining around
// the chain in 32 kHz mode. Returns kNoError or the first component error.
int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  int err = kNoError;

  if (frame == NULL) {
    return kNullPointerError;
  }

  if (frame->sample_rate_hz_ != sample_rate_hz_) {
    return kBadSampleRateError;
  }

  if (frame->num_channels_ != num_input_channels_) {
    return kBadNumberChannelsError;
  }

  if (frame->samples_per_channel_ != samples_per_channel_) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Record the unprocessed input plus the per-frame stream parameters.
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_input_data(frame->data_, data_size);
    msg->set_delay(stream_delay_ms_);
    msg->set_drift(echo_cancellation_->stream_drift_samples());
    msg->set_level(gain_control_->stream_analog_level());
  }
#endif

  capture_audio_->DeinterleaveFrom(frame);

  // TODO(ajm): experiment with mixing and AEC placement.
  // Downmix before processing if fewer output channels were configured; the
  // frame's channel count is updated to match.
  if (num_output_channels_ < num_input_channels_) {
    capture_audio_->Mix(num_output_channels_);
    frame->num_channels_ = num_output_channels_;
  }

  bool data_processed = is_data_processed();
  if (analysis_needed(data_processed)) {
    for (int i = 0; i < num_output_channels_; i++) {
      // Split into a low and high band.
      SplittingFilterAnalysis(capture_audio_->data(i),
                              capture_audio_->low_pass_split_data(i),
                              capture_audio_->high_pass_split_data(i),
                              capture_audio_->analysis_filter_state1(i),
                              capture_audio_->analysis_filter_state2(i));
    }
  }

  err = high_pass_filter_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  // AGC analysis runs on pre-AEC audio.
  err = gain_control_->AnalyzeCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_cancellation_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  // AECM needs an unsuppressed reference of the low band when NS runs first.
  if (echo_control_mobile_->is_enabled() &&
      noise_suppression_->is_enabled()) {
    capture_audio_->CopyLowPassToReference();
  }

  err = noise_suppression_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_control_mobile_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = voice_detection_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  if (synthesis_needed(data_processed)) {
    for (int i = 0; i < num_output_channels_; i++) {
      // Recombine low and high bands.
      SplittingFilterSynthesis(capture_audio_->low_pass_split_data(i),
                               capture_audio_->high_pass_split_data(i),
                               capture_audio_->data(i),
                               capture_audio_->synthesis_filter_state1(i),
                               capture_audio_->synthesis_filter_state2(i));
    }
  }

  // The level estimator operates on the recombined data.
  err = level_estimator_->ProcessStream(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  // Copy back into the frame; interleaving is skipped when nothing changed.
  capture_audio_->InterleaveTo(frame, interleave_needed(data_processed));

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Append the processed output and flush the event to the debug file.
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_output_data(frame->data_, data_size);
    err = WriteMessageToDebugFile();
    if (err != kNoError) {
      return err;
    }
  }
#endif

  // The delay must be reported anew for every frame.
  was_stream_delay_set_ = false;
  return kNoError;
}
405
// Feeds one 10 ms render-side (far-end) frame to the components that need a
// far-end reference: AEC, AECM and AGC. The frame is only analyzed, never
// modified. It must match the configured rate, reverse channel count and
// frame size. Returns kNoError or the first component error.
int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  int err = kNoError;

  if (frame == NULL) {
    return kNullPointerError;
  }

  if (frame->sample_rate_hz_ != sample_rate_hz_) {
    return kBadSampleRateError;
  }

  if (frame->num_channels_ != num_reverse_channels_) {
    return kBadNumberChannelsError;
  }

  if (frame->samples_per_channel_ != samples_per_channel_) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Record the render frame to the debug file before analysis.
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_data(frame->data_, data_size);
    err = WriteMessageToDebugFile();
    if (err != kNoError) {
      return err;
    }
  }
#endif

  render_audio_->DeinterleaveFrom(frame);

  // TODO(ajm): turn the splitting filter into a component?
  // In 32 kHz mode the far-end reference is consumed band-split.
  if (sample_rate_hz_ == kSampleRate32kHz) {
    for (int i = 0; i < num_reverse_channels_; i++) {
      // Split into low and high band.
      SplittingFilterAnalysis(render_audio_->data(i),
                              render_audio_->low_pass_split_data(i),
                              render_audio_->high_pass_split_data(i),
                              render_audio_->analysis_filter_state1(i),
                              render_audio_->analysis_filter_state2(i));
    }
  }

  // TODO(ajm): warnings possible from components?
  err = echo_cancellation_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_control_mobile_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  return err;  // TODO(ajm): this is for returning warnings; necessary?
}
473
474int AudioProcessingImpl::set_stream_delay_ms(int delay) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000475 Error retval = kNoError;
niklase@google.com470e71d2011-07-07 08:21:25 +0000476 was_stream_delay_set_ = true;
andrew@webrtc.org6f9f8172012-03-06 19:03:39 +0000477 delay += delay_offset_ms_;
478
niklase@google.com470e71d2011-07-07 08:21:25 +0000479 if (delay < 0) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000480 delay = 0;
481 retval = kBadStreamParameterWarning;
niklase@google.com470e71d2011-07-07 08:21:25 +0000482 }
483
484 // TODO(ajm): the max is rather arbitrarily chosen; investigate.
485 if (delay > 500) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000486 delay = 500;
487 retval = kBadStreamParameterWarning;
niklase@google.com470e71d2011-07-07 08:21:25 +0000488 }
489
490 stream_delay_ms_ = delay;
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000491 return retval;
niklase@google.com470e71d2011-07-07 08:21:25 +0000492}
493
// Returns the (offset-adjusted, clamped) delay stored by set_stream_delay_ms.
int AudioProcessingImpl::stream_delay_ms() const {
  return stream_delay_ms_;
}
497
// True if set_stream_delay_ms() was called since the last ProcessStream()
// (ProcessStream clears this flag at the end of each frame).
bool AudioProcessingImpl::was_stream_delay_set() const {
  return was_stream_delay_set_;
}
501
// Sets a fixed offset (may be negative) added to every delay reported via
// set_stream_delay_ms().
void AudioProcessingImpl::set_delay_offset_ms(int offset) {
  CriticalSectionScoped crit_scoped(crit_);
  delay_offset_ms_ = offset;
}
506
// Returns the configured delay offset.
// NOTE(review): read without the lock, although the setter locks — confirm
// this asymmetry is intentional.
int AudioProcessingImpl::delay_offset_ms() const {
  return delay_offset_ms_;
}
510
// Starts recording APM inputs/outputs/settings to a protobuf debug file at
// the given path, replacing any recording already in progress. The current
// configuration is written first (WriteInitMessage). Returns
// kUnsupportedFunctionError when built without WEBRTC_AUDIOPROC_DEBUG_DUMP.
int AudioProcessingImpl::StartDebugRecording(
    const char filename[AudioProcessing::kMaxFilenameSize]) {
  CriticalSectionScoped crit_scoped(crit_);
  // Compile-time-sized array parameter must agree with FileWrapper's limit.
  assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize);

  if (filename == NULL) {
    return kNullPointerError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stop any ongoing recording.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }

  if (debug_file_->OpenFile(filename, false) == -1) {
    debug_file_->CloseFile();
    return kFileError;
  }

  int err = WriteInitMessage();
  if (err != kNoError) {
    return err;
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}
542
// Stops an active debug recording; a no-op success if none is in progress.
// Returns kUnsupportedFunctionError when built without
// WEBRTC_AUDIOPROC_DEBUG_DUMP.
int AudioProcessingImpl::StopDebugRecording() {
  CriticalSectionScoped crit_scoped(crit_);

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // We just return if recording hasn't started.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}
558
// Accessor for the AEC component (owned by component_list_; do not delete).
EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
  return echo_cancellation_;
}
562
// Accessor for the AECM component (owned by component_list_; do not delete).
EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
  return echo_control_mobile_;
}
566
// Accessor for the AGC component (owned by component_list_; do not delete).
GainControl* AudioProcessingImpl::gain_control() const {
  return gain_control_;
}
570
// Accessor for the high-pass filter (owned by component_list_; do not delete).
HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
  return high_pass_filter_;
}
574
// Accessor for the level estimator (owned by component_list_; do not delete).
LevelEstimator* AudioProcessingImpl::level_estimator() const {
  return level_estimator_;
}
578
// Accessor for the noise suppressor (owned by component_list_; do not delete).
NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
  return noise_suppression_;
}
582
// Accessor for the voice detector (owned by component_list_; do not delete).
VoiceDetection* AudioProcessingImpl::voice_detection() const {
  return voice_detection_;
}
586
// Module-interface hook: updates this instance's id under the lock.
int32_t AudioProcessingImpl::ChangeUniqueId(const int32_t id) {
  CriticalSectionScoped crit_scoped(crit_);
  id_ = id;

  return kNoError;
}
ajm@google.com808e0e02011-08-03 21:08:51 +0000593
andrew@webrtc.org369166a2012-04-24 18:38:03 +0000594bool AudioProcessingImpl::is_data_processed() const {
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000595 int enabled_count = 0;
596 std::list<ProcessingComponent*>::const_iterator it;
597 for (it = component_list_.begin(); it != component_list_.end(); it++) {
598 if ((*it)->is_component_enabled()) {
599 enabled_count++;
600 }
601 }
602
603 // Data is unchanged if no components are enabled, or if only level_estimator_
604 // or voice_detection_ is enabled.
605 if (enabled_count == 0) {
606 return false;
607 } else if (enabled_count == 1) {
608 if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
609 return false;
610 }
611 } else if (enabled_count == 2) {
612 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
613 return false;
614 }
615 }
616 return true;
617}
618
// Interleaving back into the frame is only needed if the samples changed or
// the channel count did (up/downmix).
bool AudioProcessingImpl::interleave_needed(bool is_data_processed) const {
  // Check if we've upmixed or downmixed the audio.
  return (num_output_channels_ != num_input_channels_ || is_data_processed);
}
623
// Band recombination is only needed when the data was actually modified and
// processing ran band-split (32 kHz mode).
bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
  return (is_data_processed && sample_rate_hz_ == kSampleRate32kHz);
}
627
628bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
629 if (!is_data_processed && !voice_detection_->is_enabled()) {
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000630 // Only level_estimator_ is enabled.
631 return false;
632 } else if (sample_rate_hz_ == kSampleRate32kHz) {
633 // Something besides level_estimator_ is enabled, and we have super-wb.
634 return true;
635 }
636 return false;
637}
638
639#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
ajm@google.com808e0e02011-08-03 21:08:51 +0000640int AudioProcessingImpl::WriteMessageToDebugFile() {
641 int32_t size = event_msg_->ByteSize();
642 if (size <= 0) {
643 return kUnspecifiedError;
644 }
645#if defined(WEBRTC_BIG_ENDIAN)
646 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
647 // pretty safe in assuming little-endian.
648#endif
649
650 if (!event_msg_->SerializeToString(&event_str_)) {
651 return kUnspecifiedError;
652 }
653
654 // Write message preceded by its size.
655 if (!debug_file_->Write(&size, sizeof(int32_t))) {
656 return kFileError;
657 }
658 if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
659 return kFileError;
660 }
661
662 event_msg_->Clear();
663
664 return 0;
665}
666
// Writes an INIT event carrying the current configuration (rates and channel
// counts) to the debug file. Called when recording starts and after every
// re-initialization so the dump can be replayed correctly.
int AudioProcessingImpl::WriteInitMessage() {
  event_msg_->set_type(audioproc::Event::INIT);
  audioproc::Init* msg = event_msg_->mutable_init();
  msg->set_sample_rate(sample_rate_hz_);
  msg->set_device_sample_rate(echo_cancellation_->device_sample_rate_hz());
  msg->set_num_input_channels(num_input_channels_);
  msg->set_num_output_channels(num_output_channels_);
  msg->set_num_reverse_channels(num_reverse_channels_);

  int err = WriteMessageToDebugFile();
  if (err != kNoError) {
    return err;
  }

  return kNoError;
}
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000683#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +0000684} // namespace webrtc