blob: ab5607a3d2a820ed866dff02c9019adb029d4e6d [file] [log] [blame]
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
10
11#include "audio_processing_impl.h"
12
ajm@google.com808e0e02011-08-03 21:08:51 +000013#include <assert.h>
niklase@google.com470e71d2011-07-07 08:21:25 +000014
15#include "audio_buffer.h"
ajm@google.com808e0e02011-08-03 21:08:51 +000016#include "critical_section_wrapper.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000017#include "echo_cancellation_impl.h"
18#include "echo_control_mobile_impl.h"
ajm@google.com808e0e02011-08-03 21:08:51 +000019#include "file_wrapper.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000020#include "high_pass_filter_impl.h"
21#include "gain_control_impl.h"
22#include "level_estimator_impl.h"
ajm@google.com808e0e02011-08-03 21:08:51 +000023#include "module_common_types.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000024#include "noise_suppression_impl.h"
25#include "processing_component.h"
26#include "splitting_filter.h"
27#include "voice_detection_impl.h"
andrew@webrtc.org7bf26462011-12-03 00:03:31 +000028
29#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
30// Files generated at build-time by the protobuf compiler.
leozwang@webrtc.orga3736342012-03-16 21:36:00 +000031#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
andrew@webrtc.org4d5d5c12011-10-19 01:40:33 +000032#include "external/webrtc/src/modules/audio_processing/debug.pb.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000033#else
ajm@google.com808e0e02011-08-03 21:08:51 +000034#include "webrtc/audio_processing/debug.pb.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000035#endif
andrew@webrtc.org7bf26462011-12-03 00:03:31 +000036#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +000037
38namespace webrtc {
niklase@google.com470e71d2011-07-07 08:21:25 +000039AudioProcessing* AudioProcessing::Create(int id) {
niklase@google.com470e71d2011-07-07 08:21:25 +000040
41 AudioProcessingImpl* apm = new AudioProcessingImpl(id);
42 if (apm->Initialize() != kNoError) {
43 delete apm;
44 apm = NULL;
45 }
46
47 return apm;
48}
49
// Destroys an instance previously obtained from Create(). The cast is
// required because AudioProcessing's destructor is not public/virtual here;
// deletion must go through the concrete type.
void AudioProcessing::Destroy(AudioProcessing* apm) {
  delete static_cast<AudioProcessingImpl*>(apm);
}
53
// Constructs the processing chain. All component pointers start NULL and are
// allocated below; each component is also appended to component_list_, which
// owns them for bulk Destroy/delete in the destructor. Defaults: 16 kHz,
// mono on all streams, 10 ms frames (sample_rate_hz_ / 100 samples).
AudioProcessingImpl::AudioProcessingImpl(int id)
    : id_(id),
      echo_cancellation_(NULL),
      echo_control_mobile_(NULL),
      gain_control_(NULL),
      high_pass_filter_(NULL),
      level_estimator_(NULL),
      noise_suppression_(NULL),
      voice_detection_(NULL),
      crit_(CriticalSectionWrapper::CreateCriticalSection()),
      render_audio_(NULL),
      capture_audio_(NULL),
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
      debug_file_(FileWrapper::Create()),
      event_msg_(new audioproc::Event()),
#endif
      sample_rate_hz_(kSampleRate16kHz),
      split_sample_rate_hz_(kSampleRate16kHz),
      samples_per_channel_(sample_rate_hz_ / 100),
      stream_delay_ms_(0),
      delay_offset_ms_(0),
      was_stream_delay_set_(false),
      num_reverse_channels_(1),
      num_input_channels_(1),
      num_output_channels_(1) {
  // Each component receives |this| so it can share crit() and query the
  // current rates/channel configuration.
  echo_cancellation_ = new EchoCancellationImpl(this);
  component_list_.push_back(echo_cancellation_);

  echo_control_mobile_ = new EchoControlMobileImpl(this);
  component_list_.push_back(echo_control_mobile_);

  gain_control_ = new GainControlImpl(this);
  component_list_.push_back(gain_control_);

  high_pass_filter_ = new HighPassFilterImpl(this);
  component_list_.push_back(high_pass_filter_);

  level_estimator_ = new LevelEstimatorImpl(this);
  component_list_.push_back(level_estimator_);

  noise_suppression_ = new NoiseSuppressionImpl(this);
  component_list_.push_back(noise_suppression_);

  voice_detection_ = new VoiceDetectionImpl(this);
  component_list_.push_back(voice_detection_);
}
101
// Tears down the chain under the lock: components first (Destroy() then
// delete, draining component_list_), then the debug file, the audio
// buffers, and finally the critical section itself.
AudioProcessingImpl::~AudioProcessingImpl() {
  crit_->Enter();
  while (!component_list_.empty()) {
    ProcessingComponent* component = component_list_.front();
    component->Destroy();
    delete component;
    component_list_.pop_front();
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Close any recording still in progress.
  if (debug_file_->Open()) {
    debug_file_->CloseFile();
  }
#endif

  if (render_audio_) {
    delete render_audio_;
    render_audio_ = NULL;
  }

  if (capture_audio_) {
    delete capture_audio_;
    capture_audio_ = NULL;
  }

  // The lock must be released before it can be deleted.
  crit_->Leave();
  delete crit_;
  crit_ = NULL;
}
131
// Exposes the instance-wide lock so the per-component Impl classes can
// synchronize on the same critical section.
CriticalSectionWrapper* AudioProcessingImpl::crit() const {
  return crit_;
}

// Rate of each band after the splitting filter: set to 16 kHz when the full
// rate is 32 kHz, otherwise equal to the full rate (see set_sample_rate_hz).
int AudioProcessingImpl::split_sample_rate_hz() const {
  return split_sample_rate_hz_;
}
139
// Public (locking) wrapper around InitializeLocked().
int AudioProcessingImpl::Initialize() {
  CriticalSectionScoped crit_scoped(crit_);
  return InitializeLocked();
}
144
145int AudioProcessingImpl::InitializeLocked() {
146 if (render_audio_ != NULL) {
147 delete render_audio_;
148 render_audio_ = NULL;
149 }
150
151 if (capture_audio_ != NULL) {
152 delete capture_audio_;
153 capture_audio_ = NULL;
154 }
155
ajm@google.com808e0e02011-08-03 21:08:51 +0000156 render_audio_ = new AudioBuffer(num_reverse_channels_,
niklase@google.com470e71d2011-07-07 08:21:25 +0000157 samples_per_channel_);
ajm@google.com808e0e02011-08-03 21:08:51 +0000158 capture_audio_ = new AudioBuffer(num_input_channels_,
niklase@google.com470e71d2011-07-07 08:21:25 +0000159 samples_per_channel_);
160
161 was_stream_delay_set_ = false;
162
163 // Initialize all components.
164 std::list<ProcessingComponent*>::iterator it;
165 for (it = component_list_.begin(); it != component_list_.end(); it++) {
166 int err = (*it)->Initialize();
167 if (err != kNoError) {
168 return err;
169 }
170 }
171
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000172#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
ajm@google.com808e0e02011-08-03 21:08:51 +0000173 if (debug_file_->Open()) {
174 int err = WriteInitMessage();
175 if (err != kNoError) {
176 return err;
177 }
178 }
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000179#endif
ajm@google.com808e0e02011-08-03 21:08:51 +0000180
niklase@google.com470e71d2011-07-07 08:21:25 +0000181 return kNoError;
182}
183
184int AudioProcessingImpl::set_sample_rate_hz(int rate) {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000185 CriticalSectionScoped crit_scoped(crit_);
niklase@google.com470e71d2011-07-07 08:21:25 +0000186 if (rate != kSampleRate8kHz &&
187 rate != kSampleRate16kHz &&
188 rate != kSampleRate32kHz) {
189 return kBadParameterError;
190 }
191
192 sample_rate_hz_ = rate;
193 samples_per_channel_ = rate / 100;
194
195 if (sample_rate_hz_ == kSampleRate32kHz) {
196 split_sample_rate_hz_ = kSampleRate16kHz;
197 } else {
198 split_sample_rate_hz_ = sample_rate_hz_;
199 }
200
201 return InitializeLocked();
202}
203
// Full-band sample rate currently in use (8, 16 or 32 kHz).
int AudioProcessingImpl::sample_rate_hz() const {
  return sample_rate_hz_;
}
207
208int AudioProcessingImpl::set_num_reverse_channels(int channels) {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000209 CriticalSectionScoped crit_scoped(crit_);
niklase@google.com470e71d2011-07-07 08:21:25 +0000210 // Only stereo supported currently.
211 if (channels > 2 || channels < 1) {
212 return kBadParameterError;
213 }
214
ajm@google.com808e0e02011-08-03 21:08:51 +0000215 num_reverse_channels_ = channels;
niklase@google.com470e71d2011-07-07 08:21:25 +0000216
217 return InitializeLocked();
218}
219
// Channel count of the reverse (render/far-end) stream.
int AudioProcessingImpl::num_reverse_channels() const {
  return num_reverse_channels_;
}
223
224int AudioProcessingImpl::set_num_channels(
225 int input_channels,
226 int output_channels) {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000227 CriticalSectionScoped crit_scoped(crit_);
niklase@google.com470e71d2011-07-07 08:21:25 +0000228 if (output_channels > input_channels) {
229 return kBadParameterError;
230 }
231
232 // Only stereo supported currently.
233 if (input_channels > 2 || input_channels < 1) {
234 return kBadParameterError;
235 }
236
237 if (output_channels > 2 || output_channels < 1) {
238 return kBadParameterError;
239 }
240
ajm@google.com808e0e02011-08-03 21:08:51 +0000241 num_input_channels_ = input_channels;
242 num_output_channels_ = output_channels;
niklase@google.com470e71d2011-07-07 08:21:25 +0000243
244 return InitializeLocked();
245}
246
// Channel count of the capture-side input.
int AudioProcessingImpl::num_input_channels() const {
  return num_input_channels_;
}

// Channel count of the capture-side output (<= num_input_channels_).
int AudioProcessingImpl::num_output_channels() const {
  return num_output_channels_;
}
254
// Processes one 10 ms capture-side (near-end) frame in place. The frame must
// match the configured sample rate, input channel count, and frame length
// (samples_per_channel_). Runs the components in a fixed order and, when the
// band split is active, wraps them in an analysis/synthesis filter pair.
int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  int err = kNoError;

  if (frame == NULL) {
    return kNullPointerError;
  }

  if (frame->sample_rate_hz_ != sample_rate_hz_) {
    return kBadSampleRateError;
  }

  if (frame->num_channels_ != num_input_channels_) {
    return kBadNumberChannelsError;
  }

  if (frame->samples_per_channel_ != samples_per_channel_) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Record the unprocessed input along with the per-frame stream parameters
  // (delay, drift, analog level) that were set before this call.
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_input_data(frame->data_, data_size);
    msg->set_delay(stream_delay_ms_);
    msg->set_drift(echo_cancellation_->stream_drift_samples());
    msg->set_level(gain_control_->stream_analog_level());
  }
#endif

  capture_audio_->DeinterleaveFrom(frame);

  // Downmix before processing when fewer output channels were requested;
  // the frame's channel count is updated to match.
  // TODO(ajm): experiment with mixing and AEC placement.
  if (num_output_channels_ < num_input_channels_) {
    capture_audio_->Mix(num_output_channels_);
    frame->num_channels_ = num_output_channels_;
  }

  bool data_processed = is_data_processed();
  if (analysis_needed(data_processed)) {
    for (int i = 0; i < num_output_channels_; i++) {
      // Split into a low and high band.
      SplittingFilterAnalysis(capture_audio_->data(i),
                              capture_audio_->low_pass_split_data(i),
                              capture_audio_->high_pass_split_data(i),
                              capture_audio_->analysis_filter_state1(i),
                              capture_audio_->analysis_filter_state2(i));
    }
  }

  // Fixed component order: HPF, AGC analysis, AEC, NS, AECM, VAD, AGC.
  err = high_pass_filter_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->AnalyzeCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_cancellation_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  // AECM wants a pre-NS copy of the low band when both components are on.
  if (echo_control_mobile_->is_enabled() &&
      noise_suppression_->is_enabled()) {
    capture_audio_->CopyLowPassToReference();
  }

  err = noise_suppression_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_control_mobile_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = voice_detection_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  if (synthesis_needed(data_processed)) {
    for (int i = 0; i < num_output_channels_; i++) {
      // Recombine low and high bands.
      SplittingFilterSynthesis(capture_audio_->low_pass_split_data(i),
                               capture_audio_->high_pass_split_data(i),
                               capture_audio_->data(i),
                               capture_audio_->synthesis_filter_state1(i),
                               capture_audio_->synthesis_filter_state2(i));
    }
  }

  // The level estimator operates on the recombined data.
  err = level_estimator_->ProcessStream(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  // Only re-interleave into |frame| if the audio was actually modified.
  capture_audio_->InterleaveTo(frame, interleave_needed(data_processed));

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Append the processed output to the STREAM event started above.
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_output_data(frame->data_, data_size);
    err = WriteMessageToDebugFile();
    if (err != kNoError) {
      return err;
    }
  }
#endif

  // The stream delay must be provided anew for each frame.
  was_stream_delay_set_ = false;
  return kNoError;
}
385
// Feeds one 10 ms reverse (render/far-end) frame to the components that need
// it (AEC, AECM, AGC). The frame is analyzed only, never modified. It must
// match the configured sample rate, reverse channel count, and frame length.
int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  int err = kNoError;

  if (frame == NULL) {
    return kNullPointerError;
  }

  if (frame->sample_rate_hz_ != sample_rate_hz_) {
    return kBadSampleRateError;
  }

  if (frame->num_channels_ != num_reverse_channels_) {
    return kBadNumberChannelsError;
  }

  if (frame->samples_per_channel_ != samples_per_channel_) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Record the reverse-stream audio as a REVERSE_STREAM event.
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_data(frame->data_, data_size);
    err = WriteMessageToDebugFile();
    if (err != kNoError) {
      return err;
    }
  }
#endif

  render_audio_->DeinterleaveFrom(frame);

  // In super-wb mode the far-end signal is band-split as well.
  // TODO(ajm): turn the splitting filter into a component?
  if (sample_rate_hz_ == kSampleRate32kHz) {
    for (int i = 0; i < num_reverse_channels_; i++) {
      // Split into low and high band.
      SplittingFilterAnalysis(render_audio_->data(i),
                              render_audio_->low_pass_split_data(i),
                              render_audio_->high_pass_split_data(i),
                              render_audio_->analysis_filter_state1(i),
                              render_audio_->analysis_filter_state2(i));
    }
  }

  // TODO(ajm): warnings possible from components?
  err = echo_cancellation_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_control_mobile_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  return err;  // TODO(ajm): this is for returning warnings; necessary?
}
453
454int AudioProcessingImpl::set_stream_delay_ms(int delay) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000455 Error retval = kNoError;
niklase@google.com470e71d2011-07-07 08:21:25 +0000456 was_stream_delay_set_ = true;
andrew@webrtc.org6f9f8172012-03-06 19:03:39 +0000457 delay += delay_offset_ms_;
458
niklase@google.com470e71d2011-07-07 08:21:25 +0000459 if (delay < 0) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000460 delay = 0;
461 retval = kBadStreamParameterWarning;
niklase@google.com470e71d2011-07-07 08:21:25 +0000462 }
463
464 // TODO(ajm): the max is rather arbitrarily chosen; investigate.
465 if (delay > 500) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000466 delay = 500;
467 retval = kBadStreamParameterWarning;
niklase@google.com470e71d2011-07-07 08:21:25 +0000468 }
469
470 stream_delay_ms_ = delay;
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000471 return retval;
niklase@google.com470e71d2011-07-07 08:21:25 +0000472}
473
// Delay (ms, post-offset, clamped) last provided via set_stream_delay_ms().
int AudioProcessingImpl::stream_delay_ms() const {
  return stream_delay_ms_;
}

// True if set_stream_delay_ms() has been called since the last
// ProcessStream() (which resets the flag) or re-initialization.
bool AudioProcessingImpl::was_stream_delay_set() const {
  return was_stream_delay_set_;
}
481
// Sets a fixed offset added to every delay reported through
// set_stream_delay_ms() (e.g. to compensate for device buffering).
void AudioProcessingImpl::set_delay_offset_ms(int offset) {
  CriticalSectionScoped crit_scoped(crit_);
  delay_offset_ms_ = offset;
}

// Currently configured delay offset in milliseconds.
int AudioProcessingImpl::delay_offset_ms() const {
  return delay_offset_ms_;
}
490
// Starts dumping the debug protobuf stream to |filename|, stopping any
// recording already in progress. Returns kUnsupportedFunctionError when the
// build lacks WEBRTC_AUDIOPROC_DEBUG_DUMP.
int AudioProcessingImpl::StartDebugRecording(
    const char filename[AudioProcessing::kMaxFilenameSize]) {
  CriticalSectionScoped crit_scoped(crit_);
  // |filename| is forwarded to FileWrapper, so the size limits must agree.
  assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize);

  if (filename == NULL) {
    return kNullPointerError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stop any ongoing recording.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }

  if (debug_file_->OpenFile(filename, false) == -1) {
    debug_file_->CloseFile();
    return kFileError;
  }

  // The dump begins with an INIT event describing the configuration.
  int err = WriteInitMessage();
  if (err != kNoError) {
    return err;
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}
522
// Stops the debug dump, if one is running. A no-op success when recording
// was never started; kUnsupportedFunctionError in builds without the dump.
int AudioProcessingImpl::StopDebugRecording() {
  CriticalSectionScoped crit_scoped(crit_);

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // We just return if recording hasn't started.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}
538
// Accessors for the individual processing components. Ownership stays with
// AudioProcessingImpl: the objects live in component_list_ and are destroyed
// in the destructor, so callers must not delete them.
EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
  return echo_cancellation_;
}

EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
  return echo_control_mobile_;
}

GainControl* AudioProcessingImpl::gain_control() const {
  return gain_control_;
}

HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
  return high_pass_filter_;
}

LevelEstimator* AudioProcessingImpl::level_estimator() const {
  return level_estimator_;
}

NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
  return noise_suppression_;
}

VoiceDetection* AudioProcessingImpl::voice_detection() const {
  return voice_detection_;
}
566
// Replaces the module identifier supplied at construction.
WebRtc_Word32 AudioProcessingImpl::ChangeUniqueId(const WebRtc_Word32 id) {
  CriticalSectionScoped crit_scoped(crit_);
  id_ = id;

  return kNoError;
}
ajm@google.com808e0e02011-08-03 21:08:51 +0000573
andrew@webrtc.org369166a2012-04-24 18:38:03 +0000574bool AudioProcessingImpl::is_data_processed() const {
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000575 int enabled_count = 0;
576 std::list<ProcessingComponent*>::const_iterator it;
577 for (it = component_list_.begin(); it != component_list_.end(); it++) {
578 if ((*it)->is_component_enabled()) {
579 enabled_count++;
580 }
581 }
582
583 // Data is unchanged if no components are enabled, or if only level_estimator_
584 // or voice_detection_ is enabled.
585 if (enabled_count == 0) {
586 return false;
587 } else if (enabled_count == 1) {
588 if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
589 return false;
590 }
591 } else if (enabled_count == 2) {
592 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
593 return false;
594 }
595 }
596 return true;
597}
598
// True when the processed data must be written back into the caller's frame:
// either the audio was modified, or we up/downmixed the channel count.
bool AudioProcessingImpl::interleave_needed(bool is_data_processed) const {
  // Check if we've upmixed or downmixed the audio.
  return (num_output_channels_ != num_input_channels_ || is_data_processed);
}
603
// The bands only need recombining when they were modified, and a band split
// only exists in super-wb (32 kHz) mode.
bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
  return (is_data_processed && sample_rate_hz_ == kSampleRate32kHz);
}
607
608bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
609 if (!is_data_processed && !voice_detection_->is_enabled()) {
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000610 // Only level_estimator_ is enabled.
611 return false;
612 } else if (sample_rate_hz_ == kSampleRate32kHz) {
613 // Something besides level_estimator_ is enabled, and we have super-wb.
614 return true;
615 }
616 return false;
617}
618
619#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
ajm@google.com808e0e02011-08-03 21:08:51 +0000620int AudioProcessingImpl::WriteMessageToDebugFile() {
621 int32_t size = event_msg_->ByteSize();
622 if (size <= 0) {
623 return kUnspecifiedError;
624 }
625#if defined(WEBRTC_BIG_ENDIAN)
626 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
627 // pretty safe in assuming little-endian.
628#endif
629
630 if (!event_msg_->SerializeToString(&event_str_)) {
631 return kUnspecifiedError;
632 }
633
634 // Write message preceded by its size.
635 if (!debug_file_->Write(&size, sizeof(int32_t))) {
636 return kFileError;
637 }
638 if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
639 return kFileError;
640 }
641
642 event_msg_->Clear();
643
644 return 0;
645}
646
// Writes an INIT event capturing the current configuration (rates and
// channel counts) to the debug file. Called from InitializeLocked() and
// StartDebugRecording() so the dump can be interpreted offline.
int AudioProcessingImpl::WriteInitMessage() {
  event_msg_->set_type(audioproc::Event::INIT);
  audioproc::Init* msg = event_msg_->mutable_init();
  msg->set_sample_rate(sample_rate_hz_);
  msg->set_device_sample_rate(echo_cancellation_->device_sample_rate_hz());
  msg->set_num_input_channels(num_input_channels_);
  msg->set_num_output_channels(num_output_channels_);
  msg->set_num_reverse_channels(num_reverse_channels_);

  int err = WriteMessageToDebugFile();
  if (err != kNoError) {
    return err;
  }

  return kNoError;
}
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000663#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +0000664} // namespace webrtc