blob: 922490b7c7bb8730f28b96eb2562e6408e697e75 [file] [log] [blame]
niklase@google.com470e71d2011-07-07 08:21:25 +00001/*
andrew@webrtc.org40654032012-01-30 20:51:15 +00002 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
niklase@google.com470e71d2011-07-07 08:21:25 +00003 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
andrew@webrtc.org78693fe2013-03-01 16:36:19 +000011#include "webrtc/modules/audio_processing/audio_processing_impl.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000012
ajm@google.com808e0e02011-08-03 21:08:51 +000013#include <assert.h>
niklase@google.com470e71d2011-07-07 08:21:25 +000014
andrew@webrtc.org78693fe2013-03-01 16:36:19 +000015#include "webrtc/modules/audio_processing/audio_buffer.h"
16#include "webrtc/modules/audio_processing/echo_cancellation_impl.h"
17#include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
18#include "webrtc/modules/audio_processing/gain_control_impl.h"
19#include "webrtc/modules/audio_processing/high_pass_filter_impl.h"
20#include "webrtc/modules/audio_processing/level_estimator_impl.h"
21#include "webrtc/modules/audio_processing/noise_suppression_impl.h"
22#include "webrtc/modules/audio_processing/processing_component.h"
23#include "webrtc/modules/audio_processing/splitting_filter.h"
24#include "webrtc/modules/audio_processing/voice_detection_impl.h"
25#include "webrtc/modules/interface/module_common_types.h"
26#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
27#include "webrtc/system_wrappers/interface/file_wrapper.h"
28#include "webrtc/system_wrappers/interface/logging.h"
andrew@webrtc.org7bf26462011-12-03 00:03:31 +000029
30#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
31// Files generated at build-time by the protobuf compiler.
leozwang@webrtc.orga3736342012-03-16 21:36:00 +000032#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
leozwang@webrtc.org534e4952012-10-22 21:21:52 +000033#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000034#else
ajm@google.com808e0e02011-08-03 21:08:51 +000035#include "webrtc/audio_processing/debug.pb.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000036#endif
andrew@webrtc.org7bf26462011-12-03 00:03:31 +000037#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +000038
39namespace webrtc {
niklase@google.com470e71d2011-07-07 08:21:25 +000040AudioProcessing* AudioProcessing::Create(int id) {
niklase@google.com470e71d2011-07-07 08:21:25 +000041 AudioProcessingImpl* apm = new AudioProcessingImpl(id);
42 if (apm->Initialize() != kNoError) {
43 delete apm;
44 apm = NULL;
45 }
46
47 return apm;
48}
49
50void AudioProcessing::Destroy(AudioProcessing* apm) {
51 delete static_cast<AudioProcessingImpl*>(apm);
52}
53
// Constructs the module with its default configuration: 16 kHz, mono on all
// streams, 10 ms frames. All owned components are created here and registered
// in component_list_, which drives bulk Initialize()/Destroy() later.
AudioProcessingImpl::AudioProcessingImpl(int id)
    : id_(id),
      echo_cancellation_(NULL),
      echo_control_mobile_(NULL),
      gain_control_(NULL),
      high_pass_filter_(NULL),
      level_estimator_(NULL),
      noise_suppression_(NULL),
      voice_detection_(NULL),
      crit_(CriticalSectionWrapper::CreateCriticalSection()),
      render_audio_(NULL),
      capture_audio_(NULL),
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
      debug_file_(FileWrapper::Create()),
      event_msg_(new audioproc::Event()),
#endif
      sample_rate_hz_(kSampleRate16kHz),
      split_sample_rate_hz_(kSampleRate16kHz),
      samples_per_channel_(sample_rate_hz_ / 100),  // 10 ms frames.
      stream_delay_ms_(0),
      delay_offset_ms_(0),
      was_stream_delay_set_(false),
      num_reverse_channels_(1),
      num_input_channels_(1),
      num_output_channels_(1) {
  // Each component is owned by this object; the list is used for uniform
  // iteration (initialization in InitializeLocked(), teardown in the dtor).
  echo_cancellation_ = new EchoCancellationImpl(this);
  component_list_.push_back(echo_cancellation_);

  echo_control_mobile_ = new EchoControlMobileImpl(this);
  component_list_.push_back(echo_control_mobile_);

  gain_control_ = new GainControlImpl(this);
  component_list_.push_back(gain_control_);

  high_pass_filter_ = new HighPassFilterImpl(this);
  component_list_.push_back(high_pass_filter_);

  level_estimator_ = new LevelEstimatorImpl(this);
  component_list_.push_back(level_estimator_);

  noise_suppression_ = new NoiseSuppressionImpl(this);
  component_list_.push_back(noise_suppression_);

  voice_detection_ = new VoiceDetectionImpl(this);
  component_list_.push_back(voice_detection_);
}
100
AudioProcessingImpl::~AudioProcessingImpl() {
  // The inner scope ensures crit_scoped releases the lock before crit_
  // itself is deleted below.
  {
    CriticalSectionScoped crit_scoped(crit_);
    // Tear down all owned components registered by the constructor.
    while (!component_list_.empty()) {
      ProcessingComponent* component = component_list_.front();
      component->Destroy();
      delete component;
      component_list_.pop_front();
    }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
    // Flush and close any recording still in progress.
    if (debug_file_->Open()) {
      debug_file_->CloseFile();
    }
#endif

    if (render_audio_) {
      delete render_audio_;
      render_audio_ = NULL;
    }

    if (capture_audio_) {
      delete capture_audio_;
      capture_audio_ = NULL;
    }
  }

  delete crit_;
  crit_ = NULL;
}
131
// Exposes the module-wide lock; presumably taken by the owned processing
// components when touching shared state — confirm at their call sites.
CriticalSectionWrapper* AudioProcessingImpl::crit() const {
  return crit_;
}
135
// Rate of the band on which band-split processing runs: 16 kHz when the full
// rate is 32 kHz (super-wideband), otherwise equal to sample_rate_hz_.
int AudioProcessingImpl::split_sample_rate_hz() const {
  return split_sample_rate_hz_;
}
139
// Public (re)initialization entry point: acquires the lock and delegates to
// InitializeLocked().
int AudioProcessingImpl::Initialize() {
  CriticalSectionScoped crit_scoped(crit_);
  return InitializeLocked();
}
144
145int AudioProcessingImpl::InitializeLocked() {
146 if (render_audio_ != NULL) {
147 delete render_audio_;
148 render_audio_ = NULL;
149 }
150
151 if (capture_audio_ != NULL) {
152 delete capture_audio_;
153 capture_audio_ = NULL;
154 }
155
ajm@google.com808e0e02011-08-03 21:08:51 +0000156 render_audio_ = new AudioBuffer(num_reverse_channels_,
niklase@google.com470e71d2011-07-07 08:21:25 +0000157 samples_per_channel_);
ajm@google.com808e0e02011-08-03 21:08:51 +0000158 capture_audio_ = new AudioBuffer(num_input_channels_,
niklase@google.com470e71d2011-07-07 08:21:25 +0000159 samples_per_channel_);
160
161 was_stream_delay_set_ = false;
162
163 // Initialize all components.
164 std::list<ProcessingComponent*>::iterator it;
andrew@webrtc.org81865342012-10-27 00:28:27 +0000165 for (it = component_list_.begin(); it != component_list_.end(); ++it) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000166 int err = (*it)->Initialize();
167 if (err != kNoError) {
168 return err;
169 }
170 }
171
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000172#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
ajm@google.com808e0e02011-08-03 21:08:51 +0000173 if (debug_file_->Open()) {
174 int err = WriteInitMessage();
175 if (err != kNoError) {
176 return err;
177 }
178 }
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000179#endif
ajm@google.com808e0e02011-08-03 21:08:51 +0000180
niklase@google.com470e71d2011-07-07 08:21:25 +0000181 return kNoError;
182}
183
// Sets the sample rate shared by all streams. Only 8, 16 and 32 kHz are
// accepted, and AECM restricts the rate to at most 16 kHz while it is
// enabled. Any actual change triggers a full reinitialization.
int AudioProcessingImpl::set_sample_rate_hz(int rate) {
  CriticalSectionScoped crit_scoped(crit_);
  if (rate == sample_rate_hz_) {
    return kNoError;
  }
  if (rate != kSampleRate8kHz &&
      rate != kSampleRate16kHz &&
      rate != kSampleRate32kHz) {
    return kBadParameterError;
  }
  if (echo_control_mobile_->is_enabled() && rate > kSampleRate16kHz) {
    LOG(LS_ERROR) << "AECM only supports 16 kHz or lower sample rates";
    return kUnsupportedComponentError;
  }

  sample_rate_hz_ = rate;
  samples_per_channel_ = rate / 100;  // 10 ms frames.

  // At 32 kHz, per-band processing runs on the two 16 kHz sub-bands.
  if (sample_rate_hz_ == kSampleRate32kHz) {
    split_sample_rate_hz_ = kSampleRate16kHz;
  } else {
    split_sample_rate_hz_ = sample_rate_hz_;
  }

  return InitializeLocked();
}
210
// Thread-safe read of the configured sample rate.
int AudioProcessingImpl::sample_rate_hz() const {
  CriticalSectionScoped crit_scoped(crit_);
  return sample_rate_hz_;
}
215
216int AudioProcessingImpl::set_num_reverse_channels(int channels) {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000217 CriticalSectionScoped crit_scoped(crit_);
andrew@webrtc.org81865342012-10-27 00:28:27 +0000218 if (channels == num_reverse_channels_) {
219 return kNoError;
220 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000221 // Only stereo supported currently.
222 if (channels > 2 || channels < 1) {
223 return kBadParameterError;
224 }
225
ajm@google.com808e0e02011-08-03 21:08:51 +0000226 num_reverse_channels_ = channels;
niklase@google.com470e71d2011-07-07 08:21:25 +0000227
228 return InitializeLocked();
229}
230
// NOTE(review): read without acquiring crit_, unlike sample_rate_hz();
// assumed to be queried from the thread that configured it — confirm.
int AudioProcessingImpl::num_reverse_channels() const {
  return num_reverse_channels_;
}
234
235int AudioProcessingImpl::set_num_channels(
236 int input_channels,
237 int output_channels) {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000238 CriticalSectionScoped crit_scoped(crit_);
andrew@webrtc.org81865342012-10-27 00:28:27 +0000239 if (input_channels == num_input_channels_ &&
240 output_channels == num_output_channels_) {
241 return kNoError;
242 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000243 if (output_channels > input_channels) {
244 return kBadParameterError;
245 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000246 // Only stereo supported currently.
andrew@webrtc.org81865342012-10-27 00:28:27 +0000247 if (input_channels > 2 || input_channels < 1 ||
248 output_channels > 2 || output_channels < 1) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000249 return kBadParameterError;
250 }
251
ajm@google.com808e0e02011-08-03 21:08:51 +0000252 num_input_channels_ = input_channels;
253 num_output_channels_ = output_channels;
niklase@google.com470e71d2011-07-07 08:21:25 +0000254
255 return InitializeLocked();
256}
257
// NOTE(review): unsynchronized read (no crit_); see num_reverse_channels().
int AudioProcessingImpl::num_input_channels() const {
  return num_input_channels_;
}
261
// NOTE(review): unsynchronized read (no crit_); see num_reverse_channels().
int AudioProcessingImpl::num_output_channels() const {
  return num_output_channels_;
}
265
// Runs the full capture-side (near-end) pipeline on one 10 ms frame, in
// place: validation -> optional debug dump of input -> deinterleave ->
// optional downmix -> optional band split -> HPF -> AGC analysis -> AEC ->
// NS -> AECM -> VAD -> AGC -> optional band merge -> level estimation ->
// reinterleave -> optional debug dump of output. Component order is
// significant (e.g. NS reference copy must precede AECM).
int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  int err = kNoError;

  if (frame == NULL) {
    return kNullPointerError;
  }

  // The frame must exactly match the configured format.
  if (frame->sample_rate_hz_ != sample_rate_hz_) {
    return kBadSampleRateError;
  }

  if (frame->num_channels_ != num_input_channels_) {
    return kBadNumberChannelsError;
  }

  if (frame->samples_per_channel_ != samples_per_channel_) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Record the unprocessed input plus the stream parameters (delay, drift,
  // analog level) in the pending STREAM event.
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_input_data(frame->data_, data_size);
    msg->set_delay(stream_delay_ms_);
    msg->set_drift(echo_cancellation_->stream_drift_samples());
    msg->set_level(gain_control_->stream_analog_level());
  }
#endif

  capture_audio_->DeinterleaveFrom(frame);

  // TODO(ajm): experiment with mixing and AEC placement.
  if (num_output_channels_ < num_input_channels_) {
    capture_audio_->Mix(num_output_channels_);
    frame->num_channels_ = num_output_channels_;
  }

  bool data_processed = is_data_processed();
  if (analysis_needed(data_processed)) {
    for (int i = 0; i < num_output_channels_; i++) {
      // Split into a low and high band.
      SplittingFilterAnalysis(capture_audio_->data(i),
                              capture_audio_->low_pass_split_data(i),
                              capture_audio_->high_pass_split_data(i),
                              capture_audio_->analysis_filter_state1(i),
                              capture_audio_->analysis_filter_state2(i));
    }
  }

  err = high_pass_filter_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->AnalyzeCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_cancellation_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  // AECM needs a pre-NS copy of the low band as its reference signal.
  if (echo_control_mobile_->is_enabled() &&
      noise_suppression_->is_enabled()) {
    capture_audio_->CopyLowPassToReference();
  }

  err = noise_suppression_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_control_mobile_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = voice_detection_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->ProcessCaptureAudio(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  if (synthesis_needed(data_processed)) {
    for (int i = 0; i < num_output_channels_; i++) {
      // Recombine low and high bands.
      SplittingFilterSynthesis(capture_audio_->low_pass_split_data(i),
                               capture_audio_->high_pass_split_data(i),
                               capture_audio_->data(i),
                               capture_audio_->synthesis_filter_state1(i),
                               capture_audio_->synthesis_filter_state2(i));
    }
  }

  // The level estimator operates on the recombined data.
  err = level_estimator_->ProcessStream(capture_audio_);
  if (err != kNoError) {
    return err;
  }

  capture_audio_->InterleaveTo(frame, interleave_needed(data_processed));

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Record the processed output and flush the STREAM event to disk.
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_output_data(frame->data_, data_size);
    err = WriteMessageToDebugFile();
    if (err != kNoError) {
      return err;
    }
  }
#endif

  // The delay must be re-reported before every ProcessStream() call.
  was_stream_delay_set_ = false;
  return kNoError;
}
396
// Feeds one 10 ms reverse (render/far-end) frame for analysis. The audio is
// not modified; it serves as reference data for AEC, AECM and AGC. The frame
// must match the configured rate and reverse channel count.
int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  int err = kNoError;

  if (frame == NULL) {
    return kNullPointerError;
  }

  if (frame->sample_rate_hz_ != sample_rate_hz_) {
    return kBadSampleRateError;
  }

  if (frame->num_channels_ != num_reverse_channels_) {
    return kBadNumberChannelsError;
  }

  if (frame->samples_per_channel_ != samples_per_channel_) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Dump the reverse frame as a REVERSE_STREAM event.
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_data(frame->data_, data_size);
    err = WriteMessageToDebugFile();
    if (err != kNoError) {
      return err;
    }
  }
#endif

  render_audio_->DeinterleaveFrom(frame);

  // TODO(ajm): turn the splitting filter into a component?
  if (sample_rate_hz_ == kSampleRate32kHz) {
    for (int i = 0; i < num_reverse_channels_; i++) {
      // Split into low and high band.
      SplittingFilterAnalysis(render_audio_->data(i),
                              render_audio_->low_pass_split_data(i),
                              render_audio_->high_pass_split_data(i),
                              render_audio_->analysis_filter_state1(i),
                              render_audio_->analysis_filter_state2(i));
    }
  }

  // TODO(ajm): warnings possible from components?
  err = echo_cancellation_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_control_mobile_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  return err;  // TODO(ajm): this is for returning warnings; necessary?
}
464
465int AudioProcessingImpl::set_stream_delay_ms(int delay) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000466 Error retval = kNoError;
niklase@google.com470e71d2011-07-07 08:21:25 +0000467 was_stream_delay_set_ = true;
andrew@webrtc.org6f9f8172012-03-06 19:03:39 +0000468 delay += delay_offset_ms_;
469
niklase@google.com470e71d2011-07-07 08:21:25 +0000470 if (delay < 0) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000471 delay = 0;
472 retval = kBadStreamParameterWarning;
niklase@google.com470e71d2011-07-07 08:21:25 +0000473 }
474
475 // TODO(ajm): the max is rather arbitrarily chosen; investigate.
476 if (delay > 500) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000477 delay = 500;
478 retval = kBadStreamParameterWarning;
niklase@google.com470e71d2011-07-07 08:21:25 +0000479 }
480
481 stream_delay_ms_ = delay;
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000482 return retval;
niklase@google.com470e71d2011-07-07 08:21:25 +0000483}
484
// NOTE(review): unsynchronized read of a value written on the capture path;
// assumed to be queried from the same thread — confirm.
int AudioProcessingImpl::stream_delay_ms() const {
  return stream_delay_ms_;
}
488
// True iff set_stream_delay_ms() has been called since the last
// ProcessStream() call (ProcessStream clears the flag on success).
// NOTE(review): unsynchronized read — see stream_delay_ms().
bool AudioProcessingImpl::was_stream_delay_set() const {
  return was_stream_delay_set_;
}
492
// Sets a constant offset added to every subsequently reported stream delay
// (may be negative); applied inside set_stream_delay_ms().
void AudioProcessingImpl::set_delay_offset_ms(int offset) {
  CriticalSectionScoped crit_scoped(crit_);
  delay_offset_ms_ = offset;
}
497
// NOTE(review): unsynchronized read although the setter takes crit_ —
// confirm callers' threading assumptions.
int AudioProcessingImpl::delay_offset_ms() const {
  return delay_offset_ms_;
}
501
// Starts dumping APM events (init, stream, reverse stream) to |filename| as
// length-prefixed protobuf messages. Any recording already in progress is
// stopped first. Returns kUnsupportedFunctionError when the build lacks
// WEBRTC_AUDIOPROC_DEBUG_DUMP.
int AudioProcessingImpl::StartDebugRecording(
    const char filename[AudioProcessing::kMaxFilenameSize]) {
  CriticalSectionScoped crit_scoped(crit_);
  assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize);

  if (filename == NULL) {
    return kNullPointerError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stop any ongoing recording.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }

  if (debug_file_->OpenFile(filename, false) == -1) {
    debug_file_->CloseFile();
    return kFileError;
  }

  // The recording always begins with an INIT event capturing the current
  // configuration.
  int err = WriteInitMessage();
  if (err != kNoError) {
    return err;
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}
533
// Stops a debug recording started by StartDebugRecording(). Calling it when
// no recording is active is a no-op that returns kNoError.
int AudioProcessingImpl::StopDebugRecording() {
  CriticalSectionScoped crit_scoped(crit_);

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // We just return if recording hasn't started.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}
549
// Component accessors. The returned pointers are owned by this
// AudioProcessingImpl (created in the constructor, destroyed in the dtor)
// and remain valid for its entire lifetime; callers must not delete them.
EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
  return echo_cancellation_;
}

EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
  return echo_control_mobile_;
}

GainControl* AudioProcessingImpl::gain_control() const {
  return gain_control_;
}

HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
  return high_pass_filter_;
}

LevelEstimator* AudioProcessingImpl::level_estimator() const {
  return level_estimator_;
}

NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
  return noise_suppression_;
}

VoiceDetection* AudioProcessingImpl::voice_detection() const {
  return voice_detection_;
}
577
// Module interface hook: replaces the instance id under the lock.
int32_t AudioProcessingImpl::ChangeUniqueId(const int32_t id) {
  CriticalSectionScoped crit_scoped(crit_);
  id_ = id;

  return kNoError;
}
ajm@google.com808e0e02011-08-03 21:08:51 +0000584
andrew@webrtc.org369166a2012-04-24 18:38:03 +0000585bool AudioProcessingImpl::is_data_processed() const {
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000586 int enabled_count = 0;
587 std::list<ProcessingComponent*>::const_iterator it;
588 for (it = component_list_.begin(); it != component_list_.end(); it++) {
589 if ((*it)->is_component_enabled()) {
590 enabled_count++;
591 }
592 }
593
594 // Data is unchanged if no components are enabled, or if only level_estimator_
595 // or voice_detection_ is enabled.
596 if (enabled_count == 0) {
597 return false;
598 } else if (enabled_count == 1) {
599 if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
600 return false;
601 }
602 } else if (enabled_count == 2) {
603 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
604 return false;
605 }
606 }
607 return true;
608}
609
// True when the processed buffer must be copied back into the caller's
// frame: either the audio was modified or the channel count changed.
bool AudioProcessingImpl::interleave_needed(bool is_data_processed) const {
  // Check if we've upmixed or downmixed the audio.
  return (num_output_channels_ != num_input_channels_ || is_data_processed);
}
614
// True when the two 16 kHz sub-bands must be recombined: only in
// super-wideband (32 kHz) mode, and only if the data was actually modified.
bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
  return (is_data_processed && sample_rate_hz_ == kSampleRate32kHz);
}
618
619bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
620 if (!is_data_processed && !voice_detection_->is_enabled()) {
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000621 // Only level_estimator_ is enabled.
622 return false;
623 } else if (sample_rate_hz_ == kSampleRate32kHz) {
624 // Something besides level_estimator_ is enabled, and we have super-wb.
625 return true;
626 }
627 return false;
628}
629
630#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
ajm@google.com808e0e02011-08-03 21:08:51 +0000631int AudioProcessingImpl::WriteMessageToDebugFile() {
632 int32_t size = event_msg_->ByteSize();
633 if (size <= 0) {
634 return kUnspecifiedError;
635 }
636#if defined(WEBRTC_BIG_ENDIAN)
637 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
638 // pretty safe in assuming little-endian.
639#endif
640
641 if (!event_msg_->SerializeToString(&event_str_)) {
642 return kUnspecifiedError;
643 }
644
645 // Write message preceded by its size.
646 if (!debug_file_->Write(&size, sizeof(int32_t))) {
647 return kFileError;
648 }
649 if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
650 return kFileError;
651 }
652
653 event_msg_->Clear();
654
655 return 0;
656}
657
658int AudioProcessingImpl::WriteInitMessage() {
659 event_msg_->set_type(audioproc::Event::INIT);
660 audioproc::Init* msg = event_msg_->mutable_init();
661 msg->set_sample_rate(sample_rate_hz_);
662 msg->set_device_sample_rate(echo_cancellation_->device_sample_rate_hz());
663 msg->set_num_input_channels(num_input_channels_);
664 msg->set_num_output_channels(num_output_channels_);
665 msg->set_num_reverse_channels(num_reverse_channels_);
666
667 int err = WriteMessageToDebugFile();
668 if (err != kNoError) {
669 return err;
670 }
671
672 return kNoError;
673}
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000674#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +0000675} // namespace webrtc