blob: da8dcdb276b346a7ba0ee8d07fea8a0e7649e65f [file] [log] [blame]
niklase@google.com470e71d2011-07-07 08:21:25 +00001/*
2 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11#include "audio_processing_impl.h"
12
ajm@google.com808e0e02011-08-03 21:08:51 +000013#include <assert.h>
niklase@google.com470e71d2011-07-07 08:21:25 +000014
15#include "audio_buffer.h"
ajm@google.com808e0e02011-08-03 21:08:51 +000016#include "critical_section_wrapper.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000017#include "echo_cancellation_impl.h"
18#include "echo_control_mobile_impl.h"
ajm@google.com808e0e02011-08-03 21:08:51 +000019#include "file_wrapper.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000020#include "high_pass_filter_impl.h"
21#include "gain_control_impl.h"
22#include "level_estimator_impl.h"
ajm@google.com808e0e02011-08-03 21:08:51 +000023#include "module_common_types.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000024#include "noise_suppression_impl.h"
25#include "processing_component.h"
26#include "splitting_filter.h"
27#include "voice_detection_impl.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000028#ifdef WEBRTC_ANDROID
andrew@webrtc.org4d5d5c12011-10-19 01:40:33 +000029#include "external/webrtc/src/modules/audio_processing/debug.pb.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000030#else
ajm@google.com808e0e02011-08-03 21:08:51 +000031#include "webrtc/audio_processing/debug.pb.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000032#endif
niklase@google.com470e71d2011-07-07 08:21:25 +000033
34namespace webrtc {
niklase@google.com470e71d2011-07-07 08:21:25 +000035AudioProcessing* AudioProcessing::Create(int id) {
36 /*WEBRTC_TRACE(webrtc::kTraceModuleCall,
37 webrtc::kTraceAudioProcessing,
38 id,
39 "AudioProcessing::Create()");*/
40
41 AudioProcessingImpl* apm = new AudioProcessingImpl(id);
42 if (apm->Initialize() != kNoError) {
43 delete apm;
44 apm = NULL;
45 }
46
47 return apm;
48}
49
// Destroys an instance obtained from Create(). Deletion goes through the
// concrete type via static_cast.
void AudioProcessing::Destroy(AudioProcessing* apm) {
  delete static_cast<AudioProcessingImpl*>(apm);
}
53
// Constructs the APM with a default mono, 16 kHz configuration and registers
// every processing component. The audio buffers are not allocated here;
// Create() calls Initialize() immediately after construction.
AudioProcessingImpl::AudioProcessingImpl(int id)
    : id_(id),
      echo_cancellation_(NULL),
      echo_control_mobile_(NULL),
      gain_control_(NULL),
      high_pass_filter_(NULL),
      level_estimator_(NULL),
      noise_suppression_(NULL),
      voice_detection_(NULL),
      debug_file_(FileWrapper::Create()),
      event_msg_(new audioproc::Event()),
      crit_(CriticalSectionWrapper::CreateCriticalSection()),
      render_audio_(NULL),
      capture_audio_(NULL),
      sample_rate_hz_(kSampleRate16kHz),
      split_sample_rate_hz_(kSampleRate16kHz),
      // 10 ms frames: samples per channel is 1/100 of the sample rate.
      samples_per_channel_(sample_rate_hz_ / 100),
      stream_delay_ms_(0),
      was_stream_delay_set_(false),
      num_reverse_channels_(1),
      num_input_channels_(1),
      num_output_channels_(1) {

  // Each component is appended to component_list_, which owns it (see the
  // destructor) and is iterated for bulk Initialize()/get_version() calls.
  echo_cancellation_ = new EchoCancellationImpl(this);
  component_list_.push_back(echo_cancellation_);

  echo_control_mobile_ = new EchoControlMobileImpl(this);
  component_list_.push_back(echo_control_mobile_);

  gain_control_ = new GainControlImpl(this);
  component_list_.push_back(gain_control_);

  high_pass_filter_ = new HighPassFilterImpl(this);
  component_list_.push_back(high_pass_filter_);

  level_estimator_ = new LevelEstimatorImpl(this);
  component_list_.push_back(level_estimator_);

  noise_suppression_ = new NoiseSuppressionImpl(this);
  component_list_.push_back(noise_suppression_);

  voice_detection_ = new VoiceDetectionImpl(this);
  component_list_.push_back(voice_detection_);
}
98
99AudioProcessingImpl::~AudioProcessingImpl() {
100 while (!component_list_.empty()) {
101 ProcessingComponent* component = component_list_.front();
102 component->Destroy();
103 delete component;
104 component_list_.pop_front();
105 }
106
107 if (debug_file_->Open()) {
108 debug_file_->CloseFile();
109 }
110 delete debug_file_;
111 debug_file_ = NULL;
112
ajm@google.com808e0e02011-08-03 21:08:51 +0000113 delete event_msg_;
114 event_msg_ = NULL;
115
niklase@google.com470e71d2011-07-07 08:21:25 +0000116 delete crit_;
117 crit_ = NULL;
118
ajm@google.com808e0e02011-08-03 21:08:51 +0000119 if (render_audio_) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000120 delete render_audio_;
121 render_audio_ = NULL;
122 }
123
ajm@google.com808e0e02011-08-03 21:08:51 +0000124 if (capture_audio_) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000125 delete capture_audio_;
126 capture_audio_ = NULL;
127 }
128}
129
// Exposes the APM-wide lock to the owned components.
CriticalSectionWrapper* AudioProcessingImpl::crit() const {
  return crit_;
}

// Rate of each band after splitting: 16 kHz when running at 32 kHz,
// otherwise equal to the full-band rate (see set_sample_rate_hz()).
int AudioProcessingImpl::split_sample_rate_hz() const {
  return split_sample_rate_hz_;
}
137
// Thread-safe entry point: takes the lock and delegates to InitializeLocked().
int AudioProcessingImpl::Initialize() {
  CriticalSectionScoped crit_scoped(*crit_);
  return InitializeLocked();
}
142
143int AudioProcessingImpl::InitializeLocked() {
144 if (render_audio_ != NULL) {
145 delete render_audio_;
146 render_audio_ = NULL;
147 }
148
149 if (capture_audio_ != NULL) {
150 delete capture_audio_;
151 capture_audio_ = NULL;
152 }
153
ajm@google.com808e0e02011-08-03 21:08:51 +0000154 render_audio_ = new AudioBuffer(num_reverse_channels_,
niklase@google.com470e71d2011-07-07 08:21:25 +0000155 samples_per_channel_);
ajm@google.com808e0e02011-08-03 21:08:51 +0000156 capture_audio_ = new AudioBuffer(num_input_channels_,
niklase@google.com470e71d2011-07-07 08:21:25 +0000157 samples_per_channel_);
158
159 was_stream_delay_set_ = false;
160
161 // Initialize all components.
162 std::list<ProcessingComponent*>::iterator it;
163 for (it = component_list_.begin(); it != component_list_.end(); it++) {
164 int err = (*it)->Initialize();
165 if (err != kNoError) {
166 return err;
167 }
168 }
169
ajm@google.com808e0e02011-08-03 21:08:51 +0000170 if (debug_file_->Open()) {
171 int err = WriteInitMessage();
172 if (err != kNoError) {
173 return err;
174 }
175 }
176
niklase@google.com470e71d2011-07-07 08:21:25 +0000177 return kNoError;
178}
179
180int AudioProcessingImpl::set_sample_rate_hz(int rate) {
181 CriticalSectionScoped crit_scoped(*crit_);
182 if (rate != kSampleRate8kHz &&
183 rate != kSampleRate16kHz &&
184 rate != kSampleRate32kHz) {
185 return kBadParameterError;
186 }
187
188 sample_rate_hz_ = rate;
189 samples_per_channel_ = rate / 100;
190
191 if (sample_rate_hz_ == kSampleRate32kHz) {
192 split_sample_rate_hz_ = kSampleRate16kHz;
193 } else {
194 split_sample_rate_hz_ = sample_rate_hz_;
195 }
196
197 return InitializeLocked();
198}
199
// Current full-band sample rate in Hz.
int AudioProcessingImpl::sample_rate_hz() const {
  return sample_rate_hz_;
}
203
204int AudioProcessingImpl::set_num_reverse_channels(int channels) {
205 CriticalSectionScoped crit_scoped(*crit_);
206 // Only stereo supported currently.
207 if (channels > 2 || channels < 1) {
208 return kBadParameterError;
209 }
210
ajm@google.com808e0e02011-08-03 21:08:51 +0000211 num_reverse_channels_ = channels;
niklase@google.com470e71d2011-07-07 08:21:25 +0000212
213 return InitializeLocked();
214}
215
// Current render (far-end) channel count.
int AudioProcessingImpl::num_reverse_channels() const {
  return num_reverse_channels_;
}
219
220int AudioProcessingImpl::set_num_channels(
221 int input_channels,
222 int output_channels) {
223 CriticalSectionScoped crit_scoped(*crit_);
224 if (output_channels > input_channels) {
225 return kBadParameterError;
226 }
227
228 // Only stereo supported currently.
229 if (input_channels > 2 || input_channels < 1) {
230 return kBadParameterError;
231 }
232
233 if (output_channels > 2 || output_channels < 1) {
234 return kBadParameterError;
235 }
236
ajm@google.com808e0e02011-08-03 21:08:51 +0000237 num_input_channels_ = input_channels;
238 num_output_channels_ = output_channels;
niklase@google.com470e71d2011-07-07 08:21:25 +0000239
240 return InitializeLocked();
241}
242
// Capture-side input channel count.
int AudioProcessingImpl::num_input_channels() const {
  return num_input_channels_;
}

// Capture-side output channel count (<= num_input_channels()).
int AudioProcessingImpl::num_output_channels() const {
  return num_output_channels_;
}
250
251int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
252 CriticalSectionScoped crit_scoped(*crit_);
253 int err = kNoError;
254
255 if (frame == NULL) {
256 return kNullPointerError;
257 }
258
xians@google.com0b0665a2011-08-08 08:18:44 +0000259 if (frame->_frequencyInHz != sample_rate_hz_) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000260 return kBadSampleRateError;
261 }
262
ajm@google.com808e0e02011-08-03 21:08:51 +0000263 if (frame->_audioChannel != num_input_channels_) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000264 return kBadNumberChannelsError;
265 }
266
267 if (frame->_payloadDataLengthInSamples != samples_per_channel_) {
268 return kBadDataLengthError;
269 }
270
271 if (debug_file_->Open()) {
ajm@google.com808e0e02011-08-03 21:08:51 +0000272 event_msg_->set_type(audioproc::Event::STREAM);
273 audioproc::Stream* msg = event_msg_->mutable_stream();
274 const size_t data_size = sizeof(WebRtc_Word16) *
275 frame->_payloadDataLengthInSamples *
276 frame->_audioChannel;
277 msg->set_input_data(frame->_payloadData, data_size);
278 msg->set_delay(stream_delay_ms_);
279 msg->set_drift(echo_cancellation_->stream_drift_samples());
280 msg->set_level(gain_control_->stream_analog_level());
niklase@google.com470e71d2011-07-07 08:21:25 +0000281 }
282
283 capture_audio_->DeinterleaveFrom(frame);
284
285 // TODO(ajm): experiment with mixing and AEC placement.
ajm@google.com808e0e02011-08-03 21:08:51 +0000286 if (num_output_channels_ < num_input_channels_) {
287 capture_audio_->Mix(num_output_channels_);
niklase@google.com470e71d2011-07-07 08:21:25 +0000288
ajm@google.com808e0e02011-08-03 21:08:51 +0000289 frame->_audioChannel = num_output_channels_;
niklase@google.com470e71d2011-07-07 08:21:25 +0000290 }
291
292 if (sample_rate_hz_ == kSampleRate32kHz) {
ajm@google.com808e0e02011-08-03 21:08:51 +0000293 for (int i = 0; i < num_input_channels_; i++) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000294 // Split into a low and high band.
295 SplittingFilterAnalysis(capture_audio_->data(i),
296 capture_audio_->low_pass_split_data(i),
297 capture_audio_->high_pass_split_data(i),
298 capture_audio_->analysis_filter_state1(i),
299 capture_audio_->analysis_filter_state2(i));
300 }
301 }
302
303 err = high_pass_filter_->ProcessCaptureAudio(capture_audio_);
304 if (err != kNoError) {
305 return err;
306 }
307
308 err = gain_control_->AnalyzeCaptureAudio(capture_audio_);
309 if (err != kNoError) {
310 return err;
311 }
312
313 err = echo_cancellation_->ProcessCaptureAudio(capture_audio_);
314 if (err != kNoError) {
315 return err;
316 }
317
318 if (echo_control_mobile_->is_enabled() &&
319 noise_suppression_->is_enabled()) {
320 capture_audio_->CopyLowPassToReference();
321 }
322
323 err = noise_suppression_->ProcessCaptureAudio(capture_audio_);
324 if (err != kNoError) {
325 return err;
326 }
327
328 err = echo_control_mobile_->ProcessCaptureAudio(capture_audio_);
329 if (err != kNoError) {
330 return err;
331 }
332
333 err = voice_detection_->ProcessCaptureAudio(capture_audio_);
334 if (err != kNoError) {
335 return err;
336 }
337
338 err = gain_control_->ProcessCaptureAudio(capture_audio_);
339 if (err != kNoError) {
340 return err;
341 }
342
343 //err = level_estimator_->ProcessCaptureAudio(capture_audio_);
344 //if (err != kNoError) {
345 // return err;
346 //}
347
348 if (sample_rate_hz_ == kSampleRate32kHz) {
ajm@google.com808e0e02011-08-03 21:08:51 +0000349 for (int i = 0; i < num_output_channels_; i++) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000350 // Recombine low and high bands.
351 SplittingFilterSynthesis(capture_audio_->low_pass_split_data(i),
352 capture_audio_->high_pass_split_data(i),
353 capture_audio_->data(i),
354 capture_audio_->synthesis_filter_state1(i),
355 capture_audio_->synthesis_filter_state2(i));
356 }
357 }
358
359 capture_audio_->InterleaveTo(frame);
360
ajm@google.com808e0e02011-08-03 21:08:51 +0000361 if (debug_file_->Open()) {
362 audioproc::Stream* msg = event_msg_->mutable_stream();
363 const size_t data_size = sizeof(WebRtc_Word16) *
364 frame->_payloadDataLengthInSamples *
365 frame->_audioChannel;
366 msg->set_output_data(frame->_payloadData, data_size);
367 err = WriteMessageToDebugFile();
368 if (err != kNoError) {
369 return err;
370 }
371 }
372
niklase@google.com470e71d2011-07-07 08:21:25 +0000373 return kNoError;
374}
375
// Analyzes one 10 ms render-side (far-end) frame without modifying it:
// validates the frame, optionally logs it, splits into bands at 32 kHz, and
// feeds it to the render paths of AEC, AECM and AGC. Also clears the
// stream-delay flag, so the delay presumably must be set again before each
// ProcessStream() call — TODO confirm against callers.
int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(*crit_);
  int err = kNoError;

  if (frame == NULL) {
    return kNullPointerError;
  }

  if (frame->_frequencyInHz != sample_rate_hz_) {
    return kBadSampleRateError;
  }

  if (frame->_audioChannel != num_reverse_channels_) {
    return kBadNumberChannelsError;
  }

  if (frame->_payloadDataLengthInSamples != samples_per_channel_) {
    return kBadDataLengthError;
  }

  // Log the raw reverse stream when a debug recording is active.
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t data_size = sizeof(WebRtc_Word16) *
                             frame->_payloadDataLengthInSamples *
                             frame->_audioChannel;
    msg->set_data(frame->_payloadData, data_size);
    err = WriteMessageToDebugFile();
    if (err != kNoError) {
      return err;
    }
  }

  render_audio_->DeinterleaveFrom(frame);

  // TODO(ajm): turn the splitting filter into a component?
  if (sample_rate_hz_ == kSampleRate32kHz) {
    for (int i = 0; i < num_reverse_channels_; i++) {
      // Split into low and high band.
      SplittingFilterAnalysis(render_audio_->data(i),
                              render_audio_->low_pass_split_data(i),
                              render_audio_->high_pass_split_data(i),
                              render_audio_->analysis_filter_state1(i),
                              render_audio_->analysis_filter_state2(i));
    }
  }

  // TODO(ajm): warnings possible from components?
  err = echo_cancellation_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  err = echo_control_mobile_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  err = gain_control_->ProcessRenderAudio(render_audio_);
  if (err != kNoError) {
    return err;
  }

  // Level estimation is currently disabled in this revision.
  //err = level_estimator_->AnalyzeReverseStream(render_audio_);
  //if (err != kNoError) {
  //  return err;
  //}

  was_stream_delay_set_ = false;
  return err;  // TODO(ajm): this is for returning warnings; necessary?
}
447
448int AudioProcessingImpl::set_stream_delay_ms(int delay) {
449 was_stream_delay_set_ = true;
450 if (delay < 0) {
451 return kBadParameterError;
452 }
453
454 // TODO(ajm): the max is rather arbitrarily chosen; investigate.
455 if (delay > 500) {
456 stream_delay_ms_ = 500;
457 return kBadStreamParameterWarning;
458 }
459
460 stream_delay_ms_ = delay;
461 return kNoError;
462}
463
// Last accepted (possibly clamped) stream delay in milliseconds.
int AudioProcessingImpl::stream_delay_ms() const {
  return stream_delay_ms_;
}

// True once set_stream_delay_ms() has been called; cleared by
// AnalyzeReverseStream() and by re-initialization.
bool AudioProcessingImpl::was_stream_delay_set() const {
  return was_stream_delay_set_;
}
471
472int AudioProcessingImpl::StartDebugRecording(
473 const char filename[AudioProcessing::kMaxFilenameSize]) {
474 CriticalSectionScoped crit_scoped(*crit_);
475 assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize);
476
477 if (filename == NULL) {
478 return kNullPointerError;
479 }
480
481 // Stop any ongoing recording.
482 if (debug_file_->Open()) {
483 if (debug_file_->CloseFile() == -1) {
484 return kFileError;
485 }
486 }
487
488 if (debug_file_->OpenFile(filename, false) == -1) {
489 debug_file_->CloseFile();
490 return kFileError;
491 }
492
ajm@google.com808e0e02011-08-03 21:08:51 +0000493 int err = WriteInitMessage();
494 if (err != kNoError) {
495 return err;
niklase@google.com470e71d2011-07-07 08:21:25 +0000496 }
497
498 return kNoError;
499}
500
501int AudioProcessingImpl::StopDebugRecording() {
502 CriticalSectionScoped crit_scoped(*crit_);
503 // We just return if recording hasn't started.
504 if (debug_file_->Open()) {
505 if (debug_file_->CloseFile() == -1) {
506 return kFileError;
507 }
508 }
509
510 return kNoError;
511}
512
// Accessors for the individual processing components. The returned pointers
// are owned by the APM and remain valid for its lifetime.
EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
  return echo_cancellation_;
}

EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
  return echo_control_mobile_;
}

GainControl* AudioProcessingImpl::gain_control() const {
  return gain_control_;
}

HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
  return high_pass_filter_;
}

LevelEstimator* AudioProcessingImpl::level_estimator() const {
  return level_estimator_;
}

NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
  return noise_suppression_;
}

VoiceDetection* AudioProcessingImpl::voice_detection() const {
  return voice_detection_;
}
540
// Writes the module version string followed by one newline-prefixed entry per
// component into |version| starting at |position|. |bytes_remaining| and
// |position| are updated to reflect the bytes consumed. Returns
// kNullPointerError for a NULL buffer and kBadParameterError when the buffer
// is too small.
WebRtc_Word32 AudioProcessingImpl::Version(WebRtc_Word8* version,
    WebRtc_UWord32& bytes_remaining, WebRtc_UWord32& position) const {
  if (version == NULL) {
    /*WEBRTC_TRACE(webrtc::kTraceError,
                 webrtc::kTraceAudioProcessing,
                 -1,
                 "Null version pointer");*/
    return kNullPointerError;
  }
  // Zero the writable region up front so every string copied below stays
  // null terminated without copying the terminator explicitly.
  memset(&version[position], 0, bytes_remaining);

  char my_version[] = "AudioProcessing 1.0.0";
  // strlen() excludes the null terminator; termination is provided by the
  // memset above.
  WebRtc_UWord32 length = static_cast<WebRtc_UWord32>(strlen(my_version));
  if (bytes_remaining < length) {
    /*WEBRTC_TRACE(webrtc::kTraceError,
                 webrtc::kTraceAudioProcessing,
                 -1,
                 "Buffer of insufficient length");*/
    return kBadParameterError;
  }
  memcpy(&version[position], my_version, length);
  bytes_remaining -= length;
  position += length;

  std::list<ProcessingComponent*>::const_iterator it;
  for (it = component_list_.begin(); it != component_list_.end(); it++) {
    // Each component version is prefixed with a newline at index 0.
    char component_version[256];
    strcpy(component_version, "\n");
    int err = (*it)->get_version(&component_version[1],
                                 sizeof(component_version) - 1);
    if (err != kNoError) {
      return err;
    }
    if (strncmp(&component_version[1], "\0", 1) == 0) {
      // Assume empty if first byte is NULL.
      continue;
    }

    length = static_cast<WebRtc_UWord32>(strlen(component_version));
    if (bytes_remaining < length) {
      /*WEBRTC_TRACE(webrtc::kTraceError,
                   webrtc::kTraceAudioProcessing,
                   -1,
                   "Buffer of insufficient length");*/
      return kBadParameterError;
    }
    memcpy(&version[position], component_version, length);
    bytes_remaining -= length;
    position += length;
  }

  return kNoError;
}
595
596WebRtc_Word32 AudioProcessingImpl::ChangeUniqueId(const WebRtc_Word32 id) {
597 CriticalSectionScoped crit_scoped(*crit_);
598 /*WEBRTC_TRACE(webrtc::kTraceModuleCall,
599 webrtc::kTraceAudioProcessing,
600 id_,
601 "ChangeUniqueId(new id = %d)",
602 id);*/
603 id_ = id;
604
605 return kNoError;
606}
ajm@google.com808e0e02011-08-03 21:08:51 +0000607
608int AudioProcessingImpl::WriteMessageToDebugFile() {
609 int32_t size = event_msg_->ByteSize();
610 if (size <= 0) {
611 return kUnspecifiedError;
612 }
613#if defined(WEBRTC_BIG_ENDIAN)
614 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
615 // pretty safe in assuming little-endian.
616#endif
617
618 if (!event_msg_->SerializeToString(&event_str_)) {
619 return kUnspecifiedError;
620 }
621
622 // Write message preceded by its size.
623 if (!debug_file_->Write(&size, sizeof(int32_t))) {
624 return kFileError;
625 }
626 if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
627 return kFileError;
628 }
629
630 event_msg_->Clear();
631
632 return 0;
633}
634
635int AudioProcessingImpl::WriteInitMessage() {
636 event_msg_->set_type(audioproc::Event::INIT);
637 audioproc::Init* msg = event_msg_->mutable_init();
638 msg->set_sample_rate(sample_rate_hz_);
639 msg->set_device_sample_rate(echo_cancellation_->device_sample_rate_hz());
640 msg->set_num_input_channels(num_input_channels_);
641 msg->set_num_output_channels(num_output_channels_);
642 msg->set_num_reverse_channels(num_reverse_channels_);
643
644 int err = WriteMessageToDebugFile();
645 if (err != kNoError) {
646 return err;
647 }
648
649 return kNoError;
650}
niklase@google.com470e71d2011-07-07 08:21:25 +0000651} // namespace webrtc