blob: e19cfec9735179e9ca0dff824ac43c2af3eb8529 [file] [log] [blame]
niklase@google.com470e71d2011-07-07 08:21:25 +00001/*
andrew@webrtc.org40654032012-01-30 20:51:15 +00002 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
niklase@google.com470e71d2011-07-07 08:21:25 +00003 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
andrew@webrtc.org78693fe2013-03-01 16:36:19 +000011#include "webrtc/modules/audio_processing/audio_processing_impl.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000012
ajm@google.com808e0e02011-08-03 21:08:51 +000013#include <assert.h>
niklase@google.com470e71d2011-07-07 08:21:25 +000014
andrew@webrtc.org17e40642014-03-04 20:58:13 +000015#include "webrtc/common_audio/include/audio_util.h"
andrew@webrtc.org60730cf2014-01-07 17:45:09 +000016#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
andrew@webrtc.org78693fe2013-03-01 16:36:19 +000017#include "webrtc/modules/audio_processing/audio_buffer.h"
andrew@webrtc.org56e4a052014-02-27 22:23:17 +000018#include "webrtc/modules/audio_processing/echo_cancellation_impl.h"
andrew@webrtc.org78693fe2013-03-01 16:36:19 +000019#include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
20#include "webrtc/modules/audio_processing/gain_control_impl.h"
21#include "webrtc/modules/audio_processing/high_pass_filter_impl.h"
22#include "webrtc/modules/audio_processing/level_estimator_impl.h"
23#include "webrtc/modules/audio_processing/noise_suppression_impl.h"
24#include "webrtc/modules/audio_processing/processing_component.h"
andrew@webrtc.org78693fe2013-03-01 16:36:19 +000025#include "webrtc/modules/audio_processing/voice_detection_impl.h"
26#include "webrtc/modules/interface/module_common_types.h"
andrew@webrtc.org60730cf2014-01-07 17:45:09 +000027#include "webrtc/system_wrappers/interface/compile_assert.h"
andrew@webrtc.org78693fe2013-03-01 16:36:19 +000028#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
29#include "webrtc/system_wrappers/interface/file_wrapper.h"
30#include "webrtc/system_wrappers/interface/logging.h"
andrew@webrtc.org7bf26462011-12-03 00:03:31 +000031
32#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
33// Files generated at build-time by the protobuf compiler.
leozwang@webrtc.orga3736342012-03-16 21:36:00 +000034#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
leozwang@webrtc.org534e4952012-10-22 21:21:52 +000035#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000036#else
ajm@google.com808e0e02011-08-03 21:08:51 +000037#include "webrtc/audio_processing/debug.pb.h"
leozwang@google.comce9bfbb2011-08-03 23:34:31 +000038#endif
andrew@webrtc.org7bf26462011-12-03 00:03:31 +000039#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +000040
// Evaluates |expr| exactly once and propagates any failure by returning the
// error code from the enclosing function. Success must be kNoError (== 0),
// which is enforced by the COMPILE_ASSERT below.
//
// The do { } while (0) wrapper makes the macro expand to a single statement
// (safe inside unbraced if/else), and |expr| is parenthesized so that
// low-precedence operators in the argument cannot rebind against the
// surrounding initialization. The local uses an unlikely name to avoid
// shadowing or capturing an |err| variable at the call site.
#define RETURN_ON_ERR(expr)                    \
  do {                                         \
    int return_on_err_code_ = (expr);          \
    if (return_on_err_code_ != kNoError) {     \
      return return_on_err_code_;              \
    }                                          \
  } while (0)
48
niklase@google.com470e71d2011-07-07 08:21:25 +000049namespace webrtc {
andrew@webrtc.org17e40642014-03-04 20:58:13 +000050namespace {
51
52const int kChunkSizeMs = 10;
53
54int ChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
55 switch (layout) {
56 case AudioProcessing::kMono:
57 case AudioProcessing::kMonoAndKeyboard:
58 return 1;
59 case AudioProcessing::kStereo:
60 case AudioProcessing::kStereoAndKeyboard:
61 return 2;
62 }
63 assert(false);
64 return -1;
65}
66
67} // namespace
andrew@webrtc.org60730cf2014-01-07 17:45:09 +000068
// Throughout webrtc, it's assumed that success is represented by zero.
// (RETURN_ON_ERR above and the component error paths rely on this.)
COMPILE_ASSERT(AudioProcessing::kNoError == 0, no_error_must_be_zero);
71
niklase@google.com470e71d2011-07-07 08:21:25 +000072AudioProcessing* AudioProcessing::Create(int id) {
andrew@webrtc.orge84978f2014-01-25 02:09:06 +000073 return Create();
74}
75
76AudioProcessing* AudioProcessing::Create() {
77 Config config;
78 return Create(config);
79}
80
81AudioProcessing* AudioProcessing::Create(const Config& config) {
82 AudioProcessingImpl* apm = new AudioProcessingImpl(config);
niklase@google.com470e71d2011-07-07 08:21:25 +000083 if (apm->Initialize() != kNoError) {
84 delete apm;
85 apm = NULL;
86 }
87
88 return apm;
89}
90
// Constructs the APM with default parameters (16 kHz, mono forward and
// reverse paths) and instantiates every processing component, registering
// each in component_list_ so Initialize()/the destructor can iterate them.
// The audio buffers themselves are created lazily in InitializeLocked().
AudioProcessingImpl::AudioProcessingImpl(const Config& config)
    : echo_cancellation_(NULL),
      echo_control_mobile_(NULL),
      gain_control_(NULL),
      high_pass_filter_(NULL),
      level_estimator_(NULL),
      noise_suppression_(NULL),
      voice_detection_(NULL),
      crit_(CriticalSectionWrapper::CreateCriticalSection()),
      render_audio_(NULL),
      capture_audio_(NULL),
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
      debug_file_(FileWrapper::Create()),
      event_msg_(new audioproc::Event()),
#endif
      sample_rate_hz_(kSampleRate16kHz),
      reverse_sample_rate_hz_(kSampleRate16kHz),
      split_sample_rate_hz_(kSampleRate16kHz),
      // 10 ms worth of samples at the default rates.
      samples_per_channel_(kChunkSizeMs * sample_rate_hz_ / 1000),
      reverse_samples_per_channel_(
          kChunkSizeMs * reverse_sample_rate_hz_ / 1000),
      stream_delay_ms_(0),
      delay_offset_ms_(0),
      was_stream_delay_set_(false),
      num_reverse_channels_(1),
      num_input_channels_(1),
      num_output_channels_(1),
      output_will_be_muted_(false),
      key_pressed_(false) {
  // Each component receives |this| and the shared lock; ownership stays with
  // this class (released in the destructor via component_list_).
  echo_cancellation_ = new EchoCancellationImpl(this, crit_);
  component_list_.push_back(echo_cancellation_);

  echo_control_mobile_ = new EchoControlMobileImpl(this, crit_);
  component_list_.push_back(echo_control_mobile_);

  gain_control_ = new GainControlImpl(this, crit_);
  component_list_.push_back(gain_control_);

  high_pass_filter_ = new HighPassFilterImpl(this, crit_);
  component_list_.push_back(high_pass_filter_);

  level_estimator_ = new LevelEstimatorImpl(this, crit_);
  component_list_.push_back(level_estimator_);

  noise_suppression_ = new NoiseSuppressionImpl(this, crit_);
  component_list_.push_back(noise_suppression_);

  voice_detection_ = new VoiceDetectionImpl(this, crit_);
  component_list_.push_back(voice_detection_);

  // Apply any caller-provided options to all components.
  SetExtraOptions(config);
}
143
// Tears down all components and buffers under the lock, then deletes the
// lock itself. The inner scope is deliberate: crit_ must be released before
// it is deleted.
AudioProcessingImpl::~AudioProcessingImpl() {
  {
    CriticalSectionScoped crit_scoped(crit_);
    // Destroy() then delete each registered component.
    while (!component_list_.empty()) {
      ProcessingComponent* component = component_list_.front();
      component->Destroy();
      delete component;
      component_list_.pop_front();
    }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
    // Flush and close any active debug recording.
    if (debug_file_->Open()) {
      debug_file_->CloseFile();
    }
#endif

    if (render_audio_) {
      delete render_audio_;
      render_audio_ = NULL;
    }

    if (capture_audio_) {
      delete capture_audio_;
      capture_audio_ = NULL;
    }
  }

  // Safe only after the scoped lock above has released crit_.
  delete crit_;
  crit_ = NULL;
}
174
niklase@google.com470e71d2011-07-07 08:21:25 +0000175int AudioProcessingImpl::split_sample_rate_hz() const {
176 return split_sample_rate_hz_;
177}
178
179int AudioProcessingImpl::Initialize() {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000180 CriticalSectionScoped crit_scoped(crit_);
niklase@google.com470e71d2011-07-07 08:21:25 +0000181 return InitializeLocked();
182}
183
andrew@webrtc.orga8b97372014-03-10 22:26:12 +0000184int AudioProcessingImpl::Initialize(int sample_rate_hz,
185 int reverse_sample_rate_hz,
186 int num_input_channels,
187 int num_output_channels,
188 int num_reverse_channels) {
189 CriticalSectionScoped crit_scoped(crit_);
190 return InitializeLocked(sample_rate_hz,
191 reverse_sample_rate_hz,
192 num_input_channels,
193 num_output_channels,
194 num_reverse_channels);
195}
196
niklase@google.com470e71d2011-07-07 08:21:25 +0000197int AudioProcessingImpl::InitializeLocked() {
198 if (render_audio_ != NULL) {
199 delete render_audio_;
200 render_audio_ = NULL;
201 }
202
203 if (capture_audio_ != NULL) {
204 delete capture_audio_;
205 capture_audio_ = NULL;
206 }
207
ajm@google.com808e0e02011-08-03 21:08:51 +0000208 render_audio_ = new AudioBuffer(num_reverse_channels_,
andrew@webrtc.orga8b97372014-03-10 22:26:12 +0000209 reverse_samples_per_channel_);
ajm@google.com808e0e02011-08-03 21:08:51 +0000210 capture_audio_ = new AudioBuffer(num_input_channels_,
niklase@google.com470e71d2011-07-07 08:21:25 +0000211 samples_per_channel_);
212
niklase@google.com470e71d2011-07-07 08:21:25 +0000213 // Initialize all components.
214 std::list<ProcessingComponent*>::iterator it;
andrew@webrtc.org81865342012-10-27 00:28:27 +0000215 for (it = component_list_.begin(); it != component_list_.end(); ++it) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000216 int err = (*it)->Initialize();
217 if (err != kNoError) {
218 return err;
219 }
220 }
221
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000222#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
ajm@google.com808e0e02011-08-03 21:08:51 +0000223 if (debug_file_->Open()) {
224 int err = WriteInitMessage();
225 if (err != kNoError) {
226 return err;
227 }
228 }
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000229#endif
ajm@google.com808e0e02011-08-03 21:08:51 +0000230
niklase@google.com470e71d2011-07-07 08:21:25 +0000231 return kNoError;
232}
233
// Validates the requested configuration, stores it, and delegates to
// InitializeLocked(). The order of the checks determines which error code a
// caller sees when several parameters are bad. Caller must hold crit_.
int AudioProcessingImpl::InitializeLocked(int sample_rate_hz,
                                          int reverse_sample_rate_hz,
                                          int num_input_channels,
                                          int num_output_channels,
                                          int num_reverse_channels) {
  // Only 8, 16 and 32 kHz are supported on either path.
  if (sample_rate_hz != kSampleRate8kHz &&
      sample_rate_hz != kSampleRate16kHz &&
      sample_rate_hz != kSampleRate32kHz) {
    return kBadSampleRateError;
  }
  if (reverse_sample_rate_hz != kSampleRate8kHz &&
      reverse_sample_rate_hz != kSampleRate16kHz &&
      reverse_sample_rate_hz != kSampleRate32kHz) {
    return kBadSampleRateError;
  }
  // TODO(ajm): The reverse sample rate is constrained to be identical to the
  // forward rate for now.
  if (reverse_sample_rate_hz != sample_rate_hz) {
    return kBadSampleRateError;
  }
  // Downmix only: never more output than input channels.
  if (num_output_channels > num_input_channels) {
    return kBadNumberChannelsError;
  }
  // Only mono and stereo supported currently.
  if (num_input_channels > 2 || num_input_channels < 1 ||
      num_output_channels > 2 || num_output_channels < 1 ||
      num_reverse_channels > 2 || num_reverse_channels < 1) {
    return kBadNumberChannelsError;
  }
  if (echo_control_mobile_->is_enabled() && sample_rate_hz > kSampleRate16kHz) {
    LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
    return kUnsupportedComponentError;
  }

  sample_rate_hz_ = sample_rate_hz;
  reverse_sample_rate_hz_ = reverse_sample_rate_hz;
  // Samples per 10 ms chunk on each path.
  reverse_samples_per_channel_ = kChunkSizeMs * reverse_sample_rate_hz / 1000;
  samples_per_channel_ = kChunkSizeMs * sample_rate_hz / 1000;
  num_input_channels_ = num_input_channels;
  num_output_channels_ = num_output_channels;
  num_reverse_channels_ = num_reverse_channels;

  // At 32 kHz, processing runs on two 16 kHz bands.
  if (sample_rate_hz_ == kSampleRate32kHz) {
    split_sample_rate_hz_ = kSampleRate16kHz;
  } else {
    split_sample_rate_hz_ = sample_rate_hz_;
  }

  return InitializeLocked();
}
284
285// Calls InitializeLocked() if any of the audio parameters have changed from
286// their current values.
287int AudioProcessingImpl::MaybeInitializeLocked(int sample_rate_hz,
288 int reverse_sample_rate_hz,
289 int num_input_channels,
290 int num_output_channels,
291 int num_reverse_channels) {
292 if (sample_rate_hz == sample_rate_hz_ &&
293 reverse_sample_rate_hz == reverse_sample_rate_hz_ &&
294 num_input_channels == num_input_channels_ &&
295 num_output_channels == num_output_channels_ &&
296 num_reverse_channels == num_reverse_channels_) {
297 return kNoError;
298 }
299
300 return InitializeLocked(sample_rate_hz,
301 reverse_sample_rate_hz,
302 num_input_channels,
303 num_output_channels,
304 num_reverse_channels);
305}
306
andrew@webrtc.org61e596f2013-07-25 18:28:29 +0000307void AudioProcessingImpl::SetExtraOptions(const Config& config) {
andrew@webrtc.orge84978f2014-01-25 02:09:06 +0000308 CriticalSectionScoped crit_scoped(crit_);
andrew@webrtc.org61e596f2013-07-25 18:28:29 +0000309 std::list<ProcessingComponent*>::iterator it;
310 for (it = component_list_.begin(); it != component_list_.end(); ++it)
311 (*it)->SetExtraOptions(config);
312}
313
aluebs@webrtc.org0b72f582013-11-19 15:17:51 +0000314int AudioProcessingImpl::EnableExperimentalNs(bool enable) {
315 return kNoError;
316}
317
niklase@google.com470e71d2011-07-07 08:21:25 +0000318int AudioProcessingImpl::set_sample_rate_hz(int rate) {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000319 CriticalSectionScoped crit_scoped(crit_);
andrew@webrtc.org81865342012-10-27 00:28:27 +0000320 if (rate == sample_rate_hz_) {
321 return kNoError;
322 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000323 if (rate != kSampleRate8kHz &&
324 rate != kSampleRate16kHz &&
325 rate != kSampleRate32kHz) {
326 return kBadParameterError;
327 }
andrew@webrtc.org78693fe2013-03-01 16:36:19 +0000328 if (echo_control_mobile_->is_enabled() && rate > kSampleRate16kHz) {
329 LOG(LS_ERROR) << "AECM only supports 16 kHz or lower sample rates";
330 return kUnsupportedComponentError;
331 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000332
333 sample_rate_hz_ = rate;
334 samples_per_channel_ = rate / 100;
335
336 if (sample_rate_hz_ == kSampleRate32kHz) {
337 split_sample_rate_hz_ = kSampleRate16kHz;
338 } else {
339 split_sample_rate_hz_ = sample_rate_hz_;
340 }
341
342 return InitializeLocked();
343}
344
345int AudioProcessingImpl::sample_rate_hz() const {
henrika@webrtc.org19da7192013-04-05 14:34:57 +0000346 CriticalSectionScoped crit_scoped(crit_);
niklase@google.com470e71d2011-07-07 08:21:25 +0000347 return sample_rate_hz_;
348}
349
350int AudioProcessingImpl::set_num_reverse_channels(int channels) {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000351 CriticalSectionScoped crit_scoped(crit_);
andrew@webrtc.org81865342012-10-27 00:28:27 +0000352 if (channels == num_reverse_channels_) {
353 return kNoError;
354 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000355 // Only stereo supported currently.
356 if (channels > 2 || channels < 1) {
357 return kBadParameterError;
358 }
359
ajm@google.com808e0e02011-08-03 21:08:51 +0000360 num_reverse_channels_ = channels;
niklase@google.com470e71d2011-07-07 08:21:25 +0000361
362 return InitializeLocked();
363}
364
365int AudioProcessingImpl::num_reverse_channels() const {
ajm@google.com808e0e02011-08-03 21:08:51 +0000366 return num_reverse_channels_;
niklase@google.com470e71d2011-07-07 08:21:25 +0000367}
368
369int AudioProcessingImpl::set_num_channels(
370 int input_channels,
371 int output_channels) {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000372 CriticalSectionScoped crit_scoped(crit_);
andrew@webrtc.org81865342012-10-27 00:28:27 +0000373 if (input_channels == num_input_channels_ &&
374 output_channels == num_output_channels_) {
375 return kNoError;
376 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000377 if (output_channels > input_channels) {
378 return kBadParameterError;
379 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000380 // Only stereo supported currently.
andrew@webrtc.org81865342012-10-27 00:28:27 +0000381 if (input_channels > 2 || input_channels < 1 ||
382 output_channels > 2 || output_channels < 1) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000383 return kBadParameterError;
384 }
385
ajm@google.com808e0e02011-08-03 21:08:51 +0000386 num_input_channels_ = input_channels;
387 num_output_channels_ = output_channels;
niklase@google.com470e71d2011-07-07 08:21:25 +0000388
389 return InitializeLocked();
390}
391
392int AudioProcessingImpl::num_input_channels() const {
ajm@google.com808e0e02011-08-03 21:08:51 +0000393 return num_input_channels_;
niklase@google.com470e71d2011-07-07 08:21:25 +0000394}
395
396int AudioProcessingImpl::num_output_channels() const {
ajm@google.com808e0e02011-08-03 21:08:51 +0000397 return num_output_channels_;
niklase@google.com470e71d2011-07-07 08:21:25 +0000398}
399
andrew@webrtc.org17342e52014-02-12 22:28:31 +0000400void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
401 output_will_be_muted_ = muted;
402}
403
404bool AudioProcessingImpl::output_will_be_muted() const {
405 return output_will_be_muted_;
406}
407
// Processes one 10 ms chunk of deinterleaved float capture audio in place.
// Re-initializes on the fly if the rate or layout changed, optionally logs
// input/output to the debug dump, and runs the full capture pipeline via
// ProcessStreamLocked(). Returns kNoError or the first error encountered.
int AudioProcessingImpl::ProcessStream(float* const* data,
                                       int samples_per_channel,
                                       int sample_rate_hz,
                                       ChannelLayout input_layout,
                                       ChannelLayout output_layout) {
  CriticalSectionScoped crit_scoped(crit_);
  if (!data) {
    return kNullPointerError;
  }

  const int num_input_channels = ChannelsFromLayout(input_layout);
  // TODO(ajm): We now always set the output channels equal to the input
  // channels here. Restore the ability to downmix.
  // TODO(ajm): The reverse sample rate is constrained to be identical to the
  // forward rate for now.
  RETURN_ON_ERR(MaybeInitializeLocked(sample_rate_hz, sample_rate_hz,
      num_input_channels, num_input_channels, num_reverse_channels_));
  // Exactly one 10 ms chunk at the configured rate is required.
  if (samples_per_channel != samples_per_channel_) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Record the unprocessed input; the message is completed and written below.
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t channel_size = sizeof(float) * samples_per_channel;
    for (int i = 0; i < num_input_channels; ++i)
      msg->add_input_channel(data[i], channel_size);
  }
#endif

  // num_output_channels_ == num_input_channels here (see MaybeInitialize
  // call above).
  capture_audio_->CopyFrom(data, samples_per_channel, num_output_channels_);
  RETURN_ON_ERR(ProcessStreamLocked());
  // Copy back only if some component actually modified the audio.
  if (output_copy_needed(is_data_processed())) {
    capture_audio_->CopyTo(samples_per_channel, num_output_channels_, data);
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Record the processed output and flush the event.
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t channel_size = sizeof(float) * samples_per_channel;
    for (int i = 0; i < num_output_channels_; ++i)
      msg->add_output_channel(data[i], channel_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  return kNoError;
}
457
// Processes one 10 ms AudioFrame (interleaved int16) in place. Mirrors the
// float overload: lazy re-initialization, optional debug dump of input and
// output, then the capture pipeline via ProcessStreamLocked().
int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  if (!frame) {
    return kNullPointerError;
  }

  // TODO(ajm): We now always set the output channels equal to the input
  // channels here. Restore the ability to downmix.
  // TODO(ajm): The reverse sample rate is constrained to be identical to the
  // forward rate for now.
  RETURN_ON_ERR(MaybeInitializeLocked(frame->sample_rate_hz_,
      frame->sample_rate_hz_, frame->num_channels_, frame->num_channels_,
      num_reverse_channels_));
  // Exactly one 10 ms chunk at the configured rate is required.
  if (frame->samples_per_channel_ != samples_per_channel_) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Record the unprocessed input; the message is completed and written below.
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_input_data(frame->data_, data_size);
  }
#endif

  capture_audio_->DeinterleaveFrom(frame);
  // Downmix path; with the MaybeInitializeLocked call above the counts are
  // equal, so this branch is currently not taken (see TODO above).
  if (num_output_channels_ < num_input_channels_) {
    capture_audio_->Mix(num_output_channels_);
    frame->num_channels_ = num_output_channels_;
  }
  RETURN_ON_ERR(ProcessStreamLocked());
  // Interleave back only if some component actually modified the audio.
  capture_audio_->InterleaveTo(frame, output_copy_needed(is_data_processed()));

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Record the processed output and flush the event.
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_output_data(frame->data_, data_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  return kNoError;
}
507
508
// Runs the capture-side component pipeline on capture_audio_. The component
// order below is the processing contract: HPF -> AGC analysis -> AEC ->
// NS -> AECM -> VAD -> AGC, with band splitting/recombination around it at
// 32 kHz. Caller must hold crit_.
int AudioProcessingImpl::ProcessStreamLocked() {
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Attach per-chunk stream metadata to the pending debug event.
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    msg->set_delay(stream_delay_ms_);
    msg->set_drift(echo_cancellation_->stream_drift_samples());
    msg->set_level(gain_control_->stream_analog_level());
    msg->set_keypress(key_pressed_);
  }
#endif

  bool data_processed = is_data_processed();
  if (analysis_needed(data_processed)) {
    for (int i = 0; i < num_output_channels_; i++) {
      // Split into a low and high band.
      WebRtcSpl_AnalysisQMF(capture_audio_->data(i),
                            capture_audio_->samples_per_channel(),
                            capture_audio_->low_pass_split_data(i),
                            capture_audio_->high_pass_split_data(i),
                            capture_audio_->analysis_filter_state1(i),
                            capture_audio_->analysis_filter_state2(i));
    }
  }

  RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(capture_audio_));
  RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(capture_audio_));
  RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(capture_audio_));

  // AECM needs a pre-NS copy of the low band as its reference signal.
  if (echo_control_mobile_->is_enabled() &&
      noise_suppression_->is_enabled()) {
    capture_audio_->CopyLowPassToReference();
  }
  RETURN_ON_ERR(noise_suppression_->ProcessCaptureAudio(capture_audio_));
  RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(capture_audio_));
  RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(capture_audio_));
  RETURN_ON_ERR(gain_control_->ProcessCaptureAudio(capture_audio_));

  if (synthesis_needed(data_processed)) {
    for (int i = 0; i < num_output_channels_; i++) {
      // Recombine low and high bands.
      WebRtcSpl_SynthesisQMF(capture_audio_->low_pass_split_data(i),
                             capture_audio_->high_pass_split_data(i),
                             capture_audio_->samples_per_split_channel(),
                             capture_audio_->data(i),
                             capture_audio_->synthesis_filter_state1(i),
                             capture_audio_->synthesis_filter_state2(i));
    }
  }

  // The level estimator operates on the recombined data.
  RETURN_ON_ERR(level_estimator_->ProcessStream(capture_audio_));

  // The delay must be set anew for every chunk (see was_stream_delay_set()).
  was_stream_delay_set_ = false;
  return kNoError;
}
564
// Analyzes one 10 ms chunk of deinterleaved float render (far-end) audio.
// The render stream is only analyzed, never modified. The rate must match
// the forward path; the reverse channel count may re-trigger initialization.
int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data,
                                              int samples_per_channel,
                                              int sample_rate_hz,
                                              ChannelLayout layout) {
  CriticalSectionScoped crit_scoped(crit_);
  if (data == NULL) {
    return kNullPointerError;
  }
  if (sample_rate_hz != sample_rate_hz_) {
    return kBadSampleRateError;
  }

  const int num_channels = ChannelsFromLayout(layout);
  // TODO(ajm): The reverse sample rate is constrained to be identical to the
  // forward rate for now.
  RETURN_ON_ERR(MaybeInitializeLocked(sample_rate_hz_, sample_rate_hz_,
      num_input_channels_, num_output_channels_, num_channels));
  // Exactly one 10 ms chunk at the configured rate is required.
  if (samples_per_channel != reverse_samples_per_channel_) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Reverse-stream events are written immediately (no output to append).
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t channel_size = sizeof(float) * samples_per_channel;
    for (int i = 0; i < num_channels; ++i)
      msg->add_channel(data[i], channel_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  render_audio_->CopyFrom(data, samples_per_channel, num_channels);
  return AnalyzeReverseStreamLocked();
}
600
// Analyzes one 10 ms AudioFrame of render (far-end) audio; the int16
// counterpart of the float overload above. The frame is only read.
int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  if (frame == NULL) {
    return kNullPointerError;
  }
  if (frame->sample_rate_hz_ != sample_rate_hz_) {
    return kBadSampleRateError;
  }

  // TODO(ajm): The reverse sample rate is constrained to be identical to the
  // forward rate for now.
  RETURN_ON_ERR(MaybeInitializeLocked(sample_rate_hz_, sample_rate_hz_,
      num_input_channels_, num_output_channels_, frame->num_channels_));
  // Exactly one 10 ms chunk at the configured rate is required.
  if (frame->samples_per_channel_ != reverse_samples_per_channel_) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Reverse-stream events are written immediately (no output to append).
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t data_size = sizeof(int16_t) *
                             frame->samples_per_channel_ *
                             frame->num_channels_;
    msg->set_data(frame->data_, data_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  render_audio_->DeinterleaveFrom(frame);
  return AnalyzeReverseStreamLocked();
}
niklase@google.com470e71d2011-07-07 08:21:25 +0000633
// TODO(ajm): Have AnalyzeReverseStream accept sample rates not matching the
// primary stream and convert ourselves rather than having the user manage it.
// We can be smarter and use the splitting filter when appropriate. Similarly,
// perform downmixing here.
//
// Feeds render_audio_ to the echo cancellers and gain control, band-splitting
// first at 32 kHz. Caller must hold crit_ and have filled render_audio_.
int AudioProcessingImpl::AnalyzeReverseStreamLocked() {
  if (sample_rate_hz_ == kSampleRate32kHz) {
    for (int i = 0; i < num_reverse_channels_; i++) {
      // Split into low and high band.
      WebRtcSpl_AnalysisQMF(render_audio_->data(i),
                            render_audio_->samples_per_channel(),
                            render_audio_->low_pass_split_data(i),
                            render_audio_->high_pass_split_data(i),
                            render_audio_->analysis_filter_state1(i),
                            render_audio_->analysis_filter_state2(i));
    }
  }

  // Only these components consume the render stream.
  RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(render_audio_));
  RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(render_audio_));
  RETURN_ON_ERR(gain_control_->ProcessRenderAudio(render_audio_));

  return kNoError;
}
657
658int AudioProcessingImpl::set_stream_delay_ms(int delay) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000659 Error retval = kNoError;
niklase@google.com470e71d2011-07-07 08:21:25 +0000660 was_stream_delay_set_ = true;
andrew@webrtc.org6f9f8172012-03-06 19:03:39 +0000661 delay += delay_offset_ms_;
662
niklase@google.com470e71d2011-07-07 08:21:25 +0000663 if (delay < 0) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000664 delay = 0;
665 retval = kBadStreamParameterWarning;
niklase@google.com470e71d2011-07-07 08:21:25 +0000666 }
667
668 // TODO(ajm): the max is rather arbitrarily chosen; investigate.
669 if (delay > 500) {
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000670 delay = 500;
671 retval = kBadStreamParameterWarning;
niklase@google.com470e71d2011-07-07 08:21:25 +0000672 }
673
674 stream_delay_ms_ = delay;
andrew@webrtc.org5f23d642012-05-29 21:14:06 +0000675 return retval;
niklase@google.com470e71d2011-07-07 08:21:25 +0000676}
677
// Returns the delay most recently stored by set_stream_delay_ms() (i.e. the
// offset-adjusted, clamped value).
int AudioProcessingImpl::stream_delay_ms() const {
  return stream_delay_ms_;
}
681
// True once set_stream_delay_ms() has been called at least once; nothing in
// the code visible here resets it.
bool AudioProcessingImpl::was_stream_delay_set() const {
  return was_stream_delay_set_;
}
685
// Stores the key-press state for the current stream; read back via
// stream_key_pressed().
// NOTE(review): unlike set_delay_offset_ms(), this setter does not take
// crit_ — confirm callers serialize access to key_pressed_.
void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
  key_pressed_ = key_pressed;
}
689
// Returns the value most recently passed to set_stream_key_pressed().
bool AudioProcessingImpl::stream_key_pressed() const {
  return key_pressed_;
}
693
// Sets an offset that set_stream_delay_ms() adds to every reported delay
// before clamping. Guarded by crit_.
void AudioProcessingImpl::set_delay_offset_ms(int offset) {
  CriticalSectionScoped crit_scoped(crit_);
  delay_offset_ms_ = offset;
}
698
// Returns the current delay offset.
// NOTE(review): read without taking crit_, while the setter locks — confirm
// this asymmetry is intentional.
int AudioProcessingImpl::delay_offset_ms() const {
  return delay_offset_ms_;
}
702
niklase@google.com470e71d2011-07-07 08:21:25 +0000703int AudioProcessingImpl::StartDebugRecording(
704 const char filename[AudioProcessing::kMaxFilenameSize]) {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000705 CriticalSectionScoped crit_scoped(crit_);
niklase@google.com470e71d2011-07-07 08:21:25 +0000706 assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize);
707
708 if (filename == NULL) {
709 return kNullPointerError;
710 }
711
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000712#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +0000713 // Stop any ongoing recording.
714 if (debug_file_->Open()) {
715 if (debug_file_->CloseFile() == -1) {
716 return kFileError;
717 }
718 }
719
720 if (debug_file_->OpenFile(filename, false) == -1) {
721 debug_file_->CloseFile();
722 return kFileError;
723 }
724
ajm@google.com808e0e02011-08-03 21:08:51 +0000725 int err = WriteInitMessage();
726 if (err != kNoError) {
727 return err;
niklase@google.com470e71d2011-07-07 08:21:25 +0000728 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000729 return kNoError;
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000730#else
731 return kUnsupportedFunctionError;
732#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +0000733}
734
henrikg@webrtc.org863b5362013-12-06 16:05:17 +0000735int AudioProcessingImpl::StartDebugRecording(FILE* handle) {
736 CriticalSectionScoped crit_scoped(crit_);
737
738 if (handle == NULL) {
739 return kNullPointerError;
740 }
741
742#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
743 // Stop any ongoing recording.
744 if (debug_file_->Open()) {
745 if (debug_file_->CloseFile() == -1) {
746 return kFileError;
747 }
748 }
749
750 if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) {
751 return kFileError;
752 }
753
754 int err = WriteInitMessage();
755 if (err != kNoError) {
756 return err;
757 }
758 return kNoError;
759#else
760 return kUnsupportedFunctionError;
761#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
762}
763
niklase@google.com470e71d2011-07-07 08:21:25 +0000764int AudioProcessingImpl::StopDebugRecording() {
andrew@webrtc.org40654032012-01-30 20:51:15 +0000765 CriticalSectionScoped crit_scoped(crit_);
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000766
767#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +0000768 // We just return if recording hasn't started.
769 if (debug_file_->Open()) {
770 if (debug_file_->CloseFile() == -1) {
771 return kFileError;
772 }
773 }
niklase@google.com470e71d2011-07-07 08:21:25 +0000774 return kNoError;
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000775#else
776 return kUnsupportedFunctionError;
777#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +0000778}
779
// Accessor for the echo cancellation (AEC) component.
EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
  return echo_cancellation_;
}
783
// Accessor for the mobile echo control (AECM) component.
EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
  return echo_control_mobile_;
}
787
// Accessor for the gain control (AGC) component.
GainControl* AudioProcessingImpl::gain_control() const {
  return gain_control_;
}
791
// Accessor for the high-pass filter component.
HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
  return high_pass_filter_;
}
795
// Accessor for the level estimator component.
LevelEstimator* AudioProcessingImpl::level_estimator() const {
  return level_estimator_;
}
799
// Accessor for the noise suppression component.
NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
  return noise_suppression_;
}
803
// Accessor for the voice activity detection (VAD) component.
VoiceDetection* AudioProcessingImpl::voice_detection() const {
  return voice_detection_;
}
807
andrew@webrtc.org369166a2012-04-24 18:38:03 +0000808bool AudioProcessingImpl::is_data_processed() const {
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000809 int enabled_count = 0;
810 std::list<ProcessingComponent*>::const_iterator it;
811 for (it = component_list_.begin(); it != component_list_.end(); it++) {
812 if ((*it)->is_component_enabled()) {
813 enabled_count++;
814 }
815 }
816
817 // Data is unchanged if no components are enabled, or if only level_estimator_
818 // or voice_detection_ is enabled.
819 if (enabled_count == 0) {
820 return false;
821 } else if (enabled_count == 1) {
822 if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
823 return false;
824 }
825 } else if (enabled_count == 2) {
826 if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
827 return false;
828 }
829 }
830 return true;
831}
832
andrew@webrtc.org17e40642014-03-04 20:58:13 +0000833bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const {
andrew@webrtc.org369166a2012-04-24 18:38:03 +0000834 // Check if we've upmixed or downmixed the audio.
835 return (num_output_channels_ != num_input_channels_ || is_data_processed);
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000836}
837
andrew@webrtc.org369166a2012-04-24 18:38:03 +0000838bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
839 return (is_data_processed && sample_rate_hz_ == kSampleRate32kHz);
840}
841
842bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
843 if (!is_data_processed && !voice_detection_->is_enabled()) {
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000844 // Only level_estimator_ is enabled.
845 return false;
846 } else if (sample_rate_hz_ == kSampleRate32kHz) {
847 // Something besides level_estimator_ is enabled, and we have super-wb.
848 return true;
849 }
850 return false;
851}
852
853#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
ajm@google.com808e0e02011-08-03 21:08:51 +0000854int AudioProcessingImpl::WriteMessageToDebugFile() {
855 int32_t size = event_msg_->ByteSize();
856 if (size <= 0) {
857 return kUnspecifiedError;
858 }
andrew@webrtc.org621df672013-10-22 10:27:23 +0000859#if defined(WEBRTC_ARCH_BIG_ENDIAN)
ajm@google.com808e0e02011-08-03 21:08:51 +0000860 // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
861 // pretty safe in assuming little-endian.
862#endif
863
864 if (!event_msg_->SerializeToString(&event_str_)) {
865 return kUnspecifiedError;
866 }
867
868 // Write message preceded by its size.
869 if (!debug_file_->Write(&size, sizeof(int32_t))) {
870 return kFileError;
871 }
872 if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
873 return kFileError;
874 }
875
876 event_msg_->Clear();
877
andrew@webrtc.org17e40642014-03-04 20:58:13 +0000878 return kNoError;
ajm@google.com808e0e02011-08-03 21:08:51 +0000879}
880
881int AudioProcessingImpl::WriteInitMessage() {
882 event_msg_->set_type(audioproc::Event::INIT);
883 audioproc::Init* msg = event_msg_->mutable_init();
884 msg->set_sample_rate(sample_rate_hz_);
885 msg->set_device_sample_rate(echo_cancellation_->device_sample_rate_hz());
886 msg->set_num_input_channels(num_input_channels_);
887 msg->set_num_output_channels(num_output_channels_);
888 msg->set_num_reverse_channels(num_reverse_channels_);
andrew@webrtc.orga8b97372014-03-10 22:26:12 +0000889 msg->set_reverse_sample_rate(reverse_sample_rate_hz_);
ajm@google.com808e0e02011-08-03 21:08:51 +0000890
891 int err = WriteMessageToDebugFile();
892 if (err != kNoError) {
893 return err;
894 }
895
896 return kNoError;
897}
andrew@webrtc.org7bf26462011-12-03 00:03:31 +0000898#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
niklase@google.com470e71d2011-07-07 08:21:25 +0000899} // namespace webrtc