/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "audio_processing.h"

#include <stdio.h>

#include <algorithm>

#include "gtest/gtest.h"

#include "event_wrapper.h"
#include "module_common_types.h"
#include "scoped_ptr.h"
#include "signal_processing_library.h"
#include "test/testsupport/fileutils.h"
#include "thread_wrapper.h"
#include "trace.h"
#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
#include "external/webrtc/webrtc/modules/audio_processing/test/unittest.pb.h"
#else
#include "webrtc/audio_processing/unittest.pb.h"
#endif

#if (defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)) || \
    (defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) && !defined(NDEBUG))
# define WEBRTC_AUDIOPROC_BIT_EXACT
#endif

using webrtc::AudioProcessing;
using webrtc::AudioFrame;
using webrtc::GainControl;
using webrtc::NoiseSuppression;
using webrtc::EchoCancellation;
using webrtc::EventWrapper;
using webrtc::scoped_array;
using webrtc::Trace;
using webrtc::LevelEstimator;
using webrtc::EchoCancellation;
using webrtc::EchoControlMobile;
using webrtc::VoiceDetection;

namespace {
// When false, this will compare the output data with the results stored to
// file. This is the typical case. When the file should be updated, it can
// be set to true with the command-line switch --write_ref_data.
bool write_ref_data = false;

const int kSampleRates[] = {8000, 16000, 32000};
const size_t kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
const int kChannels[] = {1, 2};
const size_t kChannelsSize = sizeof(kChannels) / sizeof(*kChannels);

#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
// AECM doesn't support super-wb.
const int kProcessSampleRates[] = {8000, 16000};
#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
const int kProcessSampleRates[] = {8000, 16000, 32000};
#endif
const size_t kProcessSampleRatesSize = sizeof(kProcessSampleRates) /
    sizeof(*kProcessSampleRates);

// TODO(andrew): Use the MonoToStereo routine from AudioFrameOperations.
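// Averages each interleaved left/right sample pair into |mono|. Operating in
// place (mono == stereo) is safe because mono[i] is written only after
// stereo[2 * i] and stereo[2 * i + 1] have been read.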
void MixStereoToMono(const int16_t* stereo,
                     int16_t* mono,
                     int samples_per_channel) {
  for (int i = 0; i < samples_per_channel; i++) {
    int32_t int32 = (static_cast<int32_t>(stereo[i * 2]) +
                     static_cast<int32_t>(stereo[i * 2 + 1])) >> 1;
    mono[i] = static_cast<int16_t>(int32);
  }
}

void CopyLeftToRightChannel(int16_t* stereo, int samples_per_channel) {
  for (int i = 0; i < samples_per_channel; i++) {
    stereo[i * 2 + 1] = stereo[i * 2];
  }
}

void VerifyChannelsAreEqual(int16_t* stereo, int samples_per_channel) {
  for (int i = 0; i < samples_per_channel; i++) {
    EXPECT_EQ(stereo[i * 2 + 1], stereo[i * 2]);
  }
}

void SetFrameTo(AudioFrame* frame, int16_t value) {
  for (int i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
      ++i) {
    frame->data_[i] = value;
  }
}

void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
  ASSERT_EQ(2, frame->num_channels_);
  for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
    frame->data_[i] = left;
    frame->data_[i + 1] = right;
  }
}

template <class T>
T AbsValue(T a) {
  return a > 0 ? a : -a;
}

int16_t MaxAudioFrame(const AudioFrame& frame) {
  const int length = frame.samples_per_channel_ * frame.num_channels_;
  int16_t max_data = AbsValue(frame.data_[0]);
  for (int i = 1; i < length; i++) {
    max_data = std::max(max_data, AbsValue(frame.data_[i]));
  }

  return max_data;
}

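// Returns true if the two frames have the same dimensions and bit-exact
// sample data.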
bool FrameDataAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
  if (frame1.samples_per_channel_ !=
      frame2.samples_per_channel_) {
    return false;
  }
  if (frame1.num_channels_ !=
      frame2.num_channels_) {
    return false;
  }
  if (memcmp(frame1.data_, frame2.data_,
             frame1.samples_per_channel_ * frame1.num_channels_ *
                 sizeof(int16_t))) {
    return false;
  }
  return true;
}

void TestStats(const AudioProcessing::Statistic& test,
               const webrtc::audioproc::Test::Statistic& reference) {
  EXPECT_EQ(reference.instant(), test.instant);
  EXPECT_EQ(reference.average(), test.average);
  EXPECT_EQ(reference.maximum(), test.maximum);
  EXPECT_EQ(reference.minimum(), test.minimum);
}

void WriteStatsMessage(const AudioProcessing::Statistic& output,
                       webrtc::audioproc::Test::Statistic* message) {
  message->set_instant(output.instant);
  message->set_average(output.average);
  message->set_maximum(output.maximum);
  message->set_minimum(output.minimum);
}

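// The on-disk reference format is the message size written as a raw int,
// followed by the serialized MessageLite bytes; ReadMessageLiteFromFile()
// below expects the same layout.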
void WriteMessageLiteToFile(const std::string filename,
                            const ::google::protobuf::MessageLite& message) {
  FILE* file = fopen(filename.c_str(), "wb");
  ASSERT_TRUE(file != NULL) << "Could not open " << filename;
  int size = message.ByteSize();
  ASSERT_GT(size, 0);
  unsigned char* array = new unsigned char[size];
  ASSERT_TRUE(message.SerializeToArray(array, size));

  ASSERT_EQ(1u, fwrite(&size, sizeof(int), 1, file));
  ASSERT_EQ(static_cast<size_t>(size),
            fwrite(array, sizeof(unsigned char), size, file));

  delete [] array;
  fclose(file);
}

void ReadMessageLiteFromFile(const std::string filename,
                             ::google::protobuf::MessageLite* message) {
  assert(message != NULL);

  FILE* file = fopen(filename.c_str(), "rb");
  ASSERT_TRUE(file != NULL) << "Could not open " << filename;
  int size = 0;
  ASSERT_EQ(1u, fread(&size, sizeof(int), 1, file));
  ASSERT_GT(size, 0);
  unsigned char* array = new unsigned char[size];
  ASSERT_EQ(static_cast<size_t>(size),
            fread(array, sizeof(unsigned char), size, file));

  ASSERT_TRUE(message->ParseFromArray(array, size));

  delete [] array;
  fclose(file);
}

struct ThreadData {
  ThreadData(int thread_num_, AudioProcessing* ap_)
      : thread_num(thread_num_),
        error(false),
        ap(ap_) {}
  int thread_num;
  bool error;
  AudioProcessing* ap;
};

class ApmTest : public ::testing::Test {
 protected:
  ApmTest();
  virtual void SetUp();
  virtual void TearDown();

  static void SetUpTestCase() {
    Trace::CreateTrace();
    std::string trace_filename = webrtc::test::OutputPath() +
        "audioproc_trace.txt";
    ASSERT_EQ(0, Trace::SetTraceFile(trace_filename.c_str()));
  }

  static void TearDownTestCase() {
    Trace::ReturnTrace();
  }

  void Init(int sample_rate_hz, int num_reverse_channels,
            int num_input_channels, int num_output_channels,
            bool open_output_file);
  std::string ResourceFilePath(std::string name, int sample_rate_hz);
  std::string OutputFilePath(std::string name,
                             int sample_rate_hz,
                             int num_reverse_channels,
                             int num_input_channels,
                             int num_output_channels);
  void EnableAllComponents();
  bool ReadFrame(FILE* file, AudioFrame* frame);
  void ProcessWithDefaultStreamParameters(AudioFrame* frame);
  template <typename F>
  void ChangeTriggersInit(F f, AudioProcessing* ap, int initial_value,
                          int changed_value);

  const std::string output_path_;
  const std::string ref_path_;
  const std::string ref_filename_;
  webrtc::AudioProcessing* apm_;
  webrtc::AudioFrame* frame_;
  webrtc::AudioFrame* revframe_;
  FILE* far_file_;
  FILE* near_file_;
  FILE* out_file_;
};

ApmTest::ApmTest()
    : output_path_(webrtc::test::OutputPath()),
      ref_path_(webrtc::test::ProjectRootPath() +
                "data/audio_processing/"),
#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
      ref_filename_(ref_path_ + "output_data_fixed.pb"),
#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
      ref_filename_(ref_path_ + "output_data_float.pb"),
#endif
      apm_(NULL),
      frame_(NULL),
      revframe_(NULL),
      far_file_(NULL),
      near_file_(NULL),
      out_file_(NULL) {}

void ApmTest::SetUp() {
  apm_ = AudioProcessing::Create(0);
  ASSERT_TRUE(apm_ != NULL);

  frame_ = new AudioFrame();
  revframe_ = new AudioFrame();

  Init(32000, 2, 2, 2, false);
}

void ApmTest::TearDown() {
  if (frame_) {
    delete frame_;
  }
  frame_ = NULL;

  if (revframe_) {
    delete revframe_;
  }
  revframe_ = NULL;

  if (far_file_) {
    ASSERT_EQ(0, fclose(far_file_));
  }
  far_file_ = NULL;

  if (near_file_) {
    ASSERT_EQ(0, fclose(near_file_));
  }
  near_file_ = NULL;

  if (out_file_) {
    ASSERT_EQ(0, fclose(out_file_));
  }
  out_file_ = NULL;

  if (apm_ != NULL) {
    AudioProcessing::Destroy(apm_);
  }
  apm_ = NULL;
}

std::string ApmTest::ResourceFilePath(std::string name, int sample_rate_hz) {
  std::ostringstream ss;
  // Resource files are all stereo.
  ss << name << sample_rate_hz / 1000 << "_stereo";
  return webrtc::test::ResourcePath(ss.str(), "pcm");
}

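// Builds an output file name that encodes the configuration, e.g.
// "out16_2r2i_stereo.pcm" for 16 kHz with 2 reverse channels, 2 input
// channels and stereo output.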
std::string ApmTest::OutputFilePath(std::string name,
                                    int sample_rate_hz,
                                    int num_reverse_channels,
                                    int num_input_channels,
                                    int num_output_channels) {
  std::ostringstream ss;
  ss << name << sample_rate_hz / 1000 << "_" << num_reverse_channels << "r" <<
      num_input_channels << "i" << "_";
  if (num_output_channels == 1) {
    ss << "mono";
  } else if (num_output_channels == 2) {
    ss << "stereo";
  } else {
    assert(false);
    return "";
  }
  ss << ".pcm";

  return output_path_ + ss.str();
}

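// Reinitializes the APM for the given rate and channel layout, resizes the
// test frames to 10 ms, and (re)opens the far/near resource files plus,
// optionally, the output file.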
void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
                   int num_input_channels, int num_output_channels,
                   bool open_output_file) {
  ASSERT_EQ(apm_->kNoError, apm_->Initialize());

  // Handles error checking of the parameters as well. No need to repeat it.
  ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(sample_rate_hz));
  ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(num_input_channels,
                                                   num_output_channels));
  ASSERT_EQ(apm_->kNoError,
            apm_->set_num_reverse_channels(num_reverse_channels));

  // We always use 10 ms frames.
  const int samples_per_channel = sample_rate_hz / 100;
  frame_->samples_per_channel_ = samples_per_channel;
  frame_->num_channels_ = num_input_channels;
  frame_->sample_rate_hz_ = sample_rate_hz;
  revframe_->samples_per_channel_ = samples_per_channel;
  revframe_->num_channels_ = num_reverse_channels;
  revframe_->sample_rate_hz_ = sample_rate_hz;

  if (far_file_) {
    ASSERT_EQ(0, fclose(far_file_));
  }
  std::string filename = ResourceFilePath("far", sample_rate_hz);
  far_file_ = fopen(filename.c_str(), "rb");
  ASSERT_TRUE(far_file_ != NULL) << "Could not open file " <<
      filename << "\n";

  if (near_file_) {
    ASSERT_EQ(0, fclose(near_file_));
  }
  filename = ResourceFilePath("near", sample_rate_hz);
  near_file_ = fopen(filename.c_str(), "rb");
  ASSERT_TRUE(near_file_ != NULL) << "Could not open file " <<
      filename << "\n";

  if (open_output_file) {
    if (out_file_) {
      ASSERT_EQ(0, fclose(out_file_));
    }
    filename = OutputFilePath("out", sample_rate_hz, num_reverse_channels,
                              num_input_channels, num_output_channels);
    out_file_ = fopen(filename.c_str(), "wb");
    ASSERT_TRUE(out_file_ != NULL) << "Could not open file " <<
        filename << "\n";
  }
}

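// Enables every processing component. The fixed-point profile uses AECM and
// adaptive-digital AGC at 16 kHz; the float profile uses AEC (with drift
// compensation, metrics and delay logging) and adaptive-analog AGC.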
void ApmTest::EnableAllComponents() {
#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
  EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));

  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_mode(GainControl::kAdaptiveDigital));
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_drift_compensation(true));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_metrics(true));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_delay_logging(true));
  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));

  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_analog_level_limits(0, 255));
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
#endif

  EXPECT_EQ(apm_->kNoError,
            apm_->high_pass_filter()->Enable(true));

  EXPECT_EQ(apm_->kNoError,
            apm_->level_estimator()->Enable(true));

  EXPECT_EQ(apm_->kNoError,
            apm_->noise_suppression()->Enable(true));

  EXPECT_EQ(apm_->kNoError,
            apm_->voice_detection()->Enable(true));
}

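// Reads one 10 ms stereo frame from |file| into |frame|, downmixing to mono
// when the frame is configured for a single channel. Returns false at
// end-of-file.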
bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame) {
  // The files always contain stereo audio.
  size_t frame_size = frame->samples_per_channel_ * 2;
  size_t read_count = fread(frame->data_,
                            sizeof(int16_t),
                            frame_size,
                            file);
  if (read_count != frame_size) {
    // Check that the file really ended.
    EXPECT_NE(0, feof(file));
    return false;  // This is expected.
  }

  if (frame->num_channels_ == 1) {
    MixStereoToMono(frame->data_, frame->data_,
                    frame->samples_per_channel_);
  }

  return true;
}

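// Supplies the minimum set of stream parameters (delay, drift, analog level)
// and runs one capture-side frame through ProcessStream().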
void ApmTest::ProcessWithDefaultStreamParameters(AudioFrame* frame) {
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_stream_analog_level(127));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame));
}

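// Verifies that applying |f| with a changed value (and then restoring the
// initial one) resets the internal state: the next processed frame must match
// the output of a freshly initialized APM, while re-applying the same value
// must not trigger such a reset.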
template <typename F>
void ApmTest::ChangeTriggersInit(F f, AudioProcessing* ap, int initial_value,
                                 int changed_value) {
  EnableAllComponents();
  Init(16000, 2, 2, 2, false);
  SetFrameTo(frame_, 1000);
  AudioFrame frame_copy = *frame_;
  ProcessWithDefaultStreamParameters(frame_);
  // Verify the processing has actually changed the frame.
  EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));

  // Test that a change in value triggers an init.
  f(apm_, changed_value);
  f(apm_, initial_value);
  ProcessWithDefaultStreamParameters(&frame_copy);
  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));

  apm_->Initialize();
  SetFrameTo(frame_, 1000);
  AudioFrame initial_frame = *frame_;
  ProcessWithDefaultStreamParameters(frame_);
  ProcessWithDefaultStreamParameters(frame_);
  // Verify the processing has actually changed the frame.
  EXPECT_FALSE(FrameDataAreEqual(*frame_, initial_frame));

  frame_copy = initial_frame;
  apm_->Initialize();
  ProcessWithDefaultStreamParameters(&frame_copy);
  // Verify an init here would result in different output.
  apm_->Initialize();
  ProcessWithDefaultStreamParameters(&frame_copy);
  EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));

  frame_copy = initial_frame;
  apm_->Initialize();
  ProcessWithDefaultStreamParameters(&frame_copy);
  // Test that the same value does not trigger an init.
  f(apm_, initial_value);
  ProcessWithDefaultStreamParameters(&frame_copy);
  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
}

TEST_F(ApmTest, StreamParameters) {
  // No errors when the components are disabled.
  EXPECT_EQ(apm_->kNoError,
            apm_->ProcessStream(frame_));

  // -- Missing AGC level --
  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));

  // Resets after successful ProcessStream().
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_stream_analog_level(127));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));

  // Other stream parameters set correctly.
  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_drift_compensation(true));
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kStreamParameterNotSetError,
            apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_drift_compensation(false));

  // -- Missing delay --
  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));

  // Resets after successful ProcessStream().
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));

  // Other stream parameters set correctly.
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_drift_compensation(true));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_stream_analog_level(127));
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));

  // -- Missing drift --
  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));

  // Resets after successful ProcessStream().
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));

  // Other stream parameters set correctly.
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_stream_analog_level(127));
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));

  // -- No stream parameters --
  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
  EXPECT_EQ(apm_->kNoError,
            apm_->AnalyzeReverseStream(revframe_));
  EXPECT_EQ(apm_->kStreamParameterNotSetError,
            apm_->ProcessStream(frame_));

  // -- All there --
  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_stream_analog_level(127));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
}

TEST_F(ApmTest, DefaultDelayOffsetIsZero) {
  EXPECT_EQ(0, apm_->delay_offset_ms());
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(50));
  EXPECT_EQ(50, apm_->stream_delay_ms());
}

TEST_F(ApmTest, DelayOffsetWithLimitsIsSetProperly) {
  // High limit of 500 ms.
  apm_->set_delay_offset_ms(100);
  EXPECT_EQ(100, apm_->delay_offset_ms());
  EXPECT_EQ(apm_->kBadStreamParameterWarning, apm_->set_stream_delay_ms(450));
  EXPECT_EQ(500, apm_->stream_delay_ms());
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
  EXPECT_EQ(200, apm_->stream_delay_ms());

  // Low limit of 0 ms.
  apm_->set_delay_offset_ms(-50);
  EXPECT_EQ(-50, apm_->delay_offset_ms());
  EXPECT_EQ(apm_->kBadStreamParameterWarning, apm_->set_stream_delay_ms(20));
  EXPECT_EQ(0, apm_->stream_delay_ms());
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
  EXPECT_EQ(50, apm_->stream_delay_ms());
}

TEST_F(ApmTest, Channels) {
  // Testing number of invalid channels
  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(0, 1));
  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 0));
  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(3, 1));
  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 3));
  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(0));
  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(3));
  // Testing number of valid channels
  for (int i = 1; i < 3; i++) {
    for (int j = 1; j < 3; j++) {
      if (j > i) {
        EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(i, j));
      } else {
        EXPECT_EQ(apm_->kNoError, apm_->set_num_channels(i, j));
        EXPECT_EQ(j, apm_->num_output_channels());
      }
    }
    EXPECT_EQ(i, apm_->num_input_channels());
    EXPECT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(i));
    EXPECT_EQ(i, apm_->num_reverse_channels());
  }
}

TEST_F(ApmTest, SampleRates) {
  // Testing invalid sample rates
  EXPECT_EQ(apm_->kBadParameterError, apm_->set_sample_rate_hz(10000));
  // Testing valid sample rates
  int fs[] = {8000, 16000, 32000};
  for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
    EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(fs[i]));
    EXPECT_EQ(fs[i], apm_->sample_rate_hz());
  }
}

void SetSampleRate(AudioProcessing* ap, int value) {
  EXPECT_EQ(ap->kNoError, ap->set_sample_rate_hz(value));
}

void SetNumReverseChannels(AudioProcessing* ap, int value) {
  EXPECT_EQ(ap->kNoError, ap->set_num_reverse_channels(value));
}

void SetNumOutputChannels(AudioProcessing* ap, int value) {
  EXPECT_EQ(ap->kNoError, ap->set_num_channels(2, value));
}

TEST_F(ApmTest, SampleRateChangeTriggersInit) {
  ChangeTriggersInit(SetSampleRate, apm_, 16000, 8000);
}

TEST_F(ApmTest, ReverseChannelChangeTriggersInit) {
  ChangeTriggersInit(SetNumReverseChannels, apm_, 2, 1);
}

TEST_F(ApmTest, ChannelChangeTriggersInit) {
  ChangeTriggersInit(SetNumOutputChannels, apm_, 2, 1);
}

TEST_F(ApmTest, EchoCancellation) {
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_drift_compensation(true));
  EXPECT_TRUE(apm_->echo_cancellation()->is_drift_compensation_enabled());
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_drift_compensation(false));
  EXPECT_FALSE(apm_->echo_cancellation()->is_drift_compensation_enabled());

  EXPECT_EQ(apm_->kBadParameterError,
            apm_->echo_cancellation()->set_device_sample_rate_hz(4000));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->echo_cancellation()->set_device_sample_rate_hz(100000));

  int rate[] = {16000, 44100, 48000};
  for (size_t i = 0; i < sizeof(rate)/sizeof(*rate); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->echo_cancellation()->set_device_sample_rate_hz(rate[i]));
    EXPECT_EQ(rate[i],
              apm_->echo_cancellation()->device_sample_rate_hz());
  }

  EchoCancellation::SuppressionLevel level[] = {
    EchoCancellation::kLowSuppression,
    EchoCancellation::kModerateSuppression,
    EchoCancellation::kHighSuppression,
  };
  for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->echo_cancellation()->set_suppression_level(level[i]));
    EXPECT_EQ(level[i],
              apm_->echo_cancellation()->suppression_level());
  }

  EchoCancellation::Metrics metrics;
  EXPECT_EQ(apm_->kNotEnabledError,
            apm_->echo_cancellation()->GetMetrics(&metrics));

  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_metrics(true));
  EXPECT_TRUE(apm_->echo_cancellation()->are_metrics_enabled());
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_metrics(false));
  EXPECT_FALSE(apm_->echo_cancellation()->are_metrics_enabled());

  int median = 0;
  int std = 0;
  EXPECT_EQ(apm_->kNotEnabledError,
            apm_->echo_cancellation()->GetDelayMetrics(&median, &std));

  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_delay_logging(true));
  EXPECT_TRUE(apm_->echo_cancellation()->is_delay_logging_enabled());
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_delay_logging(false));
  EXPECT_FALSE(apm_->echo_cancellation()->is_delay_logging_enabled());

  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
  EXPECT_TRUE(apm_->echo_cancellation()->is_enabled());
  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
  EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
}

TEST_F(ApmTest, EchoControlMobile) {
  // AECM won't use super-wideband.
  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
  EXPECT_EQ(apm_->kBadSampleRateError, apm_->echo_control_mobile()->Enable(true));
  // Turn AECM on (and AEC off)
  Init(16000, 2, 2, 2, false);
  EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
  EXPECT_TRUE(apm_->echo_control_mobile()->is_enabled());

  // Toggle routing modes
  EchoControlMobile::RoutingMode mode[] = {
    EchoControlMobile::kQuietEarpieceOrHeadset,
    EchoControlMobile::kEarpiece,
    EchoControlMobile::kLoudEarpiece,
    EchoControlMobile::kSpeakerphone,
    EchoControlMobile::kLoudSpeakerphone,
  };
  for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->echo_control_mobile()->set_routing_mode(mode[i]));
    EXPECT_EQ(mode[i],
              apm_->echo_control_mobile()->routing_mode());
  }
  // Turn comfort noise off/on
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_control_mobile()->enable_comfort_noise(false));
  EXPECT_FALSE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_control_mobile()->enable_comfort_noise(true));
  EXPECT_TRUE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
  // Set and get echo path
  const size_t echo_path_size =
      apm_->echo_control_mobile()->echo_path_size_bytes();
  scoped_array<char> echo_path_in(new char[echo_path_size]);
  scoped_array<char> echo_path_out(new char[echo_path_size]);
  EXPECT_EQ(apm_->kNullPointerError,
            apm_->echo_control_mobile()->SetEchoPath(NULL, echo_path_size));
  EXPECT_EQ(apm_->kNullPointerError,
            apm_->echo_control_mobile()->GetEchoPath(NULL, echo_path_size));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(), 1));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(),
                                                     echo_path_size));
  for (size_t i = 0; i < echo_path_size; i++) {
    echo_path_in[i] = echo_path_out[i] + 1;
  }
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->echo_control_mobile()->SetEchoPath(echo_path_in.get(), 1));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_control_mobile()->SetEchoPath(echo_path_in.get(),
                                                     echo_path_size));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(),
                                                     echo_path_size));
  for (size_t i = 0; i < echo_path_size; i++) {
    EXPECT_EQ(echo_path_in[i], echo_path_out[i]);
  }

  // Process a few frames with NS in the default disabled state. This exercises
  // a different codepath than with it enabled.
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));

  // Turn AECM off
  EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(false));
  EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
}

TEST_F(ApmTest, GainControl) {
  // Testing gain modes
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_mode(
                apm_->gain_control()->mode()));

  GainControl::Mode mode[] = {
    GainControl::kAdaptiveAnalog,
    GainControl::kAdaptiveDigital,
    GainControl::kFixedDigital
  };
  for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->gain_control()->set_mode(mode[i]));
    EXPECT_EQ(mode[i], apm_->gain_control()->mode());
  }
  // Testing invalid target levels
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_target_level_dbfs(-3));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_target_level_dbfs(-40));
  // Testing valid target levels
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_target_level_dbfs(
                apm_->gain_control()->target_level_dbfs()));

  int level_dbfs[] = {0, 6, 31};
  for (size_t i = 0; i < sizeof(level_dbfs)/sizeof(*level_dbfs); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->gain_control()->set_target_level_dbfs(level_dbfs[i]));
    EXPECT_EQ(level_dbfs[i], apm_->gain_control()->target_level_dbfs());
  }

  // Testing invalid compression gains
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_compression_gain_db(-1));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_compression_gain_db(100));

  // Testing valid compression gains
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_compression_gain_db(
                apm_->gain_control()->compression_gain_db()));

  int gain_db[] = {0, 10, 90};
  for (size_t i = 0; i < sizeof(gain_db)/sizeof(*gain_db); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->gain_control()->set_compression_gain_db(gain_db[i]));
    EXPECT_EQ(gain_db[i], apm_->gain_control()->compression_gain_db());
  }

  // Testing limiter off/on
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(false));
  EXPECT_FALSE(apm_->gain_control()->is_limiter_enabled());
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(true));
  EXPECT_TRUE(apm_->gain_control()->is_limiter_enabled());

  // Testing invalid level limits
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_analog_level_limits(-1, 512));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_analog_level_limits(100000, 512));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_analog_level_limits(512, -1));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_analog_level_limits(512, 100000));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_analog_level_limits(512, 255));

  // Testing valid level limits
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_analog_level_limits(
                apm_->gain_control()->analog_level_minimum(),
                apm_->gain_control()->analog_level_maximum()));

  int min_level[] = {0, 255, 1024};
  for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->gain_control()->set_analog_level_limits(min_level[i], 1024));
    EXPECT_EQ(min_level[i], apm_->gain_control()->analog_level_minimum());
  }

  int max_level[] = {0, 1024, 65535};
  for (size_t i = 0; i < sizeof(max_level)/sizeof(*max_level); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->gain_control()->set_analog_level_limits(0, max_level[i]));
    EXPECT_EQ(max_level[i], apm_->gain_control()->analog_level_maximum());
  }

  // TODO(ajm): stream_is_saturated() and stream_analog_level()

  // Turn AGC off
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
  EXPECT_FALSE(apm_->gain_control()->is_enabled());
}

TEST_F(ApmTest, NoiseSuppression) {
  // Test valid suppression levels.
  NoiseSuppression::Level level[] = {
    NoiseSuppression::kLow,
    NoiseSuppression::kModerate,
    NoiseSuppression::kHigh,
    NoiseSuppression::kVeryHigh
  };
  for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->noise_suppression()->set_level(level[i]));
    EXPECT_EQ(level[i], apm_->noise_suppression()->level());
  }

  // Turn NS on/off
  EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(true));
  EXPECT_TRUE(apm_->noise_suppression()->is_enabled());
  EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(false));
  EXPECT_FALSE(apm_->noise_suppression()->is_enabled());
}

TEST_F(ApmTest, HighPassFilter) {
  // Turn HP filter on/off
  EXPECT_EQ(apm_->kNoError, apm_->high_pass_filter()->Enable(true));
  EXPECT_TRUE(apm_->high_pass_filter()->is_enabled());
  EXPECT_EQ(apm_->kNoError, apm_->high_pass_filter()->Enable(false));
  EXPECT_FALSE(apm_->high_pass_filter()->is_enabled());
}

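// The RMS() expectations below reflect the level estimator's reporting scale:
// the returned value is the frame RMS as a rounded, negated dBFS figure, so 0
// corresponds to a full-scale signal and 127 to silence.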
TEST_F(ApmTest, LevelEstimator) {
  // Turn level estimator on/off
  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
  EXPECT_FALSE(apm_->level_estimator()->is_enabled());

  EXPECT_EQ(apm_->kNotEnabledError, apm_->level_estimator()->RMS());

  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
  EXPECT_TRUE(apm_->level_estimator()->is_enabled());

  // Run this test in wideband; in super-wb, the splitting filter distorts the
  // audio enough to cause deviation from the expectation for small values.
  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
  frame_->samples_per_channel_ = 160;
  frame_->num_channels_ = 2;
  frame_->sample_rate_hz_ = 16000;

  // Min value if no frames have been processed.
  EXPECT_EQ(127, apm_->level_estimator()->RMS());

  // Min value on zero frames.
  SetFrameTo(frame_, 0);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(127, apm_->level_estimator()->RMS());

  // Try a few RMS values.
  // (These also test that the value resets after retrieving it.)
  SetFrameTo(frame_, 32767);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(0, apm_->level_estimator()->RMS());

  SetFrameTo(frame_, 30000);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(1, apm_->level_estimator()->RMS());

  SetFrameTo(frame_, 10000);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(10, apm_->level_estimator()->RMS());

  SetFrameTo(frame_, 10);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(70, apm_->level_estimator()->RMS());

  // Min value if energy_ == 0.
  SetFrameTo(frame_, 10000);
  uint32_t energy = frame_->energy_;  // Save default to restore below.
  frame_->energy_ = 0;
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(127, apm_->level_estimator()->RMS());
  frame_->energy_ = energy;

  // Verify reset after enable/disable.
  SetFrameTo(frame_, 32767);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
  SetFrameTo(frame_, 1);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(90, apm_->level_estimator()->RMS());

  // Verify reset after initialize.
  SetFrameTo(frame_, 32767);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
  SetFrameTo(frame_, 1);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(90, apm_->level_estimator()->RMS());
}

TEST_F(ApmTest, VoiceDetection) {
  // Test external VAD
  EXPECT_EQ(apm_->kNoError,
            apm_->voice_detection()->set_stream_has_voice(true));
  EXPECT_TRUE(apm_->voice_detection()->stream_has_voice());
  EXPECT_EQ(apm_->kNoError,
            apm_->voice_detection()->set_stream_has_voice(false));
  EXPECT_FALSE(apm_->voice_detection()->stream_has_voice());

  // Test valid likelihoods
  VoiceDetection::Likelihood likelihood[] = {
    VoiceDetection::kVeryLowLikelihood,
    VoiceDetection::kLowLikelihood,
    VoiceDetection::kModerateLikelihood,
    VoiceDetection::kHighLikelihood
  };
  for (size_t i = 0; i < sizeof(likelihood)/sizeof(*likelihood); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->voice_detection()->set_likelihood(likelihood[i]));
    EXPECT_EQ(likelihood[i], apm_->voice_detection()->likelihood());
  }

  /* TODO(bjornv): Enable once VAD supports other frame lengths than 10 ms
  // Test invalid frame sizes
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->voice_detection()->set_frame_size_ms(12));

  // Test valid frame sizes
  for (int i = 10; i <= 30; i += 10) {
    EXPECT_EQ(apm_->kNoError,
              apm_->voice_detection()->set_frame_size_ms(i));
    EXPECT_EQ(i, apm_->voice_detection()->frame_size_ms());
  }
  */

  // Turn VAD on/off
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
  EXPECT_TRUE(apm_->voice_detection()->is_enabled());
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
  EXPECT_FALSE(apm_->voice_detection()->is_enabled());

  // Test that AudioFrame activity is maintained when VAD is disabled.
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
  AudioFrame::VADActivity activity[] = {
    AudioFrame::kVadActive,
    AudioFrame::kVadPassive,
    AudioFrame::kVadUnknown
  };
  for (size_t i = 0; i < sizeof(activity)/sizeof(*activity); i++) {
    frame_->vad_activity_ = activity[i];
    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
    EXPECT_EQ(activity[i], frame_->vad_activity_);
  }

  // Test that AudioFrame activity is set when VAD is enabled.
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
  frame_->vad_activity_ = AudioFrame::kVadUnknown;
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_NE(AudioFrame::kVadUnknown, frame_->vad_activity_);

  // TODO(bjornv): Add tests for streamed voice; stream_has_voice()
}

TEST_F(ApmTest, VerifyDownMixing) {
  for (size_t i = 0; i < kSampleRatesSize; i++) {
    Init(kSampleRates[i], 2, 2, 1, false);
    SetFrameTo(frame_, 1000, 2000);
    AudioFrame mono_frame;
    mono_frame.samples_per_channel_ = frame_->samples_per_channel_;
    mono_frame.num_channels_ = 1;
    SetFrameTo(&mono_frame, 1500);
    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
    EXPECT_TRUE(FrameDataAreEqual(*frame_, mono_frame));
  }
}

TEST_F(ApmTest, AllProcessingDisabledByDefault) {
  EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
  EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
  EXPECT_FALSE(apm_->gain_control()->is_enabled());
  EXPECT_FALSE(apm_->high_pass_filter()->is_enabled());
  EXPECT_FALSE(apm_->level_estimator()->is_enabled());
  EXPECT_FALSE(apm_->noise_suppression()->is_enabled());
  EXPECT_FALSE(apm_->voice_detection()->is_enabled());
}

TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabled) {
  for (size_t i = 0; i < kSampleRatesSize; i++) {
    Init(kSampleRates[i], 2, 2, 2, false);
    SetFrameTo(frame_, 1000, 2000);
    AudioFrame frame_copy = *frame_;
    for (int j = 0; j < 1000; j++) {
      EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
      EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
    }
  }
}

TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
  EnableAllComponents();

  for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
    Init(kProcessSampleRates[i], 2, 2, 2, false);
    int analog_level = 127;
    while (1) {
      if (!ReadFrame(far_file_, revframe_)) break;
      CopyLeftToRightChannel(revframe_->data_, revframe_->samples_per_channel_);

      EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));

      if (!ReadFrame(near_file_, frame_)) break;
      CopyLeftToRightChannel(frame_->data_, frame_->samples_per_channel_);
      frame_->vad_activity_ = AudioFrame::kVadUnknown;

      EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
      EXPECT_EQ(apm_->kNoError,
                apm_->echo_cancellation()->set_stream_drift_samples(0));
      EXPECT_EQ(apm_->kNoError,
                apm_->gain_control()->set_stream_analog_level(analog_level));
      EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
      analog_level = apm_->gain_control()->stream_analog_level();

      VerifyChannelsAreEqual(frame_->data_, frame_->samples_per_channel_);
    }
  }
}

TEST_F(ApmTest, SplittingFilter) {
  // Verify the filter is not active through undistorted audio when:
  // 1. No components are enabled...
  SetFrameTo(frame_, 1000);
  AudioFrame frame_copy = *frame_;
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));

  // 2. Only the level estimator is enabled...
  SetFrameTo(frame_, 1000);
  frame_copy = *frame_;
  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));

  // 3. Only VAD is enabled...
  SetFrameTo(frame_, 1000);
  frame_copy = *frame_;
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));

  // 4. Both VAD and the level estimator are enabled...
  SetFrameTo(frame_, 1000);
  frame_copy = *frame_;
  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));

  // 5. Not using super-wb.
  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
  frame_->samples_per_channel_ = 160;
  frame_->num_channels_ = 2;
  frame_->sample_rate_hz_ = 16000;
  // Enable AEC, which would require the filter in super-wb. We rely on the
  // first few frames of data being unaffected by the AEC.
  // TODO(andrew): This test, and the one below, rely rather tenuously on the
  // behavior of the AEC. Think of something more robust.
  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
  SetFrameTo(frame_, 1000);
  frame_copy = *frame_;
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));

  // Check the test is valid. We should have distortion from the filter
  // when AEC is enabled (which won't affect the audio).
  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
  frame_->samples_per_channel_ = 320;
  frame_->num_channels_ = 2;
  frame_->sample_rate_hz_ = 32000;
  SetFrameTo(frame_, 1000);
  frame_copy = *frame_;
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
}

// TODO(andrew): expand test to verify output.
TEST_F(ApmTest, DebugDump) {
  const std::string filename = webrtc::test::OutputPath() + "debug.aec";
  EXPECT_EQ(apm_->kNullPointerError, apm_->StartDebugRecording(NULL));

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stopping without having started should be OK.
  EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());

  EXPECT_EQ(apm_->kNoError, apm_->StartDebugRecording(filename.c_str()));
  EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());

  // Verify the file has been written.
  FILE* fid = fopen(filename.c_str(), "r");
  ASSERT_TRUE(fid != NULL);

  // Clean it up.
  ASSERT_EQ(0, fclose(fid));
  ASSERT_EQ(0, remove(filename.c_str()));
#else
  EXPECT_EQ(apm_->kUnsupportedFunctionError,
            apm_->StartDebugRecording(filename.c_str()));
  EXPECT_EQ(apm_->kUnsupportedFunctionError, apm_->StopDebugRecording());

  // Verify the file has NOT been written.
  ASSERT_TRUE(fopen(filename.c_str(), "r") == NULL);
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}

// TODO(andrew): Add a test to process a few frames with different combinations
// of enabled components.

// TODO(andrew): Make this test more robust such that it can be run on multiple
// platforms. It currently requires bit-exactness.
#ifdef WEBRTC_AUDIOPROC_BIT_EXACT
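// Runs the full pipeline over the far/near resource files for every
// configuration listed in the reference data, accumulating per-frame
// statistics (echo, voice and saturation counts, analog level, maximum output
// level, NS speech probability). With --write_ref_data the reference protobuf
// is regenerated from the current output instead of being compared against.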
andrew@webrtc.org755b04a2011-11-15 16:57:56 +00001230TEST_F(ApmTest, Process) {
1231 GOOGLE_PROTOBUF_VERIFY_VERSION;
andrew@webrtc.orgdaacee82012-02-07 00:01:04 +00001232 webrtc::audioproc::OutputData ref_data;
andrew@webrtc.org755b04a2011-11-15 16:57:56 +00001233
andrew@webrtc.orgdaacee82012-02-07 00:01:04 +00001234 if (!write_ref_data) {
1235 ReadMessageLiteFromFile(ref_filename_, &ref_data);
andrew@webrtc.org755b04a2011-11-15 16:57:56 +00001236 } else {
andrew@webrtc.orgdaacee82012-02-07 00:01:04 +00001237 // Write the desired tests to the protobuf reference file.
andrew@webrtc.orgecac9b72012-05-02 00:04:10 +00001238 for (size_t i = 0; i < kChannelsSize; i++) {
1239 for (size_t j = 0; j < kChannelsSize; j++) {
andrew@webrtc.orgdaacee82012-02-07 00:01:04 +00001240 // We can't have more output than input channels.
1241 for (size_t k = 0; k <= j; k++) {
andrew@webrtc.orgecac9b72012-05-02 00:04:10 +00001242 for (size_t l = 0; l < kProcessSampleRatesSize; l++) {
andrew@webrtc.orgdaacee82012-02-07 00:01:04 +00001243 webrtc::audioproc::Test* test = ref_data.add_test();
andrew@webrtc.orgecac9b72012-05-02 00:04:10 +00001244 test->set_num_reverse_channels(kChannels[i]);
1245 test->set_num_input_channels(kChannels[j]);
1246 test->set_num_output_channels(kChannels[k]);
1247 test->set_sample_rate(kProcessSampleRates[l]);
andrew@webrtc.orgdaacee82012-02-07 00:01:04 +00001248 }
andrew@webrtc.org755b04a2011-11-15 16:57:56 +00001249 }
1250 }
1251 }
1252 }
1253
  EnableAllComponents();

  for (int i = 0; i < ref_data.test_size(); i++) {
    printf("Running test %d of %d...\n", i + 1, ref_data.test_size());

    webrtc::audioproc::Test* test = ref_data.mutable_test(i);
    Init(test->sample_rate(), test->num_reverse_channels(),
         test->num_input_channels(), test->num_output_channels(), true);

    int frame_count = 0;
    int has_echo_count = 0;
    int has_voice_count = 0;
    int is_saturated_count = 0;
    int analog_level = 127;
    int analog_level_average = 0;
    int max_output_average = 0;
    float ns_speech_prob_average = 0.0f;

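    // Run the far-end/near-end file pair through APM frame by frame until
    // either file runs out, accumulating per-frame statistics as we go.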
    while (1) {
      if (!ReadFrame(far_file_, revframe_)) break;
      EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));

      if (!ReadFrame(near_file_, frame_)) break;
      frame_->vad_activity_ = AudioFrame::kVadUnknown;

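      // Supply the per-frame stream parameters (delay, drift and analog mic
      // level) that the enabled components consume in ProcessStream().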
      EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
      EXPECT_EQ(apm_->kNoError,
                apm_->echo_cancellation()->set_stream_drift_samples(0));
      EXPECT_EQ(apm_->kNoError,
                apm_->gain_control()->set_stream_analog_level(analog_level));

      EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
      // Ensure the frame was downmixed properly.
      EXPECT_EQ(test->num_output_channels(), frame_->num_channels_);

      max_output_average += MaxAudioFrame(*frame_);

      if (apm_->echo_cancellation()->stream_has_echo()) {
        has_echo_count++;
      }

      analog_level = apm_->gain_control()->stream_analog_level();
      analog_level_average += analog_level;
      if (apm_->gain_control()->stream_is_saturated()) {
        is_saturated_count++;
      }
      if (apm_->voice_detection()->stream_has_voice()) {
        has_voice_count++;
        EXPECT_EQ(AudioFrame::kVadActive, frame_->vad_activity_);
      } else {
        EXPECT_EQ(AudioFrame::kVadPassive, frame_->vad_activity_);
      }

      ns_speech_prob_average += apm_->noise_suppression()->speech_probability();

      size_t frame_size = frame_->samples_per_channel_ * frame_->num_channels_;
      size_t write_count = fwrite(frame_->data_,
                                  sizeof(int16_t),
                                  frame_size,
                                  out_file_);
      ASSERT_EQ(frame_size, write_count);

      // Reset in case of downmixing.
      frame_->num_channels_ = test->num_input_channels();
      frame_count++;
    }
    max_output_average /= frame_count;
    analog_level_average /= frame_count;
    ns_speech_prob_average /= frame_count;

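    // The int accumulators above use integer division, which matches how the
    // stored reference values were generated. The metrics below are only
    // checked in the float profile build.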
#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
    EchoCancellation::Metrics echo_metrics;
    EXPECT_EQ(apm_->kNoError,
              apm_->echo_cancellation()->GetMetrics(&echo_metrics));
    int median = 0;
    int std = 0;
    EXPECT_EQ(apm_->kNoError,
              apm_->echo_cancellation()->GetDelayMetrics(&median, &std));

    int rms_level = apm_->level_estimator()->RMS();
    EXPECT_LE(0, rms_level);
    EXPECT_GE(127, rms_level);
#endif

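    // Either compare the accumulated statistics against the stored reference,
    // or record them as the new reference when --write_ref_data was given.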
    if (!write_ref_data) {
      EXPECT_EQ(test->has_echo_count(), has_echo_count);
      EXPECT_EQ(test->has_voice_count(), has_voice_count);
      EXPECT_EQ(test->is_saturated_count(), is_saturated_count);

      EXPECT_EQ(test->analog_level_average(), analog_level_average);
      EXPECT_EQ(test->max_output_average(), max_output_average);

#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
      webrtc::audioproc::Test::EchoMetrics reference =
          test->echo_metrics();
      TestStats(echo_metrics.residual_echo_return_loss,
                reference.residual_echo_return_loss());
      TestStats(echo_metrics.echo_return_loss,
                reference.echo_return_loss());
      TestStats(echo_metrics.echo_return_loss_enhancement,
                reference.echo_return_loss_enhancement());
      TestStats(echo_metrics.a_nlp,
                reference.a_nlp());

      webrtc::audioproc::Test::DelayMetrics reference_delay =
          test->delay_metrics();
      EXPECT_EQ(reference_delay.median(), median);
      EXPECT_EQ(reference_delay.std(), std);

      EXPECT_EQ(test->rms_level(), rms_level);

      EXPECT_FLOAT_EQ(test->ns_speech_probability_average(),
                      ns_speech_prob_average);
#endif
    } else {
      test->set_has_echo_count(has_echo_count);
      test->set_has_voice_count(has_voice_count);
      test->set_is_saturated_count(is_saturated_count);

      test->set_analog_level_average(analog_level_average);
      test->set_max_output_average(max_output_average);

#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
      webrtc::audioproc::Test::EchoMetrics* message =
          test->mutable_echo_metrics();
      WriteStatsMessage(echo_metrics.residual_echo_return_loss,
                        message->mutable_residual_echo_return_loss());
      WriteStatsMessage(echo_metrics.echo_return_loss,
                        message->mutable_echo_return_loss());
      WriteStatsMessage(echo_metrics.echo_return_loss_enhancement,
                        message->mutable_echo_return_loss_enhancement());
      WriteStatsMessage(echo_metrics.a_nlp,
                        message->mutable_a_nlp());

      webrtc::audioproc::Test::DelayMetrics* message_delay =
          test->mutable_delay_metrics();
      message_delay->set_median(median);
      message_delay->set_std(std);

      test->set_rms_level(rms_level);

      EXPECT_LE(0.0f, ns_speech_prob_average);
      EXPECT_GE(1.0f, ns_speech_prob_average);
      test->set_ns_speech_probability_average(ns_speech_prob_average);
#endif
    }

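    // Rewind the input files so the next configuration starts from the same
    // audio.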
    rewind(far_file_);
    rewind(near_file_);
  }

  if (write_ref_data) {
    WriteMessageLiteToFile(ref_filename_, ref_data);
  }
}
#endif  // WEBRTC_AUDIOPROC_BIT_EXACT

}  // namespace

int main(int argc, char** argv) {
  for (int i = 1; i < argc; i++) {
    if (strcmp(argv[i], "--write_ref_data") == 0) {
      write_ref_data = true;
    }
  }

  // We don't use TestSuite here because it would require the Android platform
  // build to depend on Gmock.
  webrtc::test::SetExecutablePath(argv[0]);
  testing::InitGoogleTest(&argc, argv);
  int result = RUN_ALL_TESTS();
  // Optional, but removes memory leak noise from Valgrind.
  google::protobuf::ShutdownProtobufLibrary();
  return result;
}