/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "audio_processing.h"

#include <assert.h>
#include <stdio.h>
#include <string.h>

#include <algorithm>
#include <sstream>
#include <string>

#include "gtest/gtest.h"

#include "event_wrapper.h"
#include "module_common_types.h"
#include "scoped_ptr.h"
#include "signal_processing_library.h"
#include "test/testsupport/fileutils.h"
#include "thread_wrapper.h"
#include "trace.h"
#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
#include "external/webrtc/webrtc/modules/audio_processing/test/unittest.pb.h"
#else
#include "webrtc/audio_processing/unittest.pb.h"
#endif

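// The bit-exact Process() comparison at the bottom of this file is only
// compiled when WEBRTC_AUDIOPROC_BIT_EXACT is defined, i.e. for the
// fixed-point profile or for Linux x86-64 debug builds (see the condition
// below).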
#if (defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)) || \
    (defined(WEBRTC_LINUX) && defined(WEBRTC_ARCH_X86_64) && !defined(NDEBUG))
#define WEBRTC_AUDIOPROC_BIT_EXACT
#endif

using webrtc::AudioProcessing;
using webrtc::AudioFrame;
using webrtc::GainControl;
using webrtc::NoiseSuppression;
using webrtc::EchoCancellation;
using webrtc::EventWrapper;
using webrtc::scoped_array;
using webrtc::Trace;
using webrtc::LevelEstimator;
using webrtc::EchoControlMobile;
using webrtc::VoiceDetection;

namespace {
// When false, this will compare the output data with the results stored to
// file. This is the typical case. When the file should be updated, it can
// be set to true with the command-line switch --write_ref_data.
bool write_ref_data = false;

const int kSampleRates[] = {8000, 16000, 32000};
const size_t kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
const int kChannels[] = {1, 2};
const size_t kChannelsSize = sizeof(kChannels) / sizeof(*kChannels);

#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
// AECM doesn't support super-wb.
const int kProcessSampleRates[] = {8000, 16000};
#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
const int kProcessSampleRates[] = {8000, 16000, 32000};
#endif
const size_t kProcessSampleRatesSize = sizeof(kProcessSampleRates) /
    sizeof(*kProcessSampleRates);

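// Helpers operating on interleaved int16_t audio. MixStereoToMono averages
// each left/right sample pair; in-place use (mono == stereo) is safe because
// mono[i] is written only after both source samples have been read.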
// TODO(andrew): Use the MonoToStereo routine from AudioFrameOperations.
void MixStereoToMono(const int16_t* stereo,
                     int16_t* mono,
                     int samples_per_channel) {
  for (int i = 0; i < samples_per_channel; i++) {
    int32_t int32 = (static_cast<int32_t>(stereo[i * 2]) +
                     static_cast<int32_t>(stereo[i * 2 + 1])) >> 1;
    mono[i] = static_cast<int16_t>(int32);
  }
}

void CopyLeftToRightChannel(int16_t* stereo, int samples_per_channel) {
  for (int i = 0; i < samples_per_channel; i++) {
    stereo[i * 2 + 1] = stereo[i * 2];
  }
}

void VerifyChannelsAreEqual(int16_t* stereo, int samples_per_channel) {
  for (int i = 0; i < samples_per_channel; i++) {
    EXPECT_EQ(stereo[i * 2 + 1], stereo[i * 2]);
  }
}

void SetFrameTo(AudioFrame* frame, int16_t value) {
  for (int i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
       ++i) {
    frame->data_[i] = value;
  }
}

void SetFrameTo(AudioFrame* frame, int16_t left, int16_t right) {
  ASSERT_EQ(2, frame->num_channels_);
  for (int i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
    frame->data_[i] = left;
    frame->data_[i + 1] = right;
  }
}

template <class T>
T AbsValue(T a) {
  return a > 0 ? a : -a;
}

int16_t MaxAudioFrame(const AudioFrame& frame) {
  const int length = frame.samples_per_channel_ * frame.num_channels_;
  int16_t max_data = AbsValue(frame.data_[0]);
  for (int i = 1; i < length; i++) {
    max_data = std::max(max_data, AbsValue(frame.data_[i]));
  }

  return max_data;
}

bool FrameDataAreEqual(const AudioFrame& frame1, const AudioFrame& frame2) {
  if (frame1.samples_per_channel_ != frame2.samples_per_channel_) {
    return false;
  }
  if (frame1.num_channels_ != frame2.num_channels_) {
    return false;
  }
  if (memcmp(frame1.data_, frame2.data_,
             frame1.samples_per_channel_ * frame1.num_channels_ *
                 sizeof(int16_t))) {
    return false;
  }
  return true;
}

void TestStats(const AudioProcessing::Statistic& test,
               const webrtc::audioproc::Test::Statistic& reference) {
  EXPECT_EQ(reference.instant(), test.instant);
  EXPECT_EQ(reference.average(), test.average);
  EXPECT_EQ(reference.maximum(), test.maximum);
  EXPECT_EQ(reference.minimum(), test.minimum);
}

void WriteStatsMessage(const AudioProcessing::Statistic& output,
                       webrtc::audioproc::Test::Statistic* message) {
  message->set_instant(output.instant);
  message->set_average(output.average);
  message->set_maximum(output.maximum);
  message->set_minimum(output.minimum);
}

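// Writes |message| to |filename| as its byte size (a host-endian int) followed
// by the serialized bytes; ReadMessageLiteFromFile() below expects this exact
// layout.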
void WriteMessageLiteToFile(const std::string filename,
                            const ::google::protobuf::MessageLite& message) {
  FILE* file = fopen(filename.c_str(), "wb");
  ASSERT_TRUE(file != NULL) << "Could not open " << filename;
  int size = message.ByteSize();
  ASSERT_GT(size, 0);
  unsigned char* array = new unsigned char[size];
  ASSERT_TRUE(message.SerializeToArray(array, size));

  ASSERT_EQ(1u, fwrite(&size, sizeof(int), 1, file));
  ASSERT_EQ(static_cast<size_t>(size),
            fwrite(array, sizeof(unsigned char), size, file));

  delete [] array;
  fclose(file);
}

void ReadMessageLiteFromFile(const std::string filename,
                             ::google::protobuf::MessageLite* message) {
  assert(message != NULL);

  FILE* file = fopen(filename.c_str(), "rb");
  ASSERT_TRUE(file != NULL) << "Could not open " << filename;
  int size = 0;
  ASSERT_EQ(1u, fread(&size, sizeof(int), 1, file));
  ASSERT_GT(size, 0);
  unsigned char* array = new unsigned char[size];
  ASSERT_EQ(static_cast<size_t>(size),
            fread(array, sizeof(unsigned char), size, file));

  ASSERT_TRUE(message->ParseFromArray(array, size));

  delete [] array;
  fclose(file);
}

struct ThreadData {
  ThreadData(int thread_num_, AudioProcessing* ap_)
      : thread_num(thread_num_),
        error(false),
        ap(ap_) {}
  int thread_num;
  bool error;
  AudioProcessing* ap;
};

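// Test fixture owning the AudioProcessing instance under test plus the
// far-end and near-end input files and an optional output file. Init()
// reconfigures all of them for a given sample rate and channel layout.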
class ApmTest : public ::testing::Test {
 protected:
  ApmTest();
  virtual void SetUp();
  virtual void TearDown();

  static void SetUpTestCase() {
    Trace::CreateTrace();
    std::string trace_filename = webrtc::test::OutputPath() +
        "audioproc_trace.txt";
    ASSERT_EQ(0, Trace::SetTraceFile(trace_filename.c_str()));
  }

  static void TearDownTestCase() {
    Trace::ReturnTrace();
  }

  void Init(int sample_rate_hz, int num_reverse_channels,
            int num_input_channels, int num_output_channels,
            bool open_output_file);
  std::string ResourceFilePath(std::string name, int sample_rate_hz);
  std::string OutputFilePath(std::string name,
                             int sample_rate_hz,
                             int num_reverse_channels,
                             int num_input_channels,
                             int num_output_channels);
  void EnableAllComponents();
  bool ReadFrame(FILE* file, AudioFrame* frame);
  void ProcessWithDefaultStreamParameters(AudioFrame* frame);
  template <typename F>
  void ChangeTriggersInit(F f, AudioProcessing* ap, int initial_value,
                          int changed_value);

  const std::string output_path_;
  const std::string ref_path_;
  const std::string ref_filename_;
  webrtc::AudioProcessing* apm_;
  webrtc::AudioFrame* frame_;
  webrtc::AudioFrame* revframe_;
  FILE* far_file_;
  FILE* near_file_;
  FILE* out_file_;
};

ApmTest::ApmTest()
    : output_path_(webrtc::test::OutputPath()),
      ref_path_(webrtc::test::ProjectRootPath() +
                "data/audio_processing/"),
#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
      ref_filename_(ref_path_ + "output_data_fixed.pb"),
#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
      ref_filename_(ref_path_ + "output_data_float.pb"),
#endif
      apm_(NULL),
      frame_(NULL),
      revframe_(NULL),
      far_file_(NULL),
      near_file_(NULL),
      out_file_(NULL) {}

void ApmTest::SetUp() {
  apm_ = AudioProcessing::Create(0);
  ASSERT_TRUE(apm_ != NULL);

  frame_ = new AudioFrame();
  revframe_ = new AudioFrame();

  Init(32000, 2, 2, 2, false);
}

void ApmTest::TearDown() {
  if (frame_) {
    delete frame_;
  }
  frame_ = NULL;

  if (revframe_) {
    delete revframe_;
  }
  revframe_ = NULL;

  if (far_file_) {
    ASSERT_EQ(0, fclose(far_file_));
  }
  far_file_ = NULL;

  if (near_file_) {
    ASSERT_EQ(0, fclose(near_file_));
  }
  near_file_ = NULL;

  if (out_file_) {
    ASSERT_EQ(0, fclose(out_file_));
  }
  out_file_ = NULL;

  if (apm_ != NULL) {
    AudioProcessing::Destroy(apm_);
  }
  apm_ = NULL;
}

std::string ApmTest::ResourceFilePath(std::string name, int sample_rate_hz) {
  std::ostringstream ss;
  // Resource files are all stereo.
  ss << name << sample_rate_hz / 1000 << "_stereo";
  return webrtc::test::ResourcePath(ss.str(), "pcm");
}

std::string ApmTest::OutputFilePath(std::string name,
                                    int sample_rate_hz,
                                    int num_reverse_channels,
                                    int num_input_channels,
                                    int num_output_channels) {
  std::ostringstream ss;
  ss << name << sample_rate_hz / 1000 << "_" << num_reverse_channels << "r" <<
      num_input_channels << "i" << "_";
  if (num_output_channels == 1) {
    ss << "mono";
  } else if (num_output_channels == 2) {
    ss << "stereo";
  } else {
    assert(false);
    return "";
  }
  ss << ".pcm";

  return output_path_ + ss.str();
}

void ApmTest::Init(int sample_rate_hz, int num_reverse_channels,
                   int num_input_channels, int num_output_channels,
                   bool open_output_file) {
  ASSERT_EQ(apm_->kNoError, apm_->Initialize());

  // Handles error checking of the parameters as well. No need to repeat it.
  ASSERT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(sample_rate_hz));
  ASSERT_EQ(apm_->kNoError, apm_->set_num_channels(num_input_channels,
                                                   num_output_channels));
  ASSERT_EQ(apm_->kNoError,
            apm_->set_num_reverse_channels(num_reverse_channels));

  // We always use 10 ms frames.
  const int samples_per_channel = sample_rate_hz / 100;
  frame_->samples_per_channel_ = samples_per_channel;
  frame_->num_channels_ = num_input_channels;
  frame_->sample_rate_hz_ = sample_rate_hz;
  revframe_->samples_per_channel_ = samples_per_channel;
  revframe_->num_channels_ = num_reverse_channels;
  revframe_->sample_rate_hz_ = sample_rate_hz;

  if (far_file_) {
    ASSERT_EQ(0, fclose(far_file_));
  }
  std::string filename = ResourceFilePath("far", sample_rate_hz);
  far_file_ = fopen(filename.c_str(), "rb");
  ASSERT_TRUE(far_file_ != NULL) << "Could not open file " <<
      filename << "\n";

  if (near_file_) {
    ASSERT_EQ(0, fclose(near_file_));
  }
  filename = ResourceFilePath("near", sample_rate_hz);
  near_file_ = fopen(filename.c_str(), "rb");
  ASSERT_TRUE(near_file_ != NULL) << "Could not open file " <<
      filename << "\n";

  if (open_output_file) {
    if (out_file_) {
      ASSERT_EQ(0, fclose(out_file_));
    }
    filename = OutputFilePath("out", sample_rate_hz, num_reverse_channels,
                              num_input_channels, num_output_channels);
    out_file_ = fopen(filename.c_str(), "wb");
    ASSERT_TRUE(out_file_ != NULL) << "Could not open file " <<
        filename << "\n";
  }
}

void ApmTest::EnableAllComponents() {
#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
  EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));

  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_mode(GainControl::kAdaptiveDigital));
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
#elif defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_drift_compensation(true));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_metrics(true));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_delay_logging(true));
  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));

  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_mode(GainControl::kAdaptiveAnalog));
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_analog_level_limits(0, 255));
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
#endif

  EXPECT_EQ(apm_->kNoError,
            apm_->high_pass_filter()->Enable(true));

  EXPECT_EQ(apm_->kNoError,
            apm_->level_estimator()->Enable(true));

  EXPECT_EQ(apm_->kNoError,
            apm_->noise_suppression()->Enable(true));

  EXPECT_EQ(apm_->kNoError,
            apm_->voice_detection()->Enable(true));
}

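// Reads one frame of stereo samples from |file| into |frame|, downmixing to
// mono when the frame is configured for a single channel. Returns false once
// the end of the file is reached.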
bool ApmTest::ReadFrame(FILE* file, AudioFrame* frame) {
  // The files always contain stereo audio.
  size_t frame_size = frame->samples_per_channel_ * 2;
  size_t read_count = fread(frame->data_,
                            sizeof(int16_t),
                            frame_size,
                            file);
  if (read_count != frame_size) {
    // Check that the file really ended.
    EXPECT_NE(0, feof(file));
    return false;  // This is expected.
  }

  if (frame->num_channels_ == 1) {
    MixStereoToMono(frame->data_, frame->data_,
                    frame->samples_per_channel_);
  }

  return true;
}

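// Runs a single ProcessStream() call with neutral stream parameters: zero
// delay, zero drift and a mid-range analog level of 127.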
void ApmTest::ProcessWithDefaultStreamParameters(AudioFrame* frame) {
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_stream_analog_level(127));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame));
}

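// Checks that applying the setter |f| with a new value triggers an internal
// re-initialization (the processed output then matches that of a freshly
// initialized instance), while re-applying the same value does not.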
template <typename F>
void ApmTest::ChangeTriggersInit(F f, AudioProcessing* ap, int initial_value,
                                 int changed_value) {
  EnableAllComponents();
  Init(16000, 2, 2, 2, false);
  SetFrameTo(frame_, 1000);
  AudioFrame frame_copy;
  frame_copy.CopyFrom(*frame_);
  ProcessWithDefaultStreamParameters(frame_);
  // Verify the processing has actually changed the frame.
  EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));

  // Test that a change in value triggers an init.
  f(apm_, changed_value);
  f(apm_, initial_value);
  ProcessWithDefaultStreamParameters(&frame_copy);
  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));

  apm_->Initialize();
  SetFrameTo(frame_, 1000);
  AudioFrame initial_frame;
  initial_frame.CopyFrom(*frame_);
  ProcessWithDefaultStreamParameters(frame_);
  ProcessWithDefaultStreamParameters(frame_);
  // Verify the processing has actually changed the frame.
  EXPECT_FALSE(FrameDataAreEqual(*frame_, initial_frame));

  frame_copy.CopyFrom(initial_frame);
  apm_->Initialize();
  ProcessWithDefaultStreamParameters(&frame_copy);
  // Verify an init here would result in different output.
  apm_->Initialize();
  ProcessWithDefaultStreamParameters(&frame_copy);
  EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));

  frame_copy.CopyFrom(initial_frame);
  apm_->Initialize();
  ProcessWithDefaultStreamParameters(&frame_copy);
  // Test that the same value does not trigger an init.
  f(apm_, initial_value);
  ProcessWithDefaultStreamParameters(&frame_copy);
  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
}

TEST_F(ApmTest, StreamParameters) {
  // No errors when the components are disabled.
  EXPECT_EQ(apm_->kNoError,
            apm_->ProcessStream(frame_));

  // -- Missing AGC level --
  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));

  // Resets after successful ProcessStream().
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_stream_analog_level(127));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));

  // Other stream parameters set correctly.
  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_drift_compensation(true));
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kStreamParameterNotSetError,
            apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_drift_compensation(false));

  // -- Missing delay --
  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));

  // Resets after successful ProcessStream().
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));

  // Other stream parameters set correctly.
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_drift_compensation(true));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_stream_analog_level(127));
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));

  // -- Missing drift --
  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));

  // Resets after successful ProcessStream().
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));

  // Other stream parameters set correctly.
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(true));
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_stream_analog_level(127));
  EXPECT_EQ(apm_->kStreamParameterNotSetError, apm_->ProcessStream(frame_));

  // -- No stream parameters --
  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
  EXPECT_EQ(apm_->kNoError,
            apm_->AnalyzeReverseStream(revframe_));
  EXPECT_EQ(apm_->kStreamParameterNotSetError,
            apm_->ProcessStream(frame_));

  // -- All there --
  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_stream_analog_level(127));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
}

TEST_F(ApmTest, DefaultDelayOffsetIsZero) {
  EXPECT_EQ(0, apm_->delay_offset_ms());
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(50));
  EXPECT_EQ(50, apm_->stream_delay_ms());
}

TEST_F(ApmTest, DelayOffsetWithLimitsIsSetProperly) {
  // High limit of 500 ms.
  apm_->set_delay_offset_ms(100);
  EXPECT_EQ(100, apm_->delay_offset_ms());
  EXPECT_EQ(apm_->kBadStreamParameterWarning, apm_->set_stream_delay_ms(450));
  EXPECT_EQ(500, apm_->stream_delay_ms());
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
  EXPECT_EQ(200, apm_->stream_delay_ms());

  // Low limit of 0 ms.
  apm_->set_delay_offset_ms(-50);
  EXPECT_EQ(-50, apm_->delay_offset_ms());
  EXPECT_EQ(apm_->kBadStreamParameterWarning, apm_->set_stream_delay_ms(20));
  EXPECT_EQ(0, apm_->stream_delay_ms());
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(100));
  EXPECT_EQ(50, apm_->stream_delay_ms());
}

TEST_F(ApmTest, Channels) {
  // Testing number of invalid channels.
  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(0, 1));
  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 0));
  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(3, 1));
  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(1, 3));
  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(0));
  EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_reverse_channels(3));
  // Testing number of valid channels.
  for (int i = 1; i < 3; i++) {
    for (int j = 1; j < 3; j++) {
      if (j > i) {
        EXPECT_EQ(apm_->kBadParameterError, apm_->set_num_channels(i, j));
      } else {
        EXPECT_EQ(apm_->kNoError, apm_->set_num_channels(i, j));
        EXPECT_EQ(j, apm_->num_output_channels());
      }
    }
    EXPECT_EQ(i, apm_->num_input_channels());
    EXPECT_EQ(apm_->kNoError, apm_->set_num_reverse_channels(i));
    EXPECT_EQ(i, apm_->num_reverse_channels());
  }
}

TEST_F(ApmTest, SampleRates) {
  // Testing invalid sample rates.
  EXPECT_EQ(apm_->kBadParameterError, apm_->set_sample_rate_hz(10000));
  // Testing valid sample rates.
  int fs[] = {8000, 16000, 32000};
  for (size_t i = 0; i < sizeof(fs) / sizeof(*fs); i++) {
    EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(fs[i]));
    EXPECT_EQ(fs[i], apm_->sample_rate_hz());
  }
}

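// Setter shims with a common signature, passed to ChangeTriggersInit() by the
// *TriggersInit tests below.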
void SetSampleRate(AudioProcessing* ap, int value) {
  EXPECT_EQ(ap->kNoError, ap->set_sample_rate_hz(value));
}

void SetNumReverseChannels(AudioProcessing* ap, int value) {
  EXPECT_EQ(ap->kNoError, ap->set_num_reverse_channels(value));
}

void SetNumOutputChannels(AudioProcessing* ap, int value) {
  EXPECT_EQ(ap->kNoError, ap->set_num_channels(2, value));
}

TEST_F(ApmTest, SampleRateChangeTriggersInit) {
  ChangeTriggersInit(SetSampleRate, apm_, 16000, 8000);
}

TEST_F(ApmTest, ReverseChannelChangeTriggersInit) {
  ChangeTriggersInit(SetNumReverseChannels, apm_, 2, 1);
}

TEST_F(ApmTest, ChannelChangeTriggersInit) {
  ChangeTriggersInit(SetNumOutputChannels, apm_, 2, 1);
}

TEST_F(ApmTest, EchoCancellation) {
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_drift_compensation(true));
  EXPECT_TRUE(apm_->echo_cancellation()->is_drift_compensation_enabled());
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_drift_compensation(false));
  EXPECT_FALSE(apm_->echo_cancellation()->is_drift_compensation_enabled());

  EXPECT_EQ(apm_->kBadParameterError,
            apm_->echo_cancellation()->set_device_sample_rate_hz(4000));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->echo_cancellation()->set_device_sample_rate_hz(100000));

  int rate[] = {16000, 44100, 48000};
  for (size_t i = 0; i < sizeof(rate)/sizeof(*rate); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->echo_cancellation()->set_device_sample_rate_hz(rate[i]));
    EXPECT_EQ(rate[i],
              apm_->echo_cancellation()->device_sample_rate_hz());
  }

  EchoCancellation::SuppressionLevel level[] = {
    EchoCancellation::kLowSuppression,
    EchoCancellation::kModerateSuppression,
    EchoCancellation::kHighSuppression,
  };
  for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->echo_cancellation()->set_suppression_level(level[i]));
    EXPECT_EQ(level[i],
              apm_->echo_cancellation()->suppression_level());
  }

  EchoCancellation::Metrics metrics;
  EXPECT_EQ(apm_->kNotEnabledError,
            apm_->echo_cancellation()->GetMetrics(&metrics));

  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_metrics(true));
  EXPECT_TRUE(apm_->echo_cancellation()->are_metrics_enabled());
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_metrics(false));
  EXPECT_FALSE(apm_->echo_cancellation()->are_metrics_enabled());

  int median = 0;
  int std = 0;
  EXPECT_EQ(apm_->kNotEnabledError,
            apm_->echo_cancellation()->GetDelayMetrics(&median, &std));

  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_delay_logging(true));
  EXPECT_TRUE(apm_->echo_cancellation()->is_delay_logging_enabled());
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->enable_delay_logging(false));
  EXPECT_FALSE(apm_->echo_cancellation()->is_delay_logging_enabled());

  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
  EXPECT_TRUE(apm_->echo_cancellation()->is_enabled());
  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(false));
  EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
}

TEST_F(ApmTest, EchoControlMobile) {
  // AECM won't use super-wideband.
  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
  EXPECT_EQ(apm_->kBadSampleRateError,
            apm_->echo_control_mobile()->Enable(true));
  // Turn AECM on (and AEC off).
  Init(16000, 2, 2, 2, false);
  EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(true));
  EXPECT_TRUE(apm_->echo_control_mobile()->is_enabled());

  // Toggle routing modes.
  EchoControlMobile::RoutingMode mode[] = {
    EchoControlMobile::kQuietEarpieceOrHeadset,
    EchoControlMobile::kEarpiece,
    EchoControlMobile::kLoudEarpiece,
    EchoControlMobile::kSpeakerphone,
    EchoControlMobile::kLoudSpeakerphone,
  };
  for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->echo_control_mobile()->set_routing_mode(mode[i]));
    EXPECT_EQ(mode[i],
              apm_->echo_control_mobile()->routing_mode());
  }
  // Turn comfort noise off/on.
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_control_mobile()->enable_comfort_noise(false));
  EXPECT_FALSE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_control_mobile()->enable_comfort_noise(true));
  EXPECT_TRUE(apm_->echo_control_mobile()->is_comfort_noise_enabled());
  // Set and get echo path.
  const size_t echo_path_size =
      apm_->echo_control_mobile()->echo_path_size_bytes();
  scoped_array<char> echo_path_in(new char[echo_path_size]);
  scoped_array<char> echo_path_out(new char[echo_path_size]);
  EXPECT_EQ(apm_->kNullPointerError,
            apm_->echo_control_mobile()->SetEchoPath(NULL, echo_path_size));
  EXPECT_EQ(apm_->kNullPointerError,
            apm_->echo_control_mobile()->GetEchoPath(NULL, echo_path_size));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(), 1));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(),
                                                     echo_path_size));
  for (size_t i = 0; i < echo_path_size; i++) {
    echo_path_in[i] = echo_path_out[i] + 1;
  }
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->echo_control_mobile()->SetEchoPath(echo_path_in.get(), 1));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_control_mobile()->SetEchoPath(echo_path_in.get(),
                                                     echo_path_size));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_control_mobile()->GetEchoPath(echo_path_out.get(),
                                                     echo_path_size));
  for (size_t i = 0; i < echo_path_size; i++) {
    EXPECT_EQ(echo_path_in[i], echo_path_out[i]);
  }

  // Process a few frames with NS in the default disabled state. This exercises
  // a different codepath than with it enabled.
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));

  // Turn AECM off.
  EXPECT_EQ(apm_->kNoError, apm_->echo_control_mobile()->Enable(false));
  EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
}

TEST_F(ApmTest, GainControl) {
  // Testing gain modes.
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_mode(
                apm_->gain_control()->mode()));

  GainControl::Mode mode[] = {
    GainControl::kAdaptiveAnalog,
    GainControl::kAdaptiveDigital,
    GainControl::kFixedDigital
  };
  for (size_t i = 0; i < sizeof(mode)/sizeof(*mode); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->gain_control()->set_mode(mode[i]));
    EXPECT_EQ(mode[i], apm_->gain_control()->mode());
  }
  // Testing invalid target levels.
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_target_level_dbfs(-3));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_target_level_dbfs(-40));
  // Testing valid target levels.
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_target_level_dbfs(
                apm_->gain_control()->target_level_dbfs()));

  int level_dbfs[] = {0, 6, 31};
  for (size_t i = 0; i < sizeof(level_dbfs)/sizeof(*level_dbfs); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->gain_control()->set_target_level_dbfs(level_dbfs[i]));
    EXPECT_EQ(level_dbfs[i], apm_->gain_control()->target_level_dbfs());
  }

  // Testing invalid compression gains.
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_compression_gain_db(-1));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_compression_gain_db(100));

  // Testing valid compression gains.
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_compression_gain_db(
                apm_->gain_control()->compression_gain_db()));

  int gain_db[] = {0, 10, 90};
  for (size_t i = 0; i < sizeof(gain_db)/sizeof(*gain_db); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->gain_control()->set_compression_gain_db(gain_db[i]));
    EXPECT_EQ(gain_db[i], apm_->gain_control()->compression_gain_db());
  }

  // Testing limiter off/on.
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(false));
  EXPECT_FALSE(apm_->gain_control()->is_limiter_enabled());
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->enable_limiter(true));
  EXPECT_TRUE(apm_->gain_control()->is_limiter_enabled());

  // Testing invalid level limits.
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_analog_level_limits(-1, 512));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_analog_level_limits(100000, 512));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_analog_level_limits(512, -1));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_analog_level_limits(512, 100000));
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->gain_control()->set_analog_level_limits(512, 255));

  // Testing valid level limits.
  EXPECT_EQ(apm_->kNoError,
            apm_->gain_control()->set_analog_level_limits(
                apm_->gain_control()->analog_level_minimum(),
                apm_->gain_control()->analog_level_maximum()));

  int min_level[] = {0, 255, 1024};
  for (size_t i = 0; i < sizeof(min_level)/sizeof(*min_level); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->gain_control()->set_analog_level_limits(min_level[i], 1024));
    EXPECT_EQ(min_level[i], apm_->gain_control()->analog_level_minimum());
  }

  int max_level[] = {0, 1024, 65535};
  for (size_t i = 0; i < sizeof(max_level)/sizeof(*max_level); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->gain_control()->set_analog_level_limits(0, max_level[i]));
    EXPECT_EQ(max_level[i], apm_->gain_control()->analog_level_maximum());
  }

  // TODO(ajm): stream_is_saturated() and stream_analog_level()

  // Turn AGC off.
  EXPECT_EQ(apm_->kNoError, apm_->gain_control()->Enable(false));
  EXPECT_FALSE(apm_->gain_control()->is_enabled());
}

TEST_F(ApmTest, NoiseSuppression) {
  // Test valid suppression levels.
  NoiseSuppression::Level level[] = {
    NoiseSuppression::kLow,
    NoiseSuppression::kModerate,
    NoiseSuppression::kHigh,
    NoiseSuppression::kVeryHigh
  };
  for (size_t i = 0; i < sizeof(level)/sizeof(*level); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->noise_suppression()->set_level(level[i]));
    EXPECT_EQ(level[i], apm_->noise_suppression()->level());
  }

  // Turn NS on/off.
  EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(true));
  EXPECT_TRUE(apm_->noise_suppression()->is_enabled());
  EXPECT_EQ(apm_->kNoError, apm_->noise_suppression()->Enable(false));
  EXPECT_FALSE(apm_->noise_suppression()->is_enabled());
}

TEST_F(ApmTest, HighPassFilter) {
  // Turn HP filter on/off.
  EXPECT_EQ(apm_->kNoError, apm_->high_pass_filter()->Enable(true));
  EXPECT_TRUE(apm_->high_pass_filter()->is_enabled());
  EXPECT_EQ(apm_->kNoError, apm_->high_pass_filter()->Enable(false));
  EXPECT_FALSE(apm_->high_pass_filter()->is_enabled());
}

TEST_F(ApmTest, LevelEstimator) {
  // Turn level estimator on/off.
  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
  EXPECT_FALSE(apm_->level_estimator()->is_enabled());

  EXPECT_EQ(apm_->kNotEnabledError, apm_->level_estimator()->RMS());

  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
  EXPECT_TRUE(apm_->level_estimator()->is_enabled());

  // Run this test in wideband; in super-wb, the splitting filter distorts the
  // audio enough to cause deviation from the expectation for small values.
  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
  frame_->samples_per_channel_ = 160;
  frame_->num_channels_ = 2;
  frame_->sample_rate_hz_ = 16000;

  // Min value if no frames have been processed.
  EXPECT_EQ(127, apm_->level_estimator()->RMS());

  // Min value on zero frames.
  SetFrameTo(frame_, 0);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(127, apm_->level_estimator()->RMS());

  // Try a few RMS values.
  // (These also test that the value resets after retrieving it.)
  SetFrameTo(frame_, 32767);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(0, apm_->level_estimator()->RMS());

  SetFrameTo(frame_, 30000);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(1, apm_->level_estimator()->RMS());

  SetFrameTo(frame_, 10000);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(10, apm_->level_estimator()->RMS());

  SetFrameTo(frame_, 10);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(70, apm_->level_estimator()->RMS());

  // Min value if energy_ == 0.
  SetFrameTo(frame_, 10000);
  uint32_t energy = frame_->energy_;  // Save default to restore below.
  frame_->energy_ = 0;
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(127, apm_->level_estimator()->RMS());
  frame_->energy_ = energy;

  // Verify reset after enable/disable.
  SetFrameTo(frame_, 32767);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
  SetFrameTo(frame_, 1);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(90, apm_->level_estimator()->RMS());

  // Verify reset after initialize.
  SetFrameTo(frame_, 32767);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->Initialize());
  SetFrameTo(frame_, 1);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(90, apm_->level_estimator()->RMS());
}

TEST_F(ApmTest, VoiceDetection) {
  // Test external VAD.
  EXPECT_EQ(apm_->kNoError,
            apm_->voice_detection()->set_stream_has_voice(true));
  EXPECT_TRUE(apm_->voice_detection()->stream_has_voice());
  EXPECT_EQ(apm_->kNoError,
            apm_->voice_detection()->set_stream_has_voice(false));
  EXPECT_FALSE(apm_->voice_detection()->stream_has_voice());

  // Test valid likelihoods.
  VoiceDetection::Likelihood likelihood[] = {
    VoiceDetection::kVeryLowLikelihood,
    VoiceDetection::kLowLikelihood,
    VoiceDetection::kModerateLikelihood,
    VoiceDetection::kHighLikelihood
  };
  for (size_t i = 0; i < sizeof(likelihood)/sizeof(*likelihood); i++) {
    EXPECT_EQ(apm_->kNoError,
              apm_->voice_detection()->set_likelihood(likelihood[i]));
    EXPECT_EQ(likelihood[i], apm_->voice_detection()->likelihood());
  }

  /* TODO(bjornv): Enable once VAD supports other frame lengths than 10 ms
  // Test invalid frame sizes
  EXPECT_EQ(apm_->kBadParameterError,
            apm_->voice_detection()->set_frame_size_ms(12));

  // Test valid frame sizes
  for (int i = 10; i <= 30; i += 10) {
    EXPECT_EQ(apm_->kNoError,
              apm_->voice_detection()->set_frame_size_ms(i));
    EXPECT_EQ(i, apm_->voice_detection()->frame_size_ms());
  }
  */

  // Turn VAD on/off.
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
  EXPECT_TRUE(apm_->voice_detection()->is_enabled());
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
  EXPECT_FALSE(apm_->voice_detection()->is_enabled());

  // Test that AudioFrame activity is maintained when VAD is disabled.
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));
  AudioFrame::VADActivity activity[] = {
    AudioFrame::kVadActive,
    AudioFrame::kVadPassive,
    AudioFrame::kVadUnknown
  };
  for (size_t i = 0; i < sizeof(activity)/sizeof(*activity); i++) {
    frame_->vad_activity_ = activity[i];
    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
    EXPECT_EQ(activity[i], frame_->vad_activity_);
  }

  // Test that AudioFrame activity is set when VAD is enabled.
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
  frame_->vad_activity_ = AudioFrame::kVadUnknown;
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_NE(AudioFrame::kVadUnknown, frame_->vad_activity_);

  // TODO(bjornv): Add tests for streamed voice; stream_has_voice()
}

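// With two input channels and one output channel, the downmixed output should
// equal a mono frame holding the average of the two input channels.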
TEST_F(ApmTest, VerifyDownMixing) {
  for (size_t i = 0; i < kSampleRatesSize; i++) {
    Init(kSampleRates[i], 2, 2, 1, false);
    SetFrameTo(frame_, 1000, 2000);
    AudioFrame mono_frame;
    mono_frame.samples_per_channel_ = frame_->samples_per_channel_;
    mono_frame.num_channels_ = 1;
    SetFrameTo(&mono_frame, 1500);
    EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
    EXPECT_TRUE(FrameDataAreEqual(*frame_, mono_frame));
  }
}

TEST_F(ApmTest, AllProcessingDisabledByDefault) {
  EXPECT_FALSE(apm_->echo_cancellation()->is_enabled());
  EXPECT_FALSE(apm_->echo_control_mobile()->is_enabled());
  EXPECT_FALSE(apm_->gain_control()->is_enabled());
  EXPECT_FALSE(apm_->high_pass_filter()->is_enabled());
  EXPECT_FALSE(apm_->level_estimator()->is_enabled());
  EXPECT_FALSE(apm_->noise_suppression()->is_enabled());
  EXPECT_FALSE(apm_->voice_detection()->is_enabled());
}

TEST_F(ApmTest, NoProcessingWhenAllComponentsDisabled) {
  for (size_t i = 0; i < kSampleRatesSize; i++) {
    Init(kSampleRates[i], 2, 2, 2, false);
    SetFrameTo(frame_, 1000, 2000);
    AudioFrame frame_copy;
    frame_copy.CopyFrom(*frame_);
    for (int j = 0; j < 1000; j++) {
      EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
      EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
    }
  }
}

TEST_F(ApmTest, IdenticalInputChannelsResultInIdenticalOutputChannels) {
  EnableAllComponents();

  for (size_t i = 0; i < kProcessSampleRatesSize; i++) {
    Init(kProcessSampleRates[i], 2, 2, 2, false);
    int analog_level = 127;
    while (1) {
      if (!ReadFrame(far_file_, revframe_)) break;
      CopyLeftToRightChannel(revframe_->data_,
                             revframe_->samples_per_channel_);

      EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));

      if (!ReadFrame(near_file_, frame_)) break;
      CopyLeftToRightChannel(frame_->data_, frame_->samples_per_channel_);
      frame_->vad_activity_ = AudioFrame::kVadUnknown;

      EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
      EXPECT_EQ(apm_->kNoError,
                apm_->echo_cancellation()->set_stream_drift_samples(0));
      EXPECT_EQ(apm_->kNoError,
                apm_->gain_control()->set_stream_analog_level(analog_level));
      EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
      analog_level = apm_->gain_control()->stream_analog_level();

      VerifyChannelsAreEqual(frame_->data_, frame_->samples_per_channel_);
    }
  }
}

TEST_F(ApmTest, SplittingFilter) {
  // Verify the filter is not active through undistorted audio when:
  // 1. No components are enabled...
  SetFrameTo(frame_, 1000);
  AudioFrame frame_copy;
  frame_copy.CopyFrom(*frame_);
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));

  // 2. Only the level estimator is enabled...
  SetFrameTo(frame_, 1000);
  frame_copy.CopyFrom(*frame_);
  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));

  // 3. Only VAD is enabled...
  SetFrameTo(frame_, 1000);
  frame_copy.CopyFrom(*frame_);
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));

  // 4. Both VAD and the level estimator are enabled...
  SetFrameTo(frame_, 1000);
  frame_copy.CopyFrom(*frame_);
  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(true));
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(true));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));
  EXPECT_EQ(apm_->kNoError, apm_->level_estimator()->Enable(false));
  EXPECT_EQ(apm_->kNoError, apm_->voice_detection()->Enable(false));

  // 5. Not using super-wb.
  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(16000));
  frame_->samples_per_channel_ = 160;
  frame_->num_channels_ = 2;
  frame_->sample_rate_hz_ = 16000;
  // Enable AEC, which would require the filter in super-wb. We rely on the
  // first few frames of data being unaffected by the AEC.
  // TODO(andrew): This test, and the one below, rely rather tenuously on the
  // behavior of the AEC. Think of something more robust.
  EXPECT_EQ(apm_->kNoError, apm_->echo_cancellation()->Enable(true));
  SetFrameTo(frame_, 1000);
  frame_copy.CopyFrom(*frame_);
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_TRUE(FrameDataAreEqual(*frame_, frame_copy));

  // Check the test is valid. We should have distortion from the filter
  // when AEC is enabled (which won't affect the audio).
  EXPECT_EQ(apm_->kNoError, apm_->set_sample_rate_hz(32000));
  frame_->samples_per_channel_ = 320;
  frame_->num_channels_ = 2;
  frame_->sample_rate_hz_ = 32000;
  SetFrameTo(frame_, 1000);
  frame_copy.CopyFrom(*frame_);
  EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
  EXPECT_EQ(apm_->kNoError,
            apm_->echo_cancellation()->set_stream_drift_samples(0));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_FALSE(FrameDataAreEqual(*frame_, frame_copy));
}

// TODO(andrew): expand test to verify output.
TEST_F(ApmTest, DebugDump) {
  const std::string filename = webrtc::test::OutputPath() + "debug.aec";
  EXPECT_EQ(apm_->kNullPointerError, apm_->StartDebugRecording(NULL));

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stopping without having started should be OK.
  EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());

  EXPECT_EQ(apm_->kNoError, apm_->StartDebugRecording(filename.c_str()));
  EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));
  EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
  EXPECT_EQ(apm_->kNoError, apm_->StopDebugRecording());

  // Verify the file has been written.
  FILE* fid = fopen(filename.c_str(), "r");
  ASSERT_TRUE(fid != NULL);

  // Clean it up.
  ASSERT_EQ(0, fclose(fid));
  ASSERT_EQ(0, remove(filename.c_str()));
#else
  EXPECT_EQ(apm_->kUnsupportedFunctionError,
            apm_->StartDebugRecording(filename.c_str()));
  EXPECT_EQ(apm_->kUnsupportedFunctionError, apm_->StopDebugRecording());

  // Verify the file has NOT been written.
  ASSERT_TRUE(fopen(filename.c_str(), "r") == NULL);
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}

// TODO(andrew): Add a test to process a few frames with different combinations
// of enabled components.

// TODO(andrew): Make this test more robust such that it can be run on multiple
// platforms. It currently requires bit-exactness.
#ifdef WEBRTC_AUDIOPROC_BIT_EXACT
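// End-to-end test: runs the far/near resource files through all enabled
// components and compares the results against the reference data in
// |ref_filename_|, or rewrites that reference when --write_ref_data is given.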
andrew@webrtc.org755b04a2011-11-15 16:57:56 +00001234TEST_F(ApmTest, Process) {
1235 GOOGLE_PROTOBUF_VERIFY_VERSION;
andrew@webrtc.orgdaacee82012-02-07 00:01:04 +00001236 webrtc::audioproc::OutputData ref_data;
andrew@webrtc.org755b04a2011-11-15 16:57:56 +00001237
andrew@webrtc.orgdaacee82012-02-07 00:01:04 +00001238 if (!write_ref_data) {
1239 ReadMessageLiteFromFile(ref_filename_, &ref_data);
andrew@webrtc.org755b04a2011-11-15 16:57:56 +00001240 } else {
andrew@webrtc.orgdaacee82012-02-07 00:01:04 +00001241 // Write the desired tests to the protobuf reference file.
andrew@webrtc.orgecac9b72012-05-02 00:04:10 +00001242 for (size_t i = 0; i < kChannelsSize; i++) {
1243 for (size_t j = 0; j < kChannelsSize; j++) {
andrew@webrtc.orgdaacee82012-02-07 00:01:04 +00001244 // We can't have more output than input channels.
1245 for (size_t k = 0; k <= j; k++) {
andrew@webrtc.orgecac9b72012-05-02 00:04:10 +00001246 for (size_t l = 0; l < kProcessSampleRatesSize; l++) {
andrew@webrtc.orgdaacee82012-02-07 00:01:04 +00001247 webrtc::audioproc::Test* test = ref_data.add_test();
andrew@webrtc.orgecac9b72012-05-02 00:04:10 +00001248 test->set_num_reverse_channels(kChannels[i]);
1249 test->set_num_input_channels(kChannels[j]);
1250 test->set_num_output_channels(kChannels[k]);
1251 test->set_sample_rate(kProcessSampleRates[l]);
andrew@webrtc.orgdaacee82012-02-07 00:01:04 +00001252 }
andrew@webrtc.org755b04a2011-11-15 16:57:56 +00001253 }
1254 }
1255 }
1256 }
1257
andrew@webrtc.org07bf9a02012-05-05 00:32:00 +00001258 EnableAllComponents();
andrew@webrtc.org755b04a2011-11-15 16:57:56 +00001259
andrew@webrtc.orgdaacee82012-02-07 00:01:04 +00001260 for (int i = 0; i < ref_data.test_size(); i++) {
1261 printf("Running test %d of %d...\n", i + 1, ref_data.test_size());
andrew@webrtc.org755b04a2011-11-15 16:57:56 +00001262
andrew@webrtc.orgdaacee82012-02-07 00:01:04 +00001263 webrtc::audioproc::Test* test = ref_data.mutable_test(i);
1264 Init(test->sample_rate(), test->num_reverse_channels(),
1265 test->num_input_channels(), test->num_output_channels(), true);
1266
andrew@webrtc.org755b04a2011-11-15 16:57:56 +00001267 int frame_count = 0;
1268 int has_echo_count = 0;
1269 int has_voice_count = 0;
1270 int is_saturated_count = 0;
1271 int analog_level = 127;
1272 int analog_level_average = 0;
1273 int max_output_average = 0;
bjornv@webrtc.org08329f42012-07-12 21:00:43 +00001274 float ns_speech_prob_average = 0.0f;
andrew@webrtc.org755b04a2011-11-15 16:57:56 +00001275
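    // Run the far- and near-end files through APM frame by frame, looping the
    // AGC analog level back in and accumulating the detection statistics,
    // until either file runs out of data.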
    while (1) {
      if (!ReadFrame(far_file_, revframe_)) break;
      EXPECT_EQ(apm_->kNoError, apm_->AnalyzeReverseStream(revframe_));

      if (!ReadFrame(near_file_, frame_)) break;
      frame_->vad_activity_ = AudioFrame::kVadUnknown;

      EXPECT_EQ(apm_->kNoError, apm_->set_stream_delay_ms(0));
      EXPECT_EQ(apm_->kNoError,
                apm_->echo_cancellation()->set_stream_drift_samples(0));
      EXPECT_EQ(apm_->kNoError,
                apm_->gain_control()->set_stream_analog_level(analog_level));

      EXPECT_EQ(apm_->kNoError, apm_->ProcessStream(frame_));
      // Ensure the frame was downmixed properly.
      EXPECT_EQ(test->num_output_channels(), frame_->num_channels_);

      max_output_average += MaxAudioFrame(*frame_);

      if (apm_->echo_cancellation()->stream_has_echo()) {
        has_echo_count++;
      }

      analog_level = apm_->gain_control()->stream_analog_level();
      analog_level_average += analog_level;
      if (apm_->gain_control()->stream_is_saturated()) {
        is_saturated_count++;
      }
      if (apm_->voice_detection()->stream_has_voice()) {
        has_voice_count++;
        EXPECT_EQ(AudioFrame::kVadActive, frame_->vad_activity_);
      } else {
        EXPECT_EQ(AudioFrame::kVadPassive, frame_->vad_activity_);
      }

      ns_speech_prob_average += apm_->noise_suppression()->speech_probability();

      size_t frame_size = frame_->samples_per_channel_ * frame_->num_channels_;
      size_t write_count = fwrite(frame_->data_,
                                  sizeof(int16_t),
                                  frame_size,
                                  out_file_);
      ASSERT_EQ(frame_size, write_count);

      // Reset in case of downmixing.
      frame_->num_channels_ = test->num_input_channels();
      frame_count++;
    }
    max_output_average /= frame_count;
    analog_level_average /= frame_count;
    ns_speech_prob_average /= frame_count;

#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
    EchoCancellation::Metrics echo_metrics;
    EXPECT_EQ(apm_->kNoError,
              apm_->echo_cancellation()->GetMetrics(&echo_metrics));
    int median = 0;
    int std = 0;
    EXPECT_EQ(apm_->kNoError,
              apm_->echo_cancellation()->GetDelayMetrics(&median, &std));

    int rms_level = apm_->level_estimator()->RMS();
    EXPECT_LE(0, rms_level);
    EXPECT_GE(127, rms_level);
#endif

    if (!write_ref_data) {
      EXPECT_EQ(test->has_echo_count(), has_echo_count);
      EXPECT_EQ(test->has_voice_count(), has_voice_count);
      EXPECT_EQ(test->is_saturated_count(), is_saturated_count);

      EXPECT_EQ(test->analog_level_average(), analog_level_average);
      EXPECT_EQ(test->max_output_average(), max_output_average);

#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
      webrtc::audioproc::Test::EchoMetrics reference =
          test->echo_metrics();
      TestStats(echo_metrics.residual_echo_return_loss,
                reference.residual_echo_return_loss());
      TestStats(echo_metrics.echo_return_loss,
                reference.echo_return_loss());
      TestStats(echo_metrics.echo_return_loss_enhancement,
                reference.echo_return_loss_enhancement());
      TestStats(echo_metrics.a_nlp,
                reference.a_nlp());

      webrtc::audioproc::Test::DelayMetrics reference_delay =
          test->delay_metrics();
      EXPECT_EQ(reference_delay.median(), median);
      EXPECT_EQ(reference_delay.std(), std);

      EXPECT_EQ(test->rms_level(), rms_level);

      EXPECT_FLOAT_EQ(test->ns_speech_probability_average(),
                      ns_speech_prob_average);
#endif
    } else {
      test->set_has_echo_count(has_echo_count);
      test->set_has_voice_count(has_voice_count);
      test->set_is_saturated_count(is_saturated_count);

      test->set_analog_level_average(analog_level_average);
      test->set_max_output_average(max_output_average);

#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
      webrtc::audioproc::Test::EchoMetrics* message =
          test->mutable_echo_metrics();
      WriteStatsMessage(echo_metrics.residual_echo_return_loss,
                        message->mutable_residual_echo_return_loss());
      WriteStatsMessage(echo_metrics.echo_return_loss,
                        message->mutable_echo_return_loss());
      WriteStatsMessage(echo_metrics.echo_return_loss_enhancement,
                        message->mutable_echo_return_loss_enhancement());
      WriteStatsMessage(echo_metrics.a_nlp,
                        message->mutable_a_nlp());

      webrtc::audioproc::Test::DelayMetrics* message_delay =
          test->mutable_delay_metrics();
      message_delay->set_median(median);
      message_delay->set_std(std);

      test->set_rms_level(rms_level);

      EXPECT_LE(0.0f, ns_speech_prob_average);
      EXPECT_GE(1.0f, ns_speech_prob_average);
      test->set_ns_speech_probability_average(ns_speech_prob_average);
#endif
    }

    rewind(far_file_);
    rewind(near_file_);
  }

  if (write_ref_data) {
    WriteMessageLiteToFile(ref_filename_, ref_data);
  }
}
#endif  // WEBRTC_AUDIOPROC_BIT_EXACT

}  // namespace

int main(int argc, char** argv) {
  for (int i = 1; i < argc; i++) {
    if (strcmp(argv[i], "--write_ref_data") == 0) {
      write_ref_data = true;
    }
  }

  // We don't use TestSuite here because it would require the Android platform
  // build to depend on Gmock.
  webrtc::test::SetExecutablePath(argv[0]);
  testing::InitGoogleTest(&argc, argv);
  int result = RUN_ALL_TESTS();
  // Optional, but removes memory leak noise from Valgrind.
  google::protobuf::ShutdownProtobufLibrary();
  return result;
}