Only reinitialize AudioProcessing when needed.
This removes that burden from the user, resulting in cleaner client code.
Review URL: https://webrtc-codereview.appspot.com/941005
git-svn-id: http://webrtc.googlecode.com/svn/trunk@3010 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/webrtc/modules/audio_processing/test/process_test.cc b/webrtc/modules/audio_processing/test/process_test.cc
index 57f1719..e68d1e9 100644
--- a/webrtc/modules/audio_processing/test/process_test.cc
+++ b/webrtc/modules/audio_processing/test/process_test.cc
@@ -8,12 +8,15 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <math.h>
#include <stdio.h>
#include <string.h>
#ifdef WEBRTC_ANDROID
#include <sys/stat.h>
#endif
+#include <algorithm>
+
#include "gtest/gtest.h"
#include "audio_processing.h"
@@ -131,6 +134,22 @@
printf(" --debug_file FILE Dump a debug recording.\n");
}
+// Maps a simulated analog mic level to a linear amplitude gain factor.
+// Level 127 maps to unity gain (0 dB); the mapping spans roughly +/-80 dB
+// across the full range (level 0 -> about -79.4 dB, level 255 -> +80 dB).
+// NOTE(review): assumes the analog AGC level range is [0, 255] -- confirm
+// against the GainControl API used by the caller.
+static double MicLevel2Gain(int level) {
+  return pow(10.0, ((level - 127.0) / 128.0 * 80.) / 20.);
+}
+
+// Applies a simulated analog-mic gain to |frame| in place: |mic_level| is
+// clamped to [0, 255], converted to a linear gain via MicLevel2Gain(), and
+// every sample (all channels, interleaved in data_) is scaled, rounded to
+// the nearest integer, and saturated to the int16 range [-32768, 32767].
+static void SimulateMic(int mic_level, AudioFrame* frame) {
+  mic_level = std::min(std::max(mic_level, 0), 255);
+  double mic_gain = MicLevel2Gain(mic_level);
+  int num_samples = frame->samples_per_channel_ * frame->num_channels_;
+  double v;
+  for (int n = 0; n < num_samples; n++) {
+    v = floor(frame->data_[n] * mic_gain + 0.5);
+    v = std::max(std::min(32767., v), -32768.);
+    frame->data_[n] = static_cast<int16_t>(v);
+  }
+}
+
// void function for gtest.
void void_main(int argc, char* argv[]) {
if (argc > 1 && strcmp(argv[1], "--help") == 0) {
@@ -658,6 +677,10 @@
fflush(stdout);
}
+ if (apm->gain_control()->mode() == GainControl::kAdaptiveAnalog) {
+ SimulateMic(capture_level, &near_frame);
+ }
+
if (perf_testing) {
t0 = TickTime::Now();
}
@@ -862,6 +885,10 @@
fread(&drift_samples, sizeof(drift_samples), 1, drift_file));
}
+ if (apm->gain_control()->mode() == GainControl::kAdaptiveAnalog) {
+ SimulateMic(capture_level, &near_frame);
+ }
+
if (perf_testing) {
t0 = TickTime::Now();
}