Add a wrapper around PushSincResampler and the old Resampler.

The old resampler is used whenever it supports the requested rates; otherwise
the sinc resampler is used.
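
The wrapper itself (PushResampler, declared in
webrtc/common_audio/resampler/include/push_resampler.h) is not part of this
diff. As a rough illustration of the routing described above, with a made-up
rate check standing in for whatever the old Resampler actually supports, the
decision amounts to something like:

  // Sketch only: the supported-rate check below is an assumption chosen just
  // to show the fallback behaviour, not the real PushResampler logic.
  bool UseSincResampler(int src_rate_hz, int dst_rate_hz) {
    const bool old_supports =
        (src_rate_hz % 8000 == 0) && (dst_rate_hz % 8000 == 0);
    return !old_supports;
  }
  // e.g. 16000 -> 48000 would stay on the old resampler, while 44100 -> 48000
  // would route to PushSincResampler.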

The wrapper is integrated with output_mixer so the change can be tested through
output_mixer_unittest. The sinc resampler will not yet be used in practice,
since we don't feed VoE any rates that trigger it.
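
For reference, the call pattern RemixAndResample() adopts in this change is
summarized below. ResampleFrame() is just a hypothetical name for this sketch;
the PushResampler calls and their return conventions are as used in the diff.

  #include "webrtc/common_audio/resampler/include/push_resampler.h"
  #include "webrtc/modules/interface/module_common_types.h"

  // |num_channels| is the channel count of |src| after any downmixing.
  // Returns 0 on success, -1 on failure (both PushResampler calls return -1
  // to signal errors).
  int ResampleFrame(const webrtc::AudioFrame& src, int num_channels,
                    webrtc::PushResampler* resampler,
                    webrtc::AudioFrame* dst) {
    if (resampler->InitializeIfNeeded(src.sample_rate_hz_,
                                      dst->sample_rate_hz_,
                                      num_channels) == -1)
      return -1;
    const int src_length = src.samples_per_channel_ * num_channels;
    const int out_length = resampler->Resample(
        src.data_, src_length, dst->data_,
        webrtc::AudioFrame::kMaxDataSizeSamples);
    if (out_length == -1)
      return -1;
    dst->samples_per_channel_ = out_length / num_channels;
    return 0;
  }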

BUG=webrtc:1395
R=bjornv@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/1355004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@3915 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/webrtc/voice_engine/output_mixer.cc b/webrtc/voice_engine/output_mixer.cc
index a124564..a8e4177 100644
--- a/webrtc/voice_engine/output_mixer.cc
+++ b/webrtc/voice_engine/output_mixer.cc
@@ -8,16 +8,16 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "output_mixer.h"
+#include "webrtc/voice_engine/output_mixer.h"
 
-#include "audio_processing.h"
-#include "audio_frame_operations.h"
-#include "critical_section_wrapper.h"
-#include "file_wrapper.h"
-#include "output_mixer_internal.h"
-#include "statistics.h"
-#include "trace.h"
-#include "voe_external_media.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/utility/interface/audio_frame_operations.h"
+#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/interface/file_wrapper.h"
+#include "webrtc/system_wrappers/interface/trace.h"
+#include "webrtc/voice_engine/include/voe_external_media.h"
+#include "webrtc/voice_engine/output_mixer_internal.h"
+#include "webrtc/voice_engine/statistics.h"
 
 namespace webrtc {
 
@@ -528,7 +528,7 @@
   frame->sample_rate_hz_ = sample_rate_hz;
   // TODO(andrew): Ideally the downmixing would occur much earlier, in
   // AudioCodingModule.
-  return RemixAndResample(_audioFrame, &_resampler, frame);
+  return RemixAndResample(_audioFrame, &resampler_, frame);
 }
 
 int32_t
@@ -602,7 +602,7 @@
   AudioFrame frame;
   frame.num_channels_ = 1;
   frame.sample_rate_hz_ = _audioProcessingModulePtr->sample_rate_hz();
-  if (RemixAndResample(_audioFrame, &_apmResampler, &frame) == -1)
+  if (RemixAndResample(_audioFrame, &audioproc_resampler_, &frame) == -1)
     return;
 
   if (_audioProcessingModulePtr->AnalyzeReverseStream(&frame) == -1) {
diff --git a/webrtc/voice_engine/output_mixer.h b/webrtc/voice_engine/output_mixer.h
index e2ca366..b98f88e 100644
--- a/webrtc/voice_engine/output_mixer.h
+++ b/webrtc/voice_engine/output_mixer.h
@@ -11,14 +11,14 @@
 #ifndef WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H_
 #define WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H_
 
-#include "audio_conference_mixer.h"
-#include "audio_conference_mixer_defines.h"
-#include "common_types.h"
-#include "dtmf_inband.h"
-#include "file_recorder.h"
-#include "level_indicator.h"
-#include "resampler.h"
-#include "voice_engine_defines.h"
+#include "webrtc/common_audio/resampler/include/push_resampler.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer.h"
+#include "webrtc/modules/audio_conference_mixer/interface/audio_conference_mixer_defines.h"
+#include "webrtc/modules/utility/interface/file_recorder.h"
+#include "webrtc/voice_engine/dtmf_inband.h"
+#include "webrtc/voice_engine/level_indicator.h"
+#include "webrtc/voice_engine/voice_engine_defines.h"
 
 namespace webrtc {
 
@@ -133,8 +133,8 @@
     CriticalSectionWrapper& _fileCritSect;
     AudioConferenceMixer& _mixerModule;
     AudioFrame _audioFrame;
-    Resampler _resampler;        // converts mixed audio to fit ADM format
-    Resampler _apmResampler;    // converts mixed audio to fit APM rate
+    PushResampler resampler_;  // converts mixed audio to fit ADM format
+    PushResampler audioproc_resampler_;  // converts mixed audio to fit APM rate
     AudioLevel _audioLevel;    // measures audio level for the combined signal
     DtmfInband _dtmfGenerator;
     int _instanceId;
diff --git a/webrtc/voice_engine/output_mixer_internal.cc b/webrtc/voice_engine/output_mixer_internal.cc
index dfa7d95..55eedb3 100644
--- a/webrtc/voice_engine/output_mixer_internal.cc
+++ b/webrtc/voice_engine/output_mixer_internal.cc
@@ -8,18 +8,19 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "output_mixer_internal.h"
+#include "webrtc/voice_engine/output_mixer_internal.h"
 
-#include "audio_frame_operations.h"
-#include "common_audio/resampler/include/resampler.h"
-#include "module_common_types.h"
-#include "trace.h"
+#include "webrtc/common_audio/resampler/include/push_resampler.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/utility/interface/audio_frame_operations.h"
+#include "webrtc/system_wrappers/interface/logging.h"
+#include "webrtc/system_wrappers/interface/trace.h"
 
 namespace webrtc {
 namespace voe {
 
 int RemixAndResample(const AudioFrame& src_frame,
-                     Resampler* resampler,
+                     PushResampler* resampler,
                      AudioFrame* dst_frame) {
   const int16_t* audio_ptr = src_frame.data_;
   int audio_ptr_num_channels = src_frame.num_channels_;
@@ -34,30 +35,26 @@
     audio_ptr_num_channels = 1;
   }
 
-  const ResamplerType resampler_type = audio_ptr_num_channels == 1 ?
-      kResamplerSynchronous : kResamplerSynchronousStereo;
-  if (resampler->ResetIfNeeded(src_frame.sample_rate_hz_,
-                               dst_frame->sample_rate_hz_,
-                               resampler_type) == -1) {
+  if (resampler->InitializeIfNeeded(src_frame.sample_rate_hz_,
+                                    dst_frame->sample_rate_hz_,
+                                    audio_ptr_num_channels) == -1) {
     dst_frame->CopyFrom(src_frame);
-    WEBRTC_TRACE(kTraceError, kTraceVoice, -1,
-                "%s ResetIfNeeded failed", __FUNCTION__);
+    LOG_FERR3(LS_ERROR, InitializeIfNeeded, src_frame.sample_rate_hz_,
+              dst_frame->sample_rate_hz_, audio_ptr_num_channels);
     return -1;
   }
 
-  int out_length = 0;
-  if (resampler->Push(audio_ptr,
-                      src_frame.samples_per_channel_* audio_ptr_num_channels,
-                      dst_frame->data_,
-                      AudioFrame::kMaxDataSizeSamples,
-                      out_length) == 0) {
-    dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels;
-  } else {
+  const int src_length = src_frame.samples_per_channel_ *
+                         audio_ptr_num_channels;
+  int out_length = resampler->Resample(audio_ptr, src_length, dst_frame->data_,
+                                       AudioFrame::kMaxDataSizeSamples);
+  if (out_length == -1) {
     dst_frame->CopyFrom(src_frame);
-    WEBRTC_TRACE(kTraceError, kTraceVoice, -1,
-                 "%s resampling failed", __FUNCTION__);
+    LOG_FERR3(LS_ERROR, Resample, src_length, dst_frame->data_,
+              AudioFrame::kMaxDataSizeSamples);
     return -1;
   }
+  dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels;
 
   // Upmix after resampling.
   if (src_frame.num_channels_ == 1 && dst_frame->num_channels_ == 2) {
diff --git a/webrtc/voice_engine/output_mixer_internal.h b/webrtc/voice_engine/output_mixer_internal.h
index 8d23a14..88a3a5b 100644
--- a/webrtc/voice_engine/output_mixer_internal.h
+++ b/webrtc/voice_engine/output_mixer_internal.h
@@ -14,7 +14,7 @@
 namespace webrtc {
 
 class AudioFrame;
-class Resampler;
+class PushResampler;
 
 namespace voe {
 
@@ -24,7 +24,7 @@
 //
 // On failure, returns -1 and copies |src_frame| to |dst_frame|.
 int RemixAndResample(const AudioFrame& src_frame,
-                     Resampler* resampler,
+                     PushResampler* resampler,
                      AudioFrame* dst_frame);
 
 }  // namespace voe
diff --git a/webrtc/voice_engine/output_mixer_unittest.cc b/webrtc/voice_engine/output_mixer_unittest.cc
index dbcb251..24d3917 100644
--- a/webrtc/voice_engine/output_mixer_unittest.cc
+++ b/webrtc/voice_engine/output_mixer_unittest.cc
@@ -10,10 +10,9 @@
 
 #include <math.h>
 
-#include "gtest/gtest.h"
-
-#include "output_mixer.h"
-#include "output_mixer_internal.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/voice_engine/output_mixer.h"
+#include "webrtc/voice_engine/output_mixer_internal.h"
 
 namespace webrtc {
 namespace voe {
@@ -32,7 +31,7 @@
   void RunResampleTest(int src_channels, int src_sample_rate_hz,
                        int dst_channels, int dst_sample_rate_hz);
 
-  Resampler resampler_;
+  PushResampler resampler_;
   AudioFrame src_frame_;
   AudioFrame dst_frame_;
   AudioFrame golden_frame_;
@@ -42,6 +41,7 @@
 // used so non-integer values result in rounding error, but not an accumulating
 // error.
 void SetMonoFrame(AudioFrame* frame, float data, int sample_rate_hz) {
+  memset(frame->data_, 0, sizeof(frame->data_));
   frame->num_channels_ = 1;
   frame->sample_rate_hz_ = sample_rate_hz;
   frame->samples_per_channel_ = sample_rate_hz / 100;
@@ -59,6 +59,7 @@
 // each channel respectively.
 void SetStereoFrame(AudioFrame* frame, float left, float right,
                     int sample_rate_hz) {
+  memset(frame->data_, 0, sizeof(frame->data_));
   frame->num_channels_ = 2;
   frame->sample_rate_hz_ = sample_rate_hz;
   frame->samples_per_channel_ = sample_rate_hz / 100;
@@ -80,13 +81,14 @@
 }
 
 // Computes the best SNR based on the error between |ref_frame| and
-// |test_frame|. It allows for up to a 30 sample delay between the signals to
-// compensate for the resampling delay.
-float ComputeSNR(const AudioFrame& ref_frame, const AudioFrame& test_frame) {
+// |test_frame|. It allows for up to a |max_delay| in samples between the
+// signals to compensate for the resampling delay.
+float ComputeSNR(const AudioFrame& ref_frame, const AudioFrame& test_frame,
+                 int max_delay) {
   VerifyParams(ref_frame, test_frame);
   float best_snr = 0;
   int best_delay = 0;
-  for (int delay = 0; delay < 30; delay++) {
+  for (int delay = 0; delay <= max_delay; delay++) {
     float mse = 0;
     float variance = 0;
     for (int i = 0; i < ref_frame.samples_per_channel_ *
@@ -120,14 +122,14 @@
                                       int src_sample_rate_hz,
                                       int dst_channels,
                                       int dst_sample_rate_hz) {
-  Resampler resampler;  // Create a new one with every test.
-  const int16_t kSrcLeft = 60;  // Shouldn't overflow for any used sample rate.
-  const int16_t kSrcRight = 30;
-  const float kResamplingFactor = (1.0 * src_sample_rate_hz) /
+  PushResampler resampler;  // Create a new one with every test.
+  const int16_t kSrcLeft = 30;  // Shouldn't overflow for any used sample rate.
+  const int16_t kSrcRight = 15;
+  const float resampling_factor = (1.0 * src_sample_rate_hz) /
       dst_sample_rate_hz;
-  const float kDstLeft = kResamplingFactor * kSrcLeft;
-  const float kDstRight = kResamplingFactor * kSrcRight;
-  const float kDstMono = (kDstLeft + kDstRight) / 2;
+  const float dst_left = resampling_factor * kSrcLeft;
+  const float dst_right = resampling_factor * kSrcRight;
+  const float dst_mono = (dst_left + dst_right) / 2;
   if (src_channels == 1)
     SetMonoFrame(&src_frame_, kSrcLeft, src_sample_rate_hz);
   else
@@ -136,27 +138,27 @@
   if (dst_channels == 1) {
     SetMonoFrame(&dst_frame_, 0, dst_sample_rate_hz);
     if (src_channels == 1)
-      SetMonoFrame(&golden_frame_, kDstLeft, dst_sample_rate_hz);
+      SetMonoFrame(&golden_frame_, dst_left, dst_sample_rate_hz);
     else
-      SetMonoFrame(&golden_frame_, kDstMono, dst_sample_rate_hz);
+      SetMonoFrame(&golden_frame_, dst_mono, dst_sample_rate_hz);
   } else {
     SetStereoFrame(&dst_frame_, 0, 0, dst_sample_rate_hz);
     if (src_channels == 1)
-      SetStereoFrame(&golden_frame_, kDstLeft, kDstLeft, dst_sample_rate_hz);
+      SetStereoFrame(&golden_frame_, dst_left, dst_left, dst_sample_rate_hz);
     else
-      SetStereoFrame(&golden_frame_, kDstLeft, kDstRight, dst_sample_rate_hz);
+      SetStereoFrame(&golden_frame_, dst_left, dst_right, dst_sample_rate_hz);
   }
 
+  // The sinc resampler has a known delay, which we compute here. Multiplying by
+  // two gives us a crude maximum for any resampling, as the old resampler
+  // typically (but not always) has lower delay.
+  static const int kInputKernelDelaySamples = 16;
+  const int max_delay = static_cast<double>(dst_sample_rate_hz)
+      / src_sample_rate_hz * kInputKernelDelaySamples * dst_channels * 2;
   printf("(%d, %d Hz) -> (%d, %d Hz) ",  // SNR reported on the same line later.
       src_channels, src_sample_rate_hz, dst_channels, dst_sample_rate_hz);
   EXPECT_EQ(0, RemixAndResample(src_frame_, &resampler, &dst_frame_));
-  EXPECT_GT(ComputeSNR(golden_frame_, dst_frame_), 40.0f);
-}
-
-TEST_F(OutputMixerTest, RemixAndResampleFailsWithBadSampleRate) {
-  SetMonoFrame(&dst_frame_, 10, 44100);
-  EXPECT_EQ(-1, RemixAndResample(src_frame_, &resampler_, &dst_frame_));
-  VerifyFramesAreEqual(src_frame_, dst_frame_);
+  EXPECT_GT(ComputeSNR(golden_frame_, dst_frame_, max_delay), 39.0f);
 }
 
 TEST_F(OutputMixerTest, RemixAndResampleCopyFrameSucceeds) {
@@ -190,10 +192,9 @@
 }
 
 TEST_F(OutputMixerTest, RemixAndResampleSucceeds) {
-  // We don't attempt to be exhaustive here, but just get good coverage. Some
-  // combinations of rates will not be resampled, and some give an odd
-  // resampling factor which makes it more difficult to evaluate.
-  const int kSampleRates[] = {16000, 32000, 48000};
+  // TODO(ajm): convert this to the parameterized TEST_P style used in
+  // sinc_resampler_unittest.cc. We can then easily add tighter SNR thresholds.
+  const int kSampleRates[] = {8000, 16000, 32000, 44100, 48000, 96000};
   const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
   const int kChannels[] = {1, 2};
   const int kChannelsSize = sizeof(kChannels) / sizeof(*kChannels);