audio_coding: remove "main" directory

This is the last piece of the old directory layout of the modules.

Duplicated header files are left in audio_coding/main/include until
downstream code is updated to the new location. They have pragma
warnings added to them and identical header guards as the new headers to avoid breaking things.

BUG=webrtc:5095
TESTED=Passing compile-trybots with --clobber flag:
git cl try --clobber --bot=win_compile_rel --bot=linux_compile_rel --bot=android_compile_rel --bot=mac_compile_rel --bot=ios_rel --bot=linux_gn_rel --bot=win_x64_gn_rel --bot=mac_x64_gn_rel --bot=android_gn_rel -m tryserver.webrtc
NOTRY=True
NOPRESUBMIT=True

Review URL: https://codereview.webrtc.org/1481493004

Cr-Commit-Position: refs/heads/master@{#10803}
diff --git a/webrtc/modules/audio_coding/acm2/acm_codec_database.cc b/webrtc/modules/audio_coding/acm2/acm_codec_database.cc
new file mode 100644
index 0000000..8d4072f
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/acm_codec_database.cc
@@ -0,0 +1,331 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file generates databases with information about all supported audio
+ * codecs.
+ */
+
+// TODO(tlegrand): Change constant input pointers in all functions to constant
+// references, where appropriate.
+#include "webrtc/modules/audio_coding/acm2/acm_codec_database.h"
+
+#include <assert.h>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
+#include "webrtc/system_wrappers/include/trace.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+namespace {
+
+// Checks if the bitrate is valid for iSAC. A rate of -1 is accepted; any
+// other value must lie in [10000, 56000] bits/s.
+bool IsISACRateValid(int rate) {
+  if (rate == -1)
+    return true;
+  return rate >= 10000 && rate <= 56000;
+}
+
+// Checks if the bitrate is valid for iLBC given the frame size. 240/480
+// sample frames pair with 13300 bits/s; 160/320 sample frames pair with
+// 15200 bits/s.
+bool IsILBCRateValid(int rate, int frame_size_samples) {
+  const bool frame_240_or_480 =
+      frame_size_samples == 240 || frame_size_samples == 480;
+  const bool frame_160_or_320 =
+      frame_size_samples == 160 || frame_size_samples == 320;
+  return (frame_240_or_480 && rate == 13300) ||
+         (frame_160_or_320 && rate == 15200);
+}
+
+// Checks if the bitrate is valid for Opus: [6000, 510000] bits/s.
+bool IsOpusRateValid(int rate) {
+  return 6000 <= rate && rate <= 510000;
+}
+
+}  // namespace
+
+// Not yet used payload-types.
+// 83,  82,  81, 80, 79,  78,  77,  76,  75,  74,  73,  72,  71,  70,  69, 68,
+// 67, 66, 65
+
+// Table of all supported codecs. Each row is a CodecInst: {payload type,
+// name, sampling frequency, packet size in samples, channels, default rate}
+// (field order per the documentation in acm_codec_database.h). Rows are
+// addressed by codec id (see CodecNumber()), so the entries and the #ifdef
+// structure must be kept in sync with codec_settings_ and neteq_decoders_
+// below.
+const CodecInst ACMCodecDB::database_[] = {
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
+  {103, "ISAC", 16000, kIsacPacSize480, 1, kIsacWbDefaultRate},
+# if (defined(WEBRTC_CODEC_ISAC))
+  {104, "ISAC", 32000, kIsacPacSize960, 1, kIsacSwbDefaultRate},
+# endif
+#endif
+  // Mono
+  {107, "L16", 8000, 80, 1, 128000},
+  {108, "L16", 16000, 160, 1, 256000},
+  {109, "L16", 32000, 320, 1, 512000},
+  // Stereo
+  {111, "L16", 8000, 80, 2, 128000},
+  {112, "L16", 16000, 160, 2, 256000},
+  {113, "L16", 32000, 320, 2, 512000},
+  // G.711, PCM mu-law and A-law.
+  // Mono
+  {0, "PCMU", 8000, 160, 1, 64000},
+  {8, "PCMA", 8000, 160, 1, 64000},
+  // Stereo
+  {110, "PCMU", 8000, 160, 2, 64000},
+  {118, "PCMA", 8000, 160, 2, 64000},
+#ifdef WEBRTC_CODEC_ILBC
+  {102, "ILBC", 8000, 240, 1, 13300},
+#endif
+#ifdef WEBRTC_CODEC_G722
+  // Mono
+  {9, "G722", 16000, 320, 1, 64000},
+  // Stereo
+  {119, "G722", 16000, 320, 2, 64000},
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+  // Opus internally supports 48, 24, 16, 12, 8 kHz.
+  // Mono and stereo.
+  {120, "opus", 48000, 960, 2, 64000},
+#endif
+  // Comfort noise for four different sampling frequencies.
+  {13, "CN", 8000, 240, 1, 0},
+  {98, "CN", 16000, 480, 1, 0},
+  {99, "CN", 32000, 960, 1, 0},
+#ifdef ENABLE_48000_HZ
+  {100, "CN", 48000, 1440, 1, 0},
+#endif
+  {106, "telephone-event", 8000, 240, 1, 0},
+#ifdef WEBRTC_CODEC_RED
+  {127, "red", 8000, 0, 1, 0},
+#endif
+  // To prevent compile errors due to trailing commas.
+  {-1, "Null", -1, -1, -1, -1}
+};
+
+// Create database with all codec settings at compile time.
+// Each entry needs the following parameters in the given order:
+// Number of allowed packet sizes, a vector with the allowed packet sizes,
+// Basic block samples, max number of channels that are supported.
+// NOTE: rows are addressed by the same codec id as database_ above, so the
+// entries and the #ifdef structure must mirror database_ exactly.
+const ACMCodecDB::CodecSettings ACMCodecDB::codec_settings_[] = {
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
+    {2, {kIsacPacSize480, kIsacPacSize960}, 0, 1},
+# if (defined(WEBRTC_CODEC_ISAC))
+    {1, {kIsacPacSize960}, 0, 1},
+# endif
+#endif
+    // Mono
+    {4, {80, 160, 240, 320}, 0, 2},
+    {4, {160, 320, 480, 640}, 0, 2},
+    {2, {320, 640}, 0, 2},
+    // Stereo
+    {4, {80, 160, 240, 320}, 0, 2},
+    {4, {160, 320, 480, 640}, 0, 2},
+    {2, {320, 640}, 0, 2},
+    // G.711, PCM mu-law and A-law.
+    // Mono
+    {6, {80, 160, 240, 320, 400, 480}, 0, 2},
+    {6, {80, 160, 240, 320, 400, 480}, 0, 2},
+    // Stereo
+    {6, {80, 160, 240, 320, 400, 480}, 0, 2},
+    {6, {80, 160, 240, 320, 400, 480}, 0, 2},
+#ifdef WEBRTC_CODEC_ILBC
+    {4, {160, 240, 320, 480}, 0, 1},
+#endif
+#ifdef WEBRTC_CODEC_G722
+    // Mono
+    {6, {160, 320, 480, 640, 800, 960}, 0, 2},
+    // Stereo
+    {6, {160, 320, 480, 640, 800, 960}, 0, 2},
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+    // Opus supports frames shorter than 10ms,
+    // but it doesn't help us to use them.
+    // Mono and stereo.
+    {4, {480, 960, 1920, 2880}, 0, 2},
+#endif
+    // Comfort noise for three different sampling frequencies.
+    {1, {240}, 240, 1},
+    {1, {480}, 480, 1},
+    {1, {960}, 960, 1},
+#ifdef ENABLE_48000_HZ
+    {1, {1440}, 1440, 1},
+#endif
+    // telephone-event (matches the corresponding row in database_).
+    {1, {240}, 240, 1},
+#ifdef WEBRTC_CODEC_RED
+    {1, {0}, 0, 1},
+#endif
+    // To prevent compile errors due to trailing commas.
+    {-1, {-1}, -1, -1}
+};
+
+// Create a database of all NetEQ decoders at compile time. Rows are
+// addressed by the same codec id as database_ above; keep the entries and
+// the #ifdef structure in sync.
+const NetEqDecoder ACMCodecDB::neteq_decoders_[] = {
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
+    NetEqDecoder::kDecoderISAC,
+# if (defined(WEBRTC_CODEC_ISAC))
+    NetEqDecoder::kDecoderISACswb,
+# endif
+#endif
+    // Mono
+    NetEqDecoder::kDecoderPCM16B, NetEqDecoder::kDecoderPCM16Bwb,
+    NetEqDecoder::kDecoderPCM16Bswb32kHz,
+    // Stereo
+    NetEqDecoder::kDecoderPCM16B_2ch, NetEqDecoder::kDecoderPCM16Bwb_2ch,
+    NetEqDecoder::kDecoderPCM16Bswb32kHz_2ch,
+    // G.711, PCM mu-law and A-law.
+    // Mono
+    NetEqDecoder::kDecoderPCMu, NetEqDecoder::kDecoderPCMa,
+    // Stereo
+    NetEqDecoder::kDecoderPCMu_2ch, NetEqDecoder::kDecoderPCMa_2ch,
+#ifdef WEBRTC_CODEC_ILBC
+    NetEqDecoder::kDecoderILBC,
+#endif
+#ifdef WEBRTC_CODEC_G722
+    // Mono
+    NetEqDecoder::kDecoderG722,
+    // Stereo
+    NetEqDecoder::kDecoderG722_2ch,
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+    // Mono and stereo.
+    NetEqDecoder::kDecoderOpus,
+#endif
+    // Comfort noise for three different sampling frequencies.
+    NetEqDecoder::kDecoderCNGnb, NetEqDecoder::kDecoderCNGwb,
+    NetEqDecoder::kDecoderCNGswb32kHz,
+#ifdef ENABLE_48000_HZ
+    NetEqDecoder::kDecoderCNGswb48kHz,
+#endif
+    NetEqDecoder::kDecoderAVT,
+#ifdef WEBRTC_CODEC_RED
+    NetEqDecoder::kDecoderRED,
+#endif
+};
+
+// Enumerator for error codes when asking for codec database id. All values
+// are negative, so callers of CodecNumber() can test for "< 0" (see the
+// documentation in acm_codec_database.h).
+enum {
+  kInvalidCodec = -10,
+  kInvalidPayloadtype = -30,
+  kInvalidPacketSize = -40,
+  kInvalidRate = -50
+};
+
+// Gets the codec id number from the database. If there is some mismatch in
+// the codec settings, the function will return an error code.
+// NOTE! The first mismatch found will generate the return value.
+// Returns the codec id on success; otherwise one of the negative error codes
+// from the enum above (kInvalidCodec, kInvalidPayloadtype, ...).
+int ACMCodecDB::CodecNumber(const CodecInst& codec_inst) {
+  // Look for a matching codec in the database.
+  int codec_id = CodecId(codec_inst);
+
+  // Checks if we found a matching codec.
+  if (codec_id == -1) {
+    return kInvalidCodec;
+  }
+
+  // Checks the validity of payload type
+  if (!RentACodec::IsPayloadTypeValid(codec_inst.pltype)) {
+    return kInvalidPayloadtype;
+  }
+
+  // Comfort Noise is special case, packet-size & rate is not checked.
+  if (STR_CASE_CMP(database_[codec_id].plname, "CN") == 0) {
+    return codec_id;
+  }
+
+  // RED is special case, packet-size & rate is not checked.
+  if (STR_CASE_CMP(database_[codec_id].plname, "red") == 0) {
+    return codec_id;
+  }
+
+  // Checks the validity of packet size. The allowed sizes for this codec are
+  // listed in codec_settings_[codec_id].
+  if (codec_settings_[codec_id].num_packet_sizes > 0) {
+    bool packet_size_ok = false;
+    int i;
+    int packet_size_samples;
+    for (i = 0; i < codec_settings_[codec_id].num_packet_sizes; i++) {
+      packet_size_samples =
+          codec_settings_[codec_id].packet_sizes_samples[i];
+      if (codec_inst.pacsize == packet_size_samples) {
+        packet_size_ok = true;
+        break;
+      }
+    }
+
+    if (!packet_size_ok) {
+      return kInvalidPacketSize;
+    }
+  }
+
+  // Packet size must be positive even for codecs without an allowed-size
+  // list.
+  if (codec_inst.pacsize < 1) {
+    return kInvalidPacketSize;
+  }
+
+  // Check the validity of rate. Codecs with multiple rates have their own
+  // function for this.
+  if (STR_CASE_CMP("isac", codec_inst.plname) == 0) {
+    return IsISACRateValid(codec_inst.rate) ? codec_id : kInvalidRate;
+  } else if (STR_CASE_CMP("ilbc", codec_inst.plname) == 0) {
+    return IsILBCRateValid(codec_inst.rate, codec_inst.pacsize)
+        ? codec_id : kInvalidRate;
+  } else if (STR_CASE_CMP("opus", codec_inst.plname) == 0) {
+    return IsOpusRateValid(codec_inst.rate)
+        ? codec_id : kInvalidRate;
+  }
+
+  // Fixed-rate codecs must match the database rate exactly.
+  return database_[codec_id].rate == codec_inst.rate ? codec_id : kInvalidRate;
+}
+
+// Convenience overload of CodecId() that extracts the payload name, sampling
+// frequency and channel count from |codec_inst|. Other fields (payload type,
+// packet size) are not considered. Returns the codec id, or -1 if no match
+// is found.
+int ACMCodecDB::CodecId(const CodecInst& codec_inst) {
+  return CodecId(codec_inst.plname, codec_inst.plfreq, codec_inst.channels);
+}
+
+// Scans the rent-a-codec database for an entry whose payload name, sampling
+// frequency and channel count all match, and returns its index. A
+// |frequency| of -1 is treated as "any frequency" (used e.g. for RED). For
+// Opus the channel count is only required to be 1 or 2 rather than to match
+// the database entry. Returns -1 when nothing matches.
+int ACMCodecDB::CodecId(const char* payload_name, int frequency, int channels) {
+  const bool is_opus = STR_CASE_CMP(payload_name, "opus") == 0;
+  int index = 0;
+  for (const CodecInst& ci : RentACodec::Database()) {
+    const bool name_match = STR_CASE_CMP(ci.plname, payload_name) == 0;
+    const bool frequency_match = frequency == ci.plfreq || frequency == -1;
+    const bool channels_match = is_opus ? (channels == 1 || channels == 2)
+                                        : channels == ci.channels;
+    if (name_match && frequency_match && channels_match) {
+      // Found a matching codec; its position in the database is its id.
+      return index;
+    }
+    ++index;
+  }
+
+  // We didn't find a matching codec.
+  return -1;
+}
+// Gets codec id number from database for the receiver. Simply forwards to
+// CodecId(); no additional validation is performed on the receive side.
+int ACMCodecDB::ReceiverCodecNumber(const CodecInst& codec_inst) {
+  return CodecId(codec_inst);
+}
+
+}  // namespace acm2
+
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/acm_codec_database.h b/webrtc/modules/audio_coding/acm2/acm_codec_database.h
new file mode 100644
index 0000000..9e87238
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/acm_codec_database.h
@@ -0,0 +1,83 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file generates databases with information about all supported audio
+ * codecs.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_CODEC_DATABASE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_CODEC_DATABASE_H_
+
+#include "webrtc/common_types.h"
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
+#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+// TODO(tlegrand): replace class ACMCodecDB with a namespace.
+// Static-only holder for the compile-time codec database and lookup helpers;
+// it is never instantiated.
+class ACMCodecDB {
+ public:
+  // kMaxNumCodecs - Maximum number of codecs that can be activated in one
+  //                 build.
+  // kMaxNumPacketSize - Maximum number of allowed packet sizes for one codec.
+  // These might need to be increased if adding a new codec to the database
+  static const int kMaxNumCodecs =  50;
+  static const int kMaxNumPacketSize = 6;
+
+  // Codec specific settings
+  //
+  // num_packet_sizes     - number of allowed packet sizes.
+  // packet_sizes_samples - list of the allowed packet sizes.
+  // basic_block_samples  - assigned a value different from 0 if the codec
+  //                        requires to be fed with a specific number of samples
+  //                        that can be different from packet size.
+  // channel_support      - number of channels supported to encode;
+  //                        1 = mono, 2 = stereo, etc.
+  struct CodecSettings {
+    int num_packet_sizes;
+    int packet_sizes_samples[kMaxNumPacketSize];
+    int basic_block_samples;
+    int channel_support;
+  };
+
+  // Returns codec id from database, given the information received in the input
+  // [codec_inst].
+  // Input:
+  //   [codec_inst] - Information about the codec for which we require the
+  //                  database id.
+  // Return:
+  //   codec id if successful, otherwise < 0.
+  static int CodecNumber(const CodecInst& codec_inst);
+  // Pure lookup by name/frequency/channels; unlike CodecNumber(), no
+  // validation of payload type, packet size or rate is done. Returns the
+  // codec id, or -1 if no match is found.
+  static int CodecId(const CodecInst& codec_inst);
+  static int CodecId(const char* payload_name, int frequency, int channels);
+  // Receiver-side id lookup; equivalent to CodecId(codec_inst).
+  static int ReceiverCodecNumber(const CodecInst& codec_inst);
+
+  // Databases with information about the supported codecs
+  // database_ - stored information about all codecs: payload type, name,
+  //             sampling frequency, packet size in samples, default channel
+  //             support, and default rate.
+  // codec_settings_ - stored codec settings: number of allowed packet sizes,
+  //                   a vector with the allowed packet sizes, basic block
+  //                   samples, and max number of channels that are supported.
+  // neteq_decoders_ - list of supported decoders in NetEQ.
+  // NOTE: all three arrays are indexed by the same codec id and must be kept
+  // in the same order.
+  static const CodecInst database_[kMaxNumCodecs];
+  static const CodecSettings codec_settings_[kMaxNumCodecs];
+  static const NetEqDecoder neteq_decoders_[kMaxNumCodecs];
+};
+
+}  // namespace acm2
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_CODEC_DATABASE_H_
diff --git a/webrtc/modules/audio_coding/acm2/acm_common_defs.h b/webrtc/modules/audio_coding/acm2/acm_common_defs.h
new file mode 100644
index 0000000..483bdd9
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/acm_common_defs.h
@@ -0,0 +1,32 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_COMMON_DEFS_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_COMMON_DEFS_H_
+
+#include "webrtc/engine_configurations.h"
+
+// Checks for enabled codecs, we prevent enabling codecs which are not
+// compatible.
+#if ((defined WEBRTC_CODEC_ISAC) && (defined WEBRTC_CODEC_ISACFX))
+#error iSAC and iSACFX codecs cannot be enabled at the same time
+#endif
+
+namespace webrtc {
+
+// General codec specific defines
+// Default bitrates (bits/s) used for the iSAC entries in the codec database.
+const int kIsacWbDefaultRate = 32000;
+const int kIsacSwbDefaultRate = 56000;
+// iSAC packet sizes in samples per frame.
+const int kIsacPacSize480 = 480;
+const int kIsacPacSize960 = 960;
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_COMMON_DEFS_H_
diff --git a/webrtc/modules/audio_coding/acm2/acm_neteq_unittest.cc b/webrtc/modules/audio_coding/acm2/acm_neteq_unittest.cc
new file mode 100644
index 0000000..607b933
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/acm_neteq_unittest.cc
@@ -0,0 +1,15 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This file contains unit tests for ACM's NetEQ wrapper (class ACMNetEQ).
+
+namespace webrtc {
+namespace acm2 {}  // namespace acm2
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.cc b/webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.cc
new file mode 100644
index 0000000..bb83e77
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.cc
@@ -0,0 +1,221 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.h"
+
+#include <assert.h>
+#include <stdio.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/neteq/tools/audio_sink.h"
+#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
+#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
+
+namespace webrtc {
+namespace test {
+
+namespace {
+// Returns true if the codec should be registered, otherwise false. Note:
+// despite the pointer parameter, the codec is not modified; this is a filter.
+bool ModifyAndUseThisCodec(CodecInst* codec_param) {
+  if (STR_CASE_CMP(codec_param->plname, "CN") == 0 &&
+      codec_param->plfreq == 48000)
+    return false;  // Skip 48 kHz comfort noise.
+
+  if (STR_CASE_CMP(codec_param->plname, "telephone-event") == 0)
+    return false;  // Skip DTMF.
+
+  return true;
+}
+
+// Remaps payload types from ACM's default to those used in the resource file
+// neteq_universal_new.rtp. Returns true if the codec should be registered,
+// otherwise false. The payload types are set as follows (all are mono codecs):
+// PCMu = 0;
+// PCMa = 8;
+// Comfort noise 8 kHz = 13
+// Comfort noise 16 kHz = 98
+// Comfort noise 32 kHz = 99
+// iLBC = 102
+// iSAC wideband = 103
+// iSAC super-wideband = 104
+// AVT/DTMF = 106
+// RED = 117
+// PCM16b 8 kHz = 93
+// PCM16b 16 kHz = 94
+// PCM16b 32 kHz = 95
+// G.722 = 9
+bool RemapPltypeAndUseThisCodec(const char* plname,
+                                int plfreq,
+                                int channels,
+                                int* pltype) {
+  if (channels != 1)
+    return false;  // Don't use non-mono codecs.
+
+  // Table of (name, required frequency, remapped payload type) used for the
+  // NetEq test files. A frequency of -1 means "any frequency".
+  static const struct {
+    const char* name;
+    int plfreq;
+    int pltype;
+  } kRemap[] = {
+      {"PCMU", 8000, 0},
+      {"PCMA", 8000, 8},
+      {"CN", 8000, 13},
+      {"CN", 16000, 98},
+      {"CN", 32000, 99},
+      {"ILBC", -1, 102},
+      {"ISAC", 16000, 103},
+      {"ISAC", 32000, 104},
+      {"telephone-event", -1, 106},
+      {"red", -1, 117},
+      {"L16", 8000, 93},
+      {"L16", 16000, 94},
+      {"L16", 32000, 95},
+      {"G722", -1, 9},
+  };
+  for (const auto& entry : kRemap) {
+    if (STR_CASE_CMP(plname, entry.name) == 0 &&
+        (entry.plfreq == -1 || entry.plfreq == plfreq)) {
+      *pltype = entry.pltype;
+      return true;
+    }
+  }
+  // Don't use any other codecs.
+  return false;
+}
+}  // namespace
+
+// Constructs the receive test. The ACM is created with id 0 and driven by
+// the member SimulatedClock, which starts at time 0. ("exptected" in the
+// parameter name is a pre-existing typo kept for interface compatibility.)
+AcmReceiveTestOldApi::AcmReceiveTestOldApi(
+    PacketSource* packet_source,
+    AudioSink* audio_sink,
+    int output_freq_hz,
+    NumOutputChannels exptected_output_channels)
+    : clock_(0),
+      acm_(webrtc::AudioCodingModule::Create(0, &clock_)),
+      packet_source_(packet_source),
+      audio_sink_(audio_sink),
+      output_freq_hz_(output_freq_hz),
+      exptected_output_channels_(exptected_output_channels) {
+}
+
+// Registers every codec reported by the ACM with its default parameters,
+// skipping those filtered out by ModifyAndUseThisCodec() (48 kHz comfort
+// noise and telephone-event). Failures are reported via gtest assertions.
+void AcmReceiveTestOldApi::RegisterDefaultCodecs() {
+  CodecInst my_codec_param;
+  for (int n = 0; n < acm_->NumberOfCodecs(); n++) {
+    ASSERT_EQ(0, acm_->Codec(n, &my_codec_param)) << "Failed to get codec.";
+    if (ModifyAndUseThisCodec(&my_codec_param)) {
+      ASSERT_EQ(0, acm_->RegisterReceiveCodec(my_codec_param))
+          << "Couldn't register receive codec.\n";
+    }
+  }
+}
+
+// Registers the codecs with payload types remapped to match the pre-encoded
+// NetEq test files. A codec is registered only if it passes both the general
+// usability filter and the payload-type remapping.
+void AcmReceiveTestOldApi::RegisterNetEqTestCodecs() {
+  CodecInst my_codec_param;
+  for (int n = 0; n < acm_->NumberOfCodecs(); n++) {
+    ASSERT_EQ(0, acm_->Codec(n, &my_codec_param)) << "Failed to get codec.";
+    // Short-circuit: the remap (which writes my_codec_param.pltype) is only
+    // attempted for codecs that pass the usability filter.
+    if (ModifyAndUseThisCodec(&my_codec_param) &&
+        RemapPltypeAndUseThisCodec(my_codec_param.plname,
+                                   my_codec_param.plfreq,
+                                   my_codec_param.channels,
+                                   &my_codec_param.pltype)) {
+      ASSERT_EQ(0, acm_->RegisterReceiveCodec(my_codec_param))
+          << "Couldn't register receive codec.\n";
+    }
+  }
+}
+
+// Forwards registration of an externally owned decoder to the ACM. Returns
+// the result code from AudioCodingModule::RegisterExternalReceiveCodec().
+int AcmReceiveTestOldApi::RegisterExternalReceiveCodec(
+    int rtp_payload_type,
+    AudioDecoder* external_decoder,
+    int sample_rate_hz,
+    int num_channels) {
+  return acm_->RegisterExternalReceiveCodec(rtp_payload_type, external_decoder,
+                                            sample_rate_hz, num_channels);
+}
+
+// Main loop: for each packet from |packet_source_|, pulls 10 ms audio blocks
+// from the ACM (advancing the simulated clock and writing the audio to
+// |audio_sink_|) until the packet's insertion time is reached, then inserts
+// the packet into the ACM. Failures are reported via gtest assertions.
+void AcmReceiveTestOldApi::Run() {
+  for (rtc::scoped_ptr<Packet> packet(packet_source_->NextPacket()); packet;
+       packet.reset(packet_source_->NextPacket())) {
+    // Pull audio until time to insert packet.
+    while (clock_.TimeInMilliseconds() < packet->time_ms()) {
+      AudioFrame output_frame;
+      EXPECT_EQ(0, acm_->PlayoutData10Ms(output_freq_hz_, &output_frame));
+      EXPECT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
+      const size_t samples_per_block =
+          static_cast<size_t>(output_freq_hz_ * 10 / 1000);
+      EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
+      if (exptected_output_channels_ != kArbitraryChannels) {
+        if (output_frame.speech_type_ == webrtc::AudioFrame::kPLC) {
+          // Don't check number of channels for PLC output, since each test run
+          // usually starts with a short period of mono PLC before decoding the
+          // first packet.
+        } else {
+          EXPECT_EQ(exptected_output_channels_, output_frame.num_channels_);
+        }
+      }
+      ASSERT_TRUE(audio_sink_->WriteAudioFrame(output_frame));
+      clock_.AdvanceTimeMilliseconds(10);
+      AfterGetAudio();  // Hook for subclasses (e.g. output-frequency toggle).
+    }
+
+    // Insert packet after converting from RTPHeader to WebRtcRTPHeader.
+    WebRtcRTPHeader header;
+    header.header = packet->header();
+    header.frameType = kAudioFrameSpeech;
+    memset(&header.type.Audio, 0, sizeof(RTPAudioHeader));
+    EXPECT_EQ(0,
+              acm_->IncomingPacket(
+                  packet->payload(),
+                  static_cast<int32_t>(packet->payload_length_bytes()),
+                  header))
+        << "Failure when inserting packet:" << std::endl
+        << "  PT = " << static_cast<int>(header.header.payloadType) << std::endl
+        << "  TS = " << header.header.timestamp << std::endl
+        << "  SN = " << header.header.sequenceNumber;
+  }
+}
+
+// Constructs the toggling variant. Output starts at |output_freq_hz_1| and
+// the first toggle is scheduled |toggle_period_ms| after construction time.
+AcmReceiveTestToggleOutputFreqOldApi::AcmReceiveTestToggleOutputFreqOldApi(
+    PacketSource* packet_source,
+    AudioSink* audio_sink,
+    int output_freq_hz_1,
+    int output_freq_hz_2,
+    int toggle_period_ms,
+    NumOutputChannels exptected_output_channels)
+    : AcmReceiveTestOldApi(packet_source,
+                           audio_sink,
+                           output_freq_hz_1,
+                           exptected_output_channels),
+      output_freq_hz_1_(output_freq_hz_1),
+      output_freq_hz_2_(output_freq_hz_2),
+      toggle_period_ms_(toggle_period_ms),
+      last_toggle_time_ms_(clock_.TimeInMilliseconds()) {
+}
+
+// Flips |output_freq_hz_| between the two configured rates once
+// |toggle_period_ms_| of simulated time has elapsed since the last toggle.
+void AcmReceiveTestToggleOutputFreqOldApi::AfterGetAudio() {
+  const int64_t now_ms = clock_.TimeInMilliseconds();
+  if (now_ms - last_toggle_time_ms_ < toggle_period_ms_)
+    return;
+  output_freq_hz_ = output_freq_hz_ == output_freq_hz_1_ ? output_freq_hz_2_
+                                                         : output_freq_hz_1_;
+  last_toggle_time_ms_ = now_ms;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.h b/webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.h
new file mode 100644
index 0000000..091513d
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.h
@@ -0,0 +1,94 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RECEIVE_TEST_OLDAPI_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RECEIVE_TEST_OLDAPI_H_
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+class AudioCodingModule;
+class AudioDecoder;
+struct CodecInst;
+
+namespace test {
+class AudioSink;
+class PacketSource;
+
+// Test fixture that feeds RTP packets from a PacketSource into an
+// AudioCodingModule and writes the decoded audio to an AudioSink, driven by
+// a simulated clock.
+class AcmReceiveTestOldApi {
+ public:
+  // Expected channel count of the decoded output; kArbitraryChannels
+  // disables the check in Run().
+  enum NumOutputChannels {
+    kArbitraryChannels = 0,
+    kMonoOutput = 1,
+    kStereoOutput = 2
+  };
+
+  AcmReceiveTestOldApi(PacketSource* packet_source,
+                       AudioSink* audio_sink,
+                       int output_freq_hz,
+                       NumOutputChannels exptected_output_channels);
+  virtual ~AcmReceiveTestOldApi() {}
+
+  // Registers the codecs with default parameters from ACM.
+  void RegisterDefaultCodecs();
+
+  // Registers codecs with payload types matching the pre-encoded NetEq test
+  // files.
+  void RegisterNetEqTestCodecs();
+
+  // Registers an externally owned decoder with the ACM; returns the ACM's
+  // result code.
+  int RegisterExternalReceiveCodec(int rtp_payload_type,
+                                   AudioDecoder* external_decoder,
+                                   int sample_rate_hz,
+                                   int num_channels);
+
+  // Runs the test; failures are reported through gtest assertions.
+  void Run();
+
+ protected:
+  // Method is called after each block of output audio is received from ACM.
+  virtual void AfterGetAudio() {}
+
+  SimulatedClock clock_;
+  rtc::scoped_ptr<AudioCodingModule> acm_;
+  PacketSource* packet_source_;  // Not owned.
+  AudioSink* audio_sink_;  // Not owned.
+  int output_freq_hz_;
+  NumOutputChannels exptected_output_channels_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AcmReceiveTestOldApi);
+};
+
+// This test toggles the output frequency every |toggle_period_ms|. The test
+// starts with |output_freq_hz_1|. Except for the toggling, it does the same
+// thing as AcmReceiveTestOldApi.
+class AcmReceiveTestToggleOutputFreqOldApi : public AcmReceiveTestOldApi {
+ public:
+  AcmReceiveTestToggleOutputFreqOldApi(
+      PacketSource* packet_source,
+      AudioSink* audio_sink,
+      int output_freq_hz_1,
+      int output_freq_hz_2,
+      int toggle_period_ms,
+      NumOutputChannels exptected_output_channels);
+
+ protected:
+  // Toggles |output_freq_hz_| once |toggle_period_ms_| has elapsed.
+  void AfterGetAudio() override;
+
+  const int output_freq_hz_1_;
+  const int output_freq_hz_2_;
+  const int toggle_period_ms_;
+  int64_t last_toggle_time_ms_;  // Simulated time of the most recent toggle.
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RECEIVE_TEST_OLDAPI_H_
diff --git a/webrtc/modules/audio_coding/acm2/acm_receiver.cc b/webrtc/modules/audio_coding/acm2/acm_receiver.cc
new file mode 100644
index 0000000..036877c
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/acm_receiver.cc
@@ -0,0 +1,540 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/acm2/acm_receiver.h"
+
+#include <stdlib.h>  // malloc
+
+#include <algorithm>  // sort
+#include <vector>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/format_macros.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
+#include "webrtc/modules/audio_coding/acm2/acm_resampler.h"
+#include "webrtc/modules/audio_coding/acm2/call_statistics.h"
+#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/include/tick_util.h"
+#include "webrtc/system_wrappers/include/trace.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+namespace {
+
+// Maps the NetEq output |type| onto the |vad_activity_| and |speech_type_|
+// fields of |audio_frame|. The |vad_activity_| field of |audio_frame| is set
+// to |previous_audio_activity_| before the call to this function, so cases
+// that leave it untouched deliberately keep the previous activity.
+void SetAudioFrameActivityAndType(bool vad_enabled,
+                                  NetEqOutputType type,
+                                  AudioFrame* audio_frame) {
+  if (vad_enabled) {
+    switch (type) {
+      case kOutputNormal: {
+        audio_frame->vad_activity_ = AudioFrame::kVadActive;
+        audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
+        break;
+      }
+      case kOutputVADPassive: {
+        audio_frame->vad_activity_ = AudioFrame::kVadPassive;
+        audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
+        break;
+      }
+      case kOutputCNG: {
+        audio_frame->vad_activity_ = AudioFrame::kVadPassive;
+        audio_frame->speech_type_ = AudioFrame::kCNG;
+        break;
+      }
+      case kOutputPLC: {
+        // Don't change |audio_frame->vad_activity_|, it should be the same as
+        // |previous_audio_activity_|.
+        audio_frame->speech_type_ = AudioFrame::kPLC;
+        break;
+      }
+      case kOutputPLCtoCNG: {
+        audio_frame->vad_activity_ = AudioFrame::kVadPassive;
+        audio_frame->speech_type_ = AudioFrame::kPLCCNG;
+        break;
+      }
+      default:
+        assert(false);
+    }
+  } else {
+    // Always return kVadUnknown when receive VAD is inactive.
+    audio_frame->vad_activity_ = AudioFrame::kVadUnknown;
+    switch (type) {
+      case kOutputNormal: {
+        audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
+        break;
+      }
+      case kOutputCNG: {
+        audio_frame->speech_type_ = AudioFrame::kCNG;
+        break;
+      }
+      case kOutputPLC: {
+        audio_frame->speech_type_ = AudioFrame::kPLC;
+        break;
+      }
+      case kOutputPLCtoCNG: {
+        audio_frame->speech_type_ = AudioFrame::kPLCCNG;
+        break;
+      }
+      case kOutputVADPassive: {
+        // Normally, we should not get any VAD decision if post-decoding VAD is
+        // not active. However, if post-decoding VAD has been active then
+        // disabled, we might be here for a couple of frames.
+        audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
+        LOG(WARNING) << "Post-decoding VAD is disabled but output is "
+            << "labeled VAD-passive";
+        break;
+      }
+      default:
+        assert(false);
+    }
+  }
+}
+
+// Returns true iff |codec_id| refers to one of the comfort-noise codecs
+// (narrowband, wideband, super-wideband or fullband CN).
+// TODO(kwiberg): Move to RentACodec.
+bool IsCng(int codec_id) {
+  const auto codec = RentACodec::CodecIdFromIndex(codec_id);
+  if (!codec)
+    return false;
+  switch (*codec) {
+    case RentACodec::CodecId::kCNNB:
+    case RentACodec::CodecId::kCNWB:
+    case RentACodec::CodecId::kCNSWB:
+    case RentACodec::CodecId::kCNFB:
+      return true;
+    default:
+      return false;
+  }
+}
+
+}  // namespace
+
+// Constructs the receiver, creating the owned NetEq instance and zeroing the
+// two 10-ms scratch buffers.
+AcmReceiver::AcmReceiver(const AudioCodingModule::Config& config)
+    : crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+      id_(config.id),
+      last_audio_decoder_(nullptr),
+      previous_audio_activity_(AudioFrame::kVadPassive),
+      audio_buffer_(new int16_t[AudioFrame::kMaxDataSizeSamples]),
+      last_audio_buffer_(new int16_t[AudioFrame::kMaxDataSizeSamples]),
+      neteq_(NetEq::Create(config.neteq_config)),
+      vad_enabled_(config.neteq_config.enable_post_decode_vad),
+      clock_(config.clock),
+      resampled_last_output_frame_(true) {
+  assert(clock_);
+  // The buffers hold int16_t samples, so the memset byte count must include
+  // sizeof(int16_t); passing only the element count would leave the second
+  // half of each buffer uninitialized.
+  memset(audio_buffer_.get(), 0,
+         sizeof(int16_t) * AudioFrame::kMaxDataSizeSamples);
+  memset(last_audio_buffer_.get(), 0,
+         sizeof(int16_t) * AudioFrame::kMaxDataSizeSamples);
+}
+
+AcmReceiver::~AcmReceiver() {
+  // |neteq_| is a raw owning pointer created via NetEq::Create() in the
+  // constructor; free it here.
+  delete neteq_;
+}
+
+// Forwards the minimum packet-buffer delay to NetEq. Returns 0 on success,
+// -1 if NetEq rejects |delay_ms|.
+int AcmReceiver::SetMinimumDelay(int delay_ms) {
+  if (neteq_->SetMinimumDelay(delay_ms))
+    return 0;
+  // Log the actual method name (previous message said "SetExtraDelay").
+  LOG(LERROR) << "AcmReceiver::SetMinimumDelay " << delay_ms;
+  return -1;
+}
+
+// Forwards the maximum packet-buffer delay to NetEq. Returns 0 on success,
+// -1 if NetEq rejects |delay_ms|.
+int AcmReceiver::SetMaximumDelay(int delay_ms) {
+  if (neteq_->SetMaximumDelay(delay_ms))
+    return 0;
+  // Log the actual method name (previous message said "SetExtraDelay").
+  LOG(LERROR) << "AcmReceiver::SetMaximumDelay " << delay_ms;
+  return -1;
+}
+
+// Returns NetEq's least required delay, before user-set min/max limits.
+int AcmReceiver::LeastRequiredDelayMs() const {
+  return neteq_->LeastRequiredDelayMs();
+}
+
+// Returns the sample rate of the last inserted non-CNG/non-DTMF packet, or an
+// empty Optional if none has been seen (or the codec was since removed).
+rtc::Optional<int> AcmReceiver::last_packet_sample_rate_hz() const {
+  CriticalSectionScoped lock(crit_sect_.get());
+  return last_packet_sample_rate_hz_;
+}
+
+// Thin forwarder to NetEq; no receiver state is read, so no lock is taken.
+int AcmReceiver::last_output_sample_rate_hz() const {
+  return neteq_->last_output_sample_rate_hz();
+}
+
+int AcmReceiver::InsertPacket(const WebRtcRTPHeader& rtp_header,
+                              rtc::ArrayView<const uint8_t> incoming_payload) {
+  uint32_t receive_timestamp = 0;
+  const RTPHeader* header = &rtp_header.header;  // Just a shorthand.
+
+  {
+    CriticalSectionScoped lock(crit_sect_.get());
+
+    const Decoder* decoder = RtpHeaderToDecoder(*header, incoming_payload[0]);
+    if (!decoder) {
+      LOG_F(LS_ERROR) << "Payload-type "
+                      << static_cast<int>(header->payloadType)
+                      << " is not registered.";
+      return -1;
+    }
+    const int sample_rate_hz = [&decoder] {
+      const auto ci = RentACodec::CodecIdFromIndex(decoder->acm_codec_id);
+      return ci ? RentACodec::CodecInstById(*ci)->plfreq : -1;
+    }();
+    receive_timestamp = NowInTimestamp(sample_rate_hz);
+
+    // If this is a CNG while the audio codec is not mono, skip pushing in
+    // packets into NetEq.
+    if (IsCng(decoder->acm_codec_id) && last_audio_decoder_ &&
+        last_audio_decoder_->channels > 1)
+        return 0;
+    if (!IsCng(decoder->acm_codec_id) &&
+        decoder->acm_codec_id !=
+            *RentACodec::CodecIndexFromId(RentACodec::CodecId::kAVT)) {
+      last_audio_decoder_ = decoder;
+      last_packet_sample_rate_hz_ = rtc::Optional<int>(decoder->sample_rate_hz);
+    }
+
+  }  // |crit_sect_| is released.
+
+  if (neteq_->InsertPacket(rtp_header, incoming_payload, receive_timestamp) <
+      0) {
+    LOG(LERROR) << "AcmReceiver::InsertPacket "
+                << static_cast<int>(header->payloadType)
+                << " Failed to insert packet";
+    return -1;
+  }
+  return 0;
+}
+
+// Pulls 10 ms of decoded audio from NetEq into |audio_frame|, resampling to
+// |desired_freq_hz| if needed (-1 means no resampling). Returns 0 on success,
+// -1 on NetEq or resampler failure.
+int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
+  enum NetEqOutputType type;
+  size_t samples_per_channel;
+  int num_channels;
+
+  // Accessing members, take the lock.
+  CriticalSectionScoped lock(crit_sect_.get());
+
+  // Always write the output to |audio_buffer_| first.
+  if (neteq_->GetAudio(AudioFrame::kMaxDataSizeSamples,
+                       audio_buffer_.get(),
+                       &samples_per_channel,
+                       &num_channels,
+                       &type) != NetEq::kOK) {
+    LOG(LERROR) << "AcmReceiver::GetAudio - NetEq Failed.";
+    return -1;
+  }
+
+  const int current_sample_rate_hz = neteq_->last_output_sample_rate_hz();
+
+  // Update if resampling is required.
+  const bool need_resampling =
+      (desired_freq_hz != -1) && (current_sample_rate_hz != desired_freq_hz);
+
+  if (need_resampling && !resampled_last_output_frame_) {
+    // Prime the resampler with the last frame. The resampled output is
+    // discarded (|temp_output|); only the resampler's internal state matters.
+    int16_t temp_output[AudioFrame::kMaxDataSizeSamples];
+    int samples_per_channel_int = resampler_.Resample10Msec(
+        last_audio_buffer_.get(), current_sample_rate_hz, desired_freq_hz,
+        num_channels, AudioFrame::kMaxDataSizeSamples, temp_output);
+    if (samples_per_channel_int < 0) {
+      LOG(LERROR) << "AcmReceiver::GetAudio - "
+                     "Resampling last_audio_buffer_ failed.";
+      return -1;
+    }
+    samples_per_channel = static_cast<size_t>(samples_per_channel_int);
+  }
+
+  // The audio in |audio_buffer_| is transferred to |audio_frame| below, either
+  // through resampling, or through straight memcpy.
+  // TODO(henrik.lundin) Glitches in the output may appear if the output rate
+  // from NetEq changes. See WebRTC issue 3923.
+  if (need_resampling) {
+    int samples_per_channel_int = resampler_.Resample10Msec(
+        audio_buffer_.get(), current_sample_rate_hz, desired_freq_hz,
+        num_channels, AudioFrame::kMaxDataSizeSamples, audio_frame->data_);
+    if (samples_per_channel_int < 0) {
+      LOG(LERROR) << "AcmReceiver::GetAudio - Resampling audio_buffer_ failed.";
+      return -1;
+    }
+    samples_per_channel = static_cast<size_t>(samples_per_channel_int);
+    resampled_last_output_frame_ = true;
+  } else {
+    resampled_last_output_frame_ = false;
+    // We might end up here ONLY if codec is changed.
+    memcpy(audio_frame->data_,
+           audio_buffer_.get(),
+           samples_per_channel * num_channels * sizeof(int16_t));
+  }
+
+  // Swap buffers, so that the current audio is stored in |last_audio_buffer_|
+  // for next time.
+  audio_buffer_.swap(last_audio_buffer_);
+
+  audio_frame->num_channels_ = num_channels;
+  audio_frame->samples_per_channel_ = samples_per_channel;
+  // Each GetAudio call yields 10 ms of audio, so rate = samples * 100.
+  audio_frame->sample_rate_hz_ = static_cast<int>(samples_per_channel * 100);
+
+  // Should set |vad_activity| before calling SetAudioFrameActivityAndType().
+  audio_frame->vad_activity_ = previous_audio_activity_;
+  SetAudioFrameActivityAndType(vad_enabled_, type, audio_frame);
+  previous_audio_activity_ = audio_frame->vad_activity_;
+  call_stats_.DecodedByNetEq(audio_frame->speech_type_);
+
+  // Computes the RTP timestamp of the first sample in |audio_frame| from
+  // |GetPlayoutTimestamp|, which is the timestamp of the last sample of
+  // |audio_frame|.
+  uint32_t playout_timestamp = 0;
+  if (GetPlayoutTimestamp(&playout_timestamp)) {
+    audio_frame->timestamp_ = playout_timestamp -
+        static_cast<uint32_t>(audio_frame->samples_per_channel_);
+  } else {
+    // Remain 0 until we have a valid |playout_timestamp|.
+    audio_frame->timestamp_ = 0;
+  }
+
+  return 0;
+}
+
+// Registers a decoder with NetEq for |payload_type|. |acm_codec_id| == -1
+// denotes an external decoder; |audio_decoder| may be null, in which case
+// NetEq constructs the decoder itself. Returns 0 on success, -1 on failure.
+int32_t AcmReceiver::AddCodec(int acm_codec_id,
+                              uint8_t payload_type,
+                              int channels,
+                              int sample_rate_hz,
+                              AudioDecoder* audio_decoder) {
+  // Map the ACM codec index (plus channel count) to the NetEq decoder enum.
+  const auto neteq_decoder = [acm_codec_id, channels]() -> NetEqDecoder {
+    if (acm_codec_id == -1)
+      return NetEqDecoder::kDecoderArbitrary;  // External decoder.
+    const rtc::Optional<RentACodec::CodecId> cid =
+        RentACodec::CodecIdFromIndex(acm_codec_id);
+    RTC_DCHECK(cid) << "Invalid codec index: " << acm_codec_id;
+    const rtc::Optional<NetEqDecoder> ned =
+        RentACodec::NetEqDecoderFromCodecId(*cid, channels);
+    RTC_DCHECK(ned) << "Invalid codec ID: " << static_cast<int>(*cid);
+    return *ned;
+  }();
+
+  CriticalSectionScoped lock(crit_sect_.get());
+
+  // If this payload type is already registered, check whether this call is a
+  // re-registration of the exact same codec; if so there is nothing to do.
+  auto it = decoders_.find(payload_type);
+  if (it != decoders_.end()) {
+    const Decoder& decoder = it->second;
+    if (acm_codec_id != -1 && decoder.acm_codec_id == acm_codec_id &&
+        decoder.channels == channels &&
+        decoder.sample_rate_hz == sample_rate_hz) {
+      // Re-registering the same codec. Do nothing and return.
+      return 0;
+    }
+
+    // Changing codec. First unregister the old codec, then register the new
+    // one.
+    if (neteq_->RemovePayloadType(payload_type) != NetEq::kOK) {
+      LOG(LERROR) << "Cannot remove payload " << static_cast<int>(payload_type);
+      return -1;
+    }
+
+    decoders_.erase(it);
+  }
+
+  int ret_val;
+  if (!audio_decoder) {
+    ret_val = neteq_->RegisterPayloadType(neteq_decoder, payload_type);
+  } else {
+    ret_val = neteq_->RegisterExternalDecoder(audio_decoder, neteq_decoder,
+                                              payload_type, sample_rate_hz);
+  }
+  if (ret_val != NetEq::kOK) {
+    LOG(LERROR) << "AcmReceiver::AddCodec " << acm_codec_id
+                << static_cast<int>(payload_type)
+                << " channels: " << channels;
+    return -1;
+  }
+
+  // Record the registration in our own payload-type-keyed map.
+  Decoder decoder;
+  decoder.acm_codec_id = acm_codec_id;
+  decoder.payload_type = payload_type;
+  decoder.channels = channels;
+  decoder.sample_rate_hz = sample_rate_hz;
+  decoders_[payload_type] = decoder;
+  return 0;
+}
+
+// Enables post-decode VAD in NetEq, then updates the cached flag under lock.
+void AcmReceiver::EnableVad() {
+  neteq_->EnableVad();
+  CriticalSectionScoped lock(crit_sect_.get());
+  vad_enabled_ = true;
+}
+
+// Disables post-decode VAD in NetEq, then updates the cached flag under lock.
+void AcmReceiver::DisableVad() {
+  neteq_->DisableVad();
+  CriticalSectionScoped lock(crit_sect_.get());
+  vad_enabled_ = false;
+}
+
+// Flushes NetEq's packet and speech buffers.
+void AcmReceiver::FlushBuffers() {
+  neteq_->FlushBuffers();
+}
+
+// Unregisters all codecs. If removing one of the codecs fails, this method
+// continues to remove as many as it can, and returns -1; returns 0 if every
+// removal succeeded.
+int AcmReceiver::RemoveAllCodecs() {
+  int ret_val = 0;
+  CriticalSectionScoped lock(crit_sect_.get());
+  for (auto it = decoders_.begin(); it != decoders_.end();) {
+    if (neteq_->RemovePayloadType(it->second.payload_type) == 0) {
+      // map::erase returns the iterator following the erased element, so the
+      // loop stays valid without a separate "cur" copy.
+      it = decoders_.erase(it);
+    } else {
+      LOG_F(LS_ERROR) << "Cannot remove payload "
+                      << static_cast<int>(it->second.payload_type);
+      ret_val = -1;
+      ++it;
+    }
+  }
+
+  // No codec is registered, invalidate last audio decoder.
+  last_audio_decoder_ = nullptr;
+  last_packet_sample_rate_hz_ = rtc::Optional<int>();
+  return ret_val;
+}
+
+// Unregisters the codec for |payload_type|. Returns 0 if it was removed or
+// was never registered, -1 if NetEq fails to remove it.
+int AcmReceiver::RemoveCodec(uint8_t payload_type) {
+  CriticalSectionScoped lock(crit_sect_.get());
+  auto it = decoders_.find(payload_type);
+  if (it == decoders_.end()) {  // Such a payload-type is not registered.
+    return 0;
+  }
+  if (neteq_->RemovePayloadType(payload_type) != NetEq::kOK) {
+    LOG(LERROR) << "AcmReceiver::RemoveCodec" << static_cast<int>(payload_type);
+    return -1;
+  }
+  // If the cached "last decoder" points at the entry being erased, invalidate
+  // it (and the cached sample rate) to avoid a dangling pointer.
+  if (last_audio_decoder_ == &it->second) {
+    last_audio_decoder_ = nullptr;
+    last_packet_sample_rate_hz_ = rtc::Optional<int>();
+  }
+  decoders_.erase(it);
+  return 0;
+}
+
+// Sets the receiver ID (used for logging/tracing).
+void AcmReceiver::set_id(int id) {
+  CriticalSectionScoped lock(crit_sect_.get());
+  id_ = id;
+}
+
+// Fetches the RTP timestamp of the last sample delivered by GetAudio().
+// Returns true if |timestamp| was written with a valid value.
+bool AcmReceiver::GetPlayoutTimestamp(uint32_t* timestamp) {
+  return neteq_->GetPlayoutTimestamp(timestamp);
+}
+
+// Fills |codec| with the codec of the last received non-CNG/non-DTMF packet.
+// Returns -1 if no such packet has been received, otherwise 0.
+int AcmReceiver::LastAudioCodec(CodecInst* codec) const {
+  CriticalSectionScoped lock(crit_sect_.get());
+  if (!last_audio_decoder_) {
+    return -1;
+  }
+  // Start from the database entry, then overwrite the fields that were chosen
+  // at registration time (payload type, channels, sample rate).
+  *codec = *RentACodec::CodecInstById(
+      *RentACodec::CodecIdFromIndex(last_audio_decoder_->acm_codec_id));
+  codec->pltype = last_audio_decoder_->payload_type;
+  codec->channels = last_audio_decoder_->channels;
+  codec->plfreq = last_audio_decoder_->sample_rate_hz;
+  return 0;
+}
+
+// Copies the current NetEq network statistics into |acm_stat|, translating
+// the NetEq field names into the ACM NetworkStatistics fields.
+void AcmReceiver::GetNetworkStatistics(NetworkStatistics* acm_stat) {
+  NetEqNetworkStatistics neteq_stat;
+  // NetEq function always returns zero, so we don't check the return value.
+  neteq_->NetworkStatistics(&neteq_stat);
+
+  acm_stat->currentBufferSize = neteq_stat.current_buffer_size_ms;
+  acm_stat->preferredBufferSize = neteq_stat.preferred_buffer_size_ms;
+  acm_stat->jitterPeaksFound = neteq_stat.jitter_peaks_found != 0;
+  acm_stat->currentPacketLossRate = neteq_stat.packet_loss_rate;
+  acm_stat->currentDiscardRate = neteq_stat.packet_discard_rate;
+  acm_stat->currentExpandRate = neteq_stat.expand_rate;
+  acm_stat->currentSpeechExpandRate = neteq_stat.speech_expand_rate;
+  acm_stat->currentPreemptiveRate = neteq_stat.preemptive_rate;
+  acm_stat->currentAccelerateRate = neteq_stat.accelerate_rate;
+  acm_stat->currentSecondaryDecodedRate = neteq_stat.secondary_decoded_rate;
+  acm_stat->clockDriftPPM = neteq_stat.clockdrift_ppm;
+  acm_stat->addedSamples = neteq_stat.added_zero_samples;
+  acm_stat->meanWaitingTimeMs = neteq_stat.mean_waiting_time_ms;
+  acm_stat->medianWaitingTimeMs = neteq_stat.median_waiting_time_ms;
+  acm_stat->minWaitingTimeMs = neteq_stat.min_waiting_time_ms;
+  acm_stat->maxWaitingTimeMs = neteq_stat.max_waiting_time_ms;
+}
+
+// Looks up the codec registered for |payload_type| and writes it to |codec|.
+// Returns 0 on success, -1 if the payload type is not registered.
+int AcmReceiver::DecoderByPayloadType(uint8_t payload_type,
+                                      CodecInst* codec) const {
+  CriticalSectionScoped lock(crit_sect_.get());
+  auto it = decoders_.find(payload_type);
+  if (it == decoders_.end()) {
+    LOG(LERROR) << "AcmReceiver::DecoderByPayloadType "
+                << static_cast<int>(payload_type);
+    return -1;
+  }
+  // Start from the database entry, then overwrite the registration-specific
+  // fields (payload type, channels, sample rate).
+  const Decoder& decoder = it->second;
+  *codec = *RentACodec::CodecInstById(
+      *RentACodec::CodecIdFromIndex(decoder.acm_codec_id));
+  codec->pltype = decoder.payload_type;
+  codec->channels = decoder.channels;
+  codec->plfreq = decoder.sample_rate_hz;
+  return 0;
+}
+
+// Enables NACK in NetEq with the given maximum list size. Always returns 0.
+int AcmReceiver::EnableNack(size_t max_nack_list_size) {
+  neteq_->EnableNack(max_nack_list_size);
+  return 0;
+}
+
+// Disables NACK in NetEq.
+void AcmReceiver::DisableNack() {
+  neteq_->DisableNack();
+}
+
+// Returns the sequence numbers NetEq wants retransmitted, given the RTT.
+std::vector<uint16_t> AcmReceiver::GetNackList(
+    int64_t round_trip_time_ms) const {
+  return neteq_->GetNackList(round_trip_time_ms);
+}
+
+// Resets the minimum delay to zero.
+void AcmReceiver::ResetInitialDelay() {
+  neteq_->SetMinimumDelay(0);
+  // TODO(turajs): Should NetEq Buffer be flushed?
+}
+
+// Resolves the decoder for an incoming packet. |payload_type| is the first
+// byte of the payload, used to find the embedded audio payload type when the
+// RTP header indicates a RED packet. Returns null if nothing is registered.
+// Caller must hold |crit_sect_|.
+const AcmReceiver::Decoder* AcmReceiver::RtpHeaderToDecoder(
+    const RTPHeader& rtp_header,
+    uint8_t payload_type) const {
+  auto it = decoders_.find(rtp_header.payloadType);
+  const auto red_index =
+      RentACodec::CodecIndexFromId(RentACodec::CodecId::kRED);
+  if (red_index &&  // This ensures that RED is defined in WebRTC.
+      it != decoders_.end() && it->second.acm_codec_id == *red_index) {
+    // This is a RED packet, get the payload of the audio codec.
+    // The low 7 bits of the first payload byte carry the block payload type.
+    it = decoders_.find(payload_type & 0x7F);
+  }
+
+  // Check if the payload is registered.
+  return it != decoders_.end() ? &it->second : nullptr;
+}
+
+// Converts the current wall-clock time into RTP timestamp units at
+// |decoder_sampling_rate| Hz.
+uint32_t AcmReceiver::NowInTimestamp(int decoder_sampling_rate) const {
+  // Down-cast the time to (32-6)-bit since we only care about
+  // the least significant bits. (32-6) bits cover 2^(32-6) = 67108864 ms.
+  // We masked 6 most significant bits of 32-bit so there is no overflow in
+  // the conversion from milliseconds to timestamp.
+  const uint32_t now_in_ms = static_cast<uint32_t>(
+      clock_->TimeInMilliseconds() & 0x03ffffff);
+  return static_cast<uint32_t>(
+      (decoder_sampling_rate / 1000) * now_in_ms);
+}
+
+// Copies the accumulated GetAudio() decoding statistics into |stats|.
+void AcmReceiver::GetDecodingCallStatistics(
+    AudioDecodingCallStats* stats) const {
+  CriticalSectionScoped lock(crit_sect_.get());
+  *stats = call_stats_.GetDecodingStatistics();
+}
+
+}  // namespace acm2
+
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/acm_receiver.h b/webrtc/modules/audio_coding/acm2/acm_receiver.h
new file mode 100644
index 0000000..d5a644d
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/acm_receiver.h
@@ -0,0 +1,305 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
+
+#include <map>
+#include <vector>
+
+#include "webrtc/base/array_view.h"
+#include "webrtc/base/optional.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/thread_annotations.h"
+#include "webrtc/common_audio/vad/include/webrtc_vad.h"
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/acm2/acm_resampler.h"
+#include "webrtc/modules/audio_coding/acm2/call_statistics.h"
+#include "webrtc/modules/audio_coding/acm2/initial_delay_manager.h"
+#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+struct CodecInst;
+class CriticalSectionWrapper;
+class NetEq;
+
+namespace acm2 {
+
+// AcmReceiver owns the receive side of the audio coding module: it wraps a
+// NetEq instance and handles packet insertion, decoding, codec registration
+// and statistics reporting.
+class AcmReceiver {
+ public:
+  // Per-payload-type registration record kept alongside NetEq's own state.
+  struct Decoder {
+    int acm_codec_id;
+    uint8_t payload_type;
+    // This field is meaningful for codecs where both mono and
+    // stereo versions are registered under the same ID.
+    int channels;
+    int sample_rate_hz;
+  };
+
+  // Constructor of the class
+  explicit AcmReceiver(const AudioCodingModule::Config& config);
+
+  // Destructor of the class.
+  ~AcmReceiver();
+
+  //
+  // Inserts a payload with its associated RTP-header into NetEq.
+  //
+  // Input:
+  //   - rtp_header           : RTP header for the incoming payload containing
+  //                            information about payload type, sequence number,
+  //                            timestamp, SSRC and marker bit.
+  //   - incoming_payload     : Incoming audio payload.
+  //
+  // Return value             : 0 if OK.
+  //                           <0 if NetEq returned an error.
+  //
+  int InsertPacket(const WebRtcRTPHeader& rtp_header,
+                   rtc::ArrayView<const uint8_t> incoming_payload);
+
+  //
+  // Asks NetEq for 10 milliseconds of decoded audio.
+  //
+  // Input:
+  //   -desired_freq_hz       : specifies the sampling rate [Hz] of the output
+  //                            audio. If set to -1, no resampling is performed
+  //                            and the audio is returned at the sampling rate
+  //                            of the decoder.
+  //
+  // Output:
+  //   -audio_frame           : an audio frame where output data and
+  //                            associated parameters are written to.
+  //
+  // Return value             : 0 if OK.
+  //                           -1 if NetEq returned an error.
+  //
+  int GetAudio(int desired_freq_hz, AudioFrame* audio_frame);
+
+  //
+  // Adds a new codec to the NetEq codec database.
+  //
+  // Input:
+  //   - acm_codec_id        : ACM codec ID; -1 means external decoder.
+  //   - payload_type        : payload type.
+  //   - sample_rate_hz      : sample rate.
+  //   - audio_decoder       : pointer to a decoder object. If it's null, then
+  //                           NetEq will internally create a decoder object
+  //                           based on the value of |acm_codec_id| (which
+  //                           mustn't be -1). Otherwise, NetEq will use the
+  //                           given decoder for the given payload type. NetEq
+  //                           won't take ownership of the decoder; it's up to
+  //                           the caller to delete it when it's no longer
+  //                           needed.
+  //
+  //                           Providing an existing decoder object here is
+  //                           necessary for external decoders, but may also be
+  //                           used for built-in decoders if NetEq doesn't have
+  //                           all the info it needs to construct them properly
+  //                           (e.g. iSAC, where the decoder needs to be paired
+  //                           with an encoder).
+  //
+  // Return value             : 0 if OK.
+  //                           <0 if NetEq returned an error.
+  //
+  int AddCodec(int acm_codec_id,
+               uint8_t payload_type,
+               int channels,
+               int sample_rate_hz,
+               AudioDecoder* audio_decoder);
+
+  //
+  // Sets a minimum delay for packet buffer. The given delay is maintained,
+  // unless channel condition dictates a higher delay.
+  //
+  // Input:
+  //   - delay_ms             : minimum delay in milliseconds.
+  //
+  // Return value             : 0 if OK.
+  //                           <0 if NetEq returned an error.
+  //
+  int SetMinimumDelay(int delay_ms);
+
+  //
+  // Sets a maximum delay [ms] for the packet buffer. The target delay does not
+  // exceed the given value, even if channel condition requires so.
+  //
+  // Input:
+  //   - delay_ms             : maximum delay in milliseconds.
+  //
+  // Return value             : 0 if OK.
+  //                           <0 if NetEq returned an error.
+  //
+  int SetMaximumDelay(int delay_ms);
+
+  //
+  // Get least required delay computed based on channel conditions. Note that
+  // this is before applying any user-defined limits (specified by calling
+  // (SetMinimumDelay() and/or SetMaximumDelay()).
+  //
+  int LeastRequiredDelayMs() const;
+
+  //
+  // Resets the initial delay to zero.
+  //
+  void ResetInitialDelay();
+
+  // Returns the sample rate of the decoder associated with the last incoming
+  // packet. If no packet of a registered non-CNG codec has been received, the
+  // return value is empty. Also, if the decoder was unregistered since the last
+  // packet was inserted, the return value is empty.
+  rtc::Optional<int> last_packet_sample_rate_hz() const;
+
+  // Returns last_output_sample_rate_hz from the NetEq instance.
+  int last_output_sample_rate_hz() const;
+
+  //
+  // Get the current network statistics from NetEq.
+  //
+  // Output:
+  //   - statistics           : The current network statistics.
+  //
+  void GetNetworkStatistics(NetworkStatistics* statistics);
+
+  //
+  // Enable post-decoding VAD.
+  //
+  void EnableVad();
+
+  //
+  // Disable post-decoding VAD.
+  //
+  void DisableVad();
+
+  //
+  // Returns whether post-decoding VAD is enabled (true) or disabled (false).
+  //
+  bool vad_enabled() const { return vad_enabled_; }
+
+  //
+  // Flushes the NetEq packet and speech buffers.
+  //
+  void FlushBuffers();
+
+  //
+  // Removes a payload-type from the NetEq codec database.
+  //
+  // Input:
+  //   - payload_type         : the payload-type to be removed.
+  //
+  // Return value             : 0 if OK.
+  //                           -1 if an error occurred.
+  //
+  int RemoveCodec(uint8_t payload_type);
+
+  //
+  // Remove all registered codecs.
+  //
+  int RemoveAllCodecs();
+
+  //
+  // Set ID.
+  //
+  void set_id(int id);  // TODO(turajs): can be inline.
+
+  //
+  // Gets the RTP timestamp of the last sample delivered by GetAudio().
+  // Returns true if the RTP timestamp is valid, otherwise false.
+  //
+  bool GetPlayoutTimestamp(uint32_t* timestamp);
+
+  //
+  // Get the audio codec associated with the last non-CNG/non-DTMF received
+  // payload. If no non-CNG/non-DTMF packet is received -1 is returned,
+  // otherwise return 0.
+  //
+  int LastAudioCodec(CodecInst* codec) const;
+
+  //
+  // Get a decoder given its registered payload-type.
+  //
+  // Input:
+  //    -payload_type         : the payload-type of the codec to be retrieved.
+  //
+  // Output:
+  //    -codec                : codec associated with the given payload-type.
+  //
+  // Return value             : 0 if succeeded.
+  //                           -1 if failed, e.g. given payload-type is not
+  //                              registered.
+  //
+  int DecoderByPayloadType(uint8_t payload_type,
+                           CodecInst* codec) const;
+
+  //
+  // Enable NACK and set the maximum size of the NACK list. If NACK is already
+  // enabled then the maximum NACK list size is modified accordingly.
+  //
+  // Input:
+  //    -max_nack_list_size  : maximum NACK list size
+  //                           should be positive (non-zero) and less than or
+  //                           equal to |Nack::kNackListSizeLimit|
+  // Return value
+  //                         : 0 if succeeded.
+  //                          -1 if failed
+  //
+  int EnableNack(size_t max_nack_list_size);
+
+  // Disable NACK.
+  void DisableNack();
+
+  //
+  // Get a list of packets to be retransmitted.
+  //
+  // Input:
+  //    -round_trip_time_ms : estimate of the round-trip-time (in milliseconds).
+  // Return value           : list of packets to be retransmitted.
+  //
+  std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms) const;
+
+  //
+  // Get statistics of calls to GetAudio().
+  void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;
+
+ private:
+  // Resolves the decoder for an incoming packet; handles RED indirection.
+  const Decoder* RtpHeaderToDecoder(const RTPHeader& rtp_header,
+                                    uint8_t payload_type) const
+      EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
+
+  // Converts the current clock time to RTP timestamp units at the given rate.
+  uint32_t NowInTimestamp(int decoder_sampling_rate) const;
+
+  rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
+  int id_;  // TODO(henrik.lundin) Make const.
+  const Decoder* last_audio_decoder_ GUARDED_BY(crit_sect_);
+  AudioFrame::VADActivity previous_audio_activity_ GUARDED_BY(crit_sect_);
+  ACMResampler resampler_ GUARDED_BY(crit_sect_);
+  // Used in GetAudio, declared as member to avoid allocating every 10ms.
+  // TODO(henrik.lundin) Stack-allocate in GetAudio instead?
+  rtc::scoped_ptr<int16_t[]> audio_buffer_ GUARDED_BY(crit_sect_);
+  rtc::scoped_ptr<int16_t[]> last_audio_buffer_ GUARDED_BY(crit_sect_);
+  CallStatistics call_stats_ GUARDED_BY(crit_sect_);
+  NetEq* neteq_;  // Owned; created in the constructor, deleted in ~AcmReceiver.
+  // Decoders map is keyed by payload type
+  std::map<uint8_t, Decoder> decoders_ GUARDED_BY(crit_sect_);
+  bool vad_enabled_;
+  Clock* clock_;  // TODO(henrik.lundin) Make const if possible.
+  bool resampled_last_output_frame_ GUARDED_BY(crit_sect_);
+  rtc::Optional<int> last_packet_sample_rate_hz_ GUARDED_BY(crit_sect_);
+};
+
+}  // namespace acm2
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RECEIVER_H_
diff --git a/webrtc/modules/audio_coding/acm2/acm_receiver_unittest_oldapi.cc b/webrtc/modules/audio_coding/acm2/acm_receiver_unittest_oldapi.cc
new file mode 100644
index 0000000..8076687
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/acm_receiver_unittest_oldapi.cc
@@ -0,0 +1,369 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/acm2/acm_receiver.h"
+
+#include <algorithm>  // std::min
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h"
+#include "webrtc/modules/audio_coding/neteq/tools/rtp_generator.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/test/test_suite.h"
+#include "webrtc/test/testsupport/fileutils.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
+
+namespace webrtc {
+
+namespace acm2 {
+namespace {
+
+bool CodecsEqual(const CodecInst& codec_a, const CodecInst& codec_b) {
+    if (strcmp(codec_a.plname, codec_b.plname) != 0 ||
+        codec_a.plfreq != codec_b.plfreq ||
+        codec_a.pltype != codec_b.pltype ||
+        codec_b.channels != codec_a.channels)
+      return false;
+    return true;
+}
+
+struct CodecIdInst {
+  explicit CodecIdInst(RentACodec::CodecId codec_id) {
+    const auto codec_ix = RentACodec::CodecIndexFromId(codec_id);
+    EXPECT_TRUE(codec_ix);
+    id = *codec_ix;
+    const auto codec_inst = RentACodec::CodecInstById(codec_id);
+    EXPECT_TRUE(codec_inst);
+    inst = *codec_inst;
+  }
+  int id;
+  CodecInst inst;
+};
+
+}  // namespace
+
// Test fixture driving a full send->receive loop: audio is encoded by
// |acm_| and each produced packet is delivered back into |receiver_|
// through the AudioPacketizationCallback interface (SendData below).
class AcmReceiverTestOldApi : public AudioPacketizationCallback,
                              public ::testing::Test {
 protected:
  AcmReceiverTestOldApi()
      : timestamp_(0),
        packet_sent_(false),
        last_packet_send_timestamp_(timestamp_),
        last_frame_type_(kEmptyFrame) {
    AudioCodingModule::Config config;
    acm_.reset(new AudioCodingModuleImpl(config));
    receiver_.reset(new AcmReceiver(config));
  }

  ~AcmReceiverTestOldApi() {}

  void SetUp() override {
    ASSERT_TRUE(receiver_.get() != NULL);
    ASSERT_TRUE(acm_.get() != NULL);
    codecs_ = RentACodec::Database();

    acm_->InitializeReceiver();
    // Route packets produced by |acm_| to SendData() below.
    acm_->RegisterTransportCallback(this);

    rtp_header_.header.sequenceNumber = 0;
    rtp_header_.header.timestamp = 0;
    rtp_header_.header.markerBit = false;
    rtp_header_.header.ssrc = 0x12345678;  // Arbitrary.
    rtp_header_.header.numCSRCs = 0;
    rtp_header_.header.payloadType = 0;
    rtp_header_.frameType = kAudioFrameSpeech;
    rtp_header_.type.Audio.isCNG = false;
  }

  void TearDown() override {}

  // Encodes 10 ms frames of silence with the codec at database index
  // |codec_id| until at least one packet has been produced (and thereby,
  // via SendData(), inserted into |receiver_|). Re-registers the send
  // codec first if it differs from the currently registered one.
  void InsertOnePacketOfSilence(int codec_id) {
    CodecInst codec =
        *RentACodec::CodecInstById(*RentACodec::CodecIdFromIndex(codec_id));
    if (timestamp_ == 0) {  // This is the first time inserting audio.
      ASSERT_EQ(0, acm_->RegisterSendCodec(codec));
    } else {
      auto current_codec = acm_->SendCodec();
      ASSERT_TRUE(current_codec);
      if (!CodecsEqual(codec, *current_codec))
        ASSERT_EQ(0, acm_->RegisterSendCodec(codec));
    }
    AudioFrame frame;
    // Frame setup according to the codec.
    frame.sample_rate_hz_ = codec.plfreq;
    frame.samples_per_channel_ = codec.plfreq / 100;  // 10 ms.
    frame.num_channels_ = codec.channels;
    memset(frame.data_, 0, frame.samples_per_channel_ * frame.num_channels_ *
           sizeof(int16_t));
    packet_sent_ = false;
    last_packet_send_timestamp_ = timestamp_;
    // Keep feeding 10 ms frames until the encoder emits a packet
    // (SendData() sets |packet_sent_|).
    while (!packet_sent_) {
      frame.timestamp_ = timestamp_;
      timestamp_ += frame.samples_per_channel_;
      ASSERT_GE(acm_->Add10MsData(frame), 0);
    }
  }

  // Registers each codec in |ids| with |receiver_|, using the parameters
  // from the codec database.
  template <size_t N>
  void AddSetOfCodecs(const RentACodec::CodecId(&ids)[N]) {
    for (auto id : ids) {
      const auto i = RentACodec::CodecIndexFromId(id);
      ASSERT_TRUE(i);
      ASSERT_EQ(
          0, receiver_->AddCodec(*i, codecs_[*i].pltype, codecs_[*i].channels,
                                 codecs_[*i].plfreq, nullptr));
    }
  }

  // AudioPacketizationCallback implementation: receives packets from |acm_|
  // and inserts them into |receiver_|, updating the bookkeeping members.
  int SendData(FrameType frame_type,
               uint8_t payload_type,
               uint32_t timestamp,
               const uint8_t* payload_data,
               size_t payload_len_bytes,
               const RTPFragmentationHeader* fragmentation) override {
    if (frame_type == kEmptyFrame)
      return 0;

    rtp_header_.header.payloadType = payload_type;
    rtp_header_.frameType = frame_type;
    if (frame_type == kAudioFrameSpeech)
      rtp_header_.type.Audio.isCNG = false;
    else
      rtp_header_.type.Audio.isCNG = true;
    rtp_header_.header.timestamp = timestamp;

    int ret_val = receiver_->InsertPacket(
        rtp_header_,
        rtc::ArrayView<const uint8_t>(payload_data, payload_len_bytes));
    if (ret_val < 0) {
      assert(false);
      return -1;
    }
    rtp_header_.header.sequenceNumber++;
    packet_sent_ = true;
    last_frame_type_ = frame_type;
    return 0;
  }

  rtc::scoped_ptr<AcmReceiver> receiver_;
  rtc::ArrayView<const CodecInst> codecs_;
  rtc::scoped_ptr<AudioCodingModule> acm_;
  WebRtcRTPHeader rtp_header_;
  uint32_t timestamp_;
  bool packet_sent_;  // Set when SendData is called; reset when inserting audio.
  uint32_t last_packet_send_timestamp_;
  FrameType last_frame_type_;
};
+
+TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecGetCodec)) {
+  // Add codec.
+  for (size_t n = 0; n < codecs_.size(); ++n) {
+    if (n & 0x1)  // Just add codecs with odd index.
+      EXPECT_EQ(0,
+                receiver_->AddCodec(n, codecs_[n].pltype, codecs_[n].channels,
+                                    codecs_[n].plfreq, NULL));
+  }
+  // Get codec and compare.
+  for (size_t n = 0; n < codecs_.size(); ++n) {
+    CodecInst my_codec;
+    if (n & 0x1) {
+      // Codecs with odd index should match the reference.
+      EXPECT_EQ(0, receiver_->DecoderByPayloadType(codecs_[n].pltype,
+                                                   &my_codec));
+      EXPECT_TRUE(CodecsEqual(codecs_[n], my_codec));
+    } else {
+      // Codecs with even index are not registered.
+      EXPECT_EQ(-1, receiver_->DecoderByPayloadType(codecs_[n].pltype,
+                                                    &my_codec));
+    }
+  }
+}
+
+TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecChangePayloadType)) {
+  const CodecIdInst codec1(RentACodec::CodecId::kPCMA);
+  CodecInst codec2 = codec1.inst;
+  ++codec2.pltype;
+  CodecInst test_codec;
+
+  // Register the same codec with different payloads.
+  EXPECT_EQ(0, receiver_->AddCodec(codec1.id, codec1.inst.pltype,
+                                   codec1.inst.channels, codec1.inst.plfreq,
+                                   nullptr));
+  EXPECT_EQ(0, receiver_->AddCodec(codec1.id, codec2.pltype, codec2.channels,
+                                   codec2.plfreq, NULL));
+
+  // Both payload types should exist.
+  EXPECT_EQ(0,
+            receiver_->DecoderByPayloadType(codec1.inst.pltype, &test_codec));
+  EXPECT_EQ(true, CodecsEqual(codec1.inst, test_codec));
+  EXPECT_EQ(0, receiver_->DecoderByPayloadType(codec2.pltype, &test_codec));
+  EXPECT_EQ(true, CodecsEqual(codec2, test_codec));
+}
+
+TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecChangeCodecId)) {
+  const CodecIdInst codec1(RentACodec::CodecId::kPCMU);
+  CodecIdInst codec2(RentACodec::CodecId::kPCMA);
+  codec2.inst.pltype = codec1.inst.pltype;
+  CodecInst test_codec;
+
+  // Register the same payload type with different codec ID.
+  EXPECT_EQ(0, receiver_->AddCodec(codec1.id, codec1.inst.pltype,
+                                   codec1.inst.channels, codec1.inst.plfreq,
+                                   nullptr));
+  EXPECT_EQ(0, receiver_->AddCodec(codec2.id, codec2.inst.pltype,
+                                   codec2.inst.channels, codec2.inst.plfreq,
+                                   nullptr));
+
+  // Make sure that the last codec is used.
+  EXPECT_EQ(0,
+            receiver_->DecoderByPayloadType(codec2.inst.pltype, &test_codec));
+  EXPECT_EQ(true, CodecsEqual(codec2.inst, test_codec));
+}
+
+TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(AddCodecRemoveCodec)) {
+  const CodecIdInst codec(RentACodec::CodecId::kPCMA);
+  const int payload_type = codec.inst.pltype;
+  EXPECT_EQ(
+      0, receiver_->AddCodec(codec.id, codec.inst.pltype, codec.inst.channels,
+                             codec.inst.plfreq, nullptr));
+
+  // Remove non-existing codec should not fail. ACM1 legacy.
+  EXPECT_EQ(0, receiver_->RemoveCodec(payload_type + 1));
+
+  // Remove an existing codec.
+  EXPECT_EQ(0, receiver_->RemoveCodec(payload_type));
+
+  // Ask for the removed codec, must fail.
+  CodecInst ci;
+  EXPECT_EQ(-1, receiver_->DecoderByPayloadType(payload_type, &ci));
+}
+
// Verifies that last_output_sample_rate_hz() reports the codec's own rate,
// even when GetAudio() is asked to resample the output to another rate.
TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(SampleRate)) {
  const RentACodec::CodecId kCodecId[] = {RentACodec::CodecId::kISAC,
                                          RentACodec::CodecId::kISACSWB};
  AddSetOfCodecs(kCodecId);

  AudioFrame frame;
  const int kOutSampleRateHz = 8000;  // Different than codec sample rate.
  for (const auto codec_id : kCodecId) {
    const CodecIdInst codec(codec_id);
    // Number of 10 ms output frames per encoded packet.
    const int num_10ms_frames = codec.inst.pacsize / (codec.inst.plfreq / 100);
    InsertOnePacketOfSilence(codec.id);
    for (int k = 0; k < num_10ms_frames; ++k) {
      EXPECT_EQ(0, receiver_->GetAudio(kOutSampleRateHz, &frame));
    }
    EXPECT_EQ(codec.inst.plfreq, receiver_->last_output_sample_rate_hz());
  }
}
+
// Verifies post-decoding VAD: with VAD enabled, decoded silence is
// classified as passive; with VAD disabled, activity is reported unknown.
TEST_F(AcmReceiverTestOldApi, DISABLED_ON_ANDROID(PostdecodingVad)) {
  receiver_->EnableVad();
  EXPECT_TRUE(receiver_->vad_enabled());
  const CodecIdInst codec(RentACodec::CodecId::kPCM16Bwb);
  ASSERT_EQ(
      0, receiver_->AddCodec(codec.id, codec.inst.pltype, codec.inst.channels,
                             codec.inst.plfreq, nullptr));
  const int kNumPackets = 5;
  // Number of 10 ms output frames per encoded packet.
  const int num_10ms_frames = codec.inst.pacsize / (codec.inst.plfreq / 100);
  AudioFrame frame;
  for (int n = 0; n < kNumPackets; ++n) {
    InsertOnePacketOfSilence(codec.id);
    for (int k = 0; k < num_10ms_frames; ++k)
      ASSERT_EQ(0, receiver_->GetAudio(codec.inst.plfreq, &frame));
  }
  // Silence with VAD enabled should be flagged as passive.
  EXPECT_EQ(AudioFrame::kVadPassive, frame.vad_activity_);

  receiver_->DisableVad();
  EXPECT_FALSE(receiver_->vad_enabled());

  for (int n = 0; n < kNumPackets; ++n) {
    InsertOnePacketOfSilence(codec.id);
    for (int k = 0; k < num_10ms_frames; ++k)
      ASSERT_EQ(0, receiver_->GetAudio(codec.inst.plfreq, &frame));
  }
  // With VAD off, no activity classification is made.
  EXPECT_EQ(AudioFrame::kVadUnknown, frame.vad_activity_);
}
+
+#ifdef WEBRTC_CODEC_ISAC
+#define IF_ISAC_FLOAT(x) x
+#else
+#define IF_ISAC_FLOAT(x) DISABLED_##x
+#endif
+
// Verifies LastAudioCodec() and last_packet_sample_rate_hz(): they are
// undefined until a speech packet arrives, and DTX/CNG packets must not
// overwrite the last *audio* codec.
TEST_F(AcmReceiverTestOldApi,
       DISABLED_ON_ANDROID(IF_ISAC_FLOAT(LastAudioCodec))) {
  const RentACodec::CodecId kCodecId[] = {
      RentACodec::CodecId::kISAC, RentACodec::CodecId::kPCMA,
      RentACodec::CodecId::kISACSWB, RentACodec::CodecId::kPCM16Bswb32kHz};
  AddSetOfCodecs(kCodecId);

  const RentACodec::CodecId kCngId[] = {
      // Not including full-band.
      RentACodec::CodecId::kCNNB, RentACodec::CodecId::kCNWB,
      RentACodec::CodecId::kCNSWB};
  AddSetOfCodecs(kCngId);

  // Register CNG at sender side.
  for (auto id : kCngId)
    ASSERT_EQ(0, acm_->RegisterSendCodec(CodecIdInst(id).inst));

  CodecInst codec;
  // No audio payload is received.
  EXPECT_EQ(-1, receiver_->LastAudioCodec(&codec));

  // Start with sending DTX.
  ASSERT_EQ(0, acm_->SetVAD(true, true, VADVeryAggr));
  packet_sent_ = false;
  InsertOnePacketOfSilence(CodecIdInst(kCodecId[0]).id);  // Enough to test
                                                          // with one codec.
  ASSERT_TRUE(packet_sent_);
  EXPECT_EQ(kAudioFrameCN, last_frame_type_);

  // Has received, only, DTX. Last Audio codec is undefined.
  EXPECT_EQ(-1, receiver_->LastAudioCodec(&codec));
  EXPECT_FALSE(receiver_->last_packet_sample_rate_hz());

  for (auto id : kCodecId) {
    const CodecIdInst c(id);

    // Set DTX off to send audio payload.
    acm_->SetVAD(false, false, VADAggr);
    packet_sent_ = false;
    InsertOnePacketOfSilence(c.id);

    // Sanity check that an audio payload was actually received, and that it
    // is of type "speech."
    ASSERT_TRUE(packet_sent_);
    ASSERT_EQ(kAudioFrameSpeech, last_frame_type_);
    EXPECT_EQ(rtc::Optional<int>(c.inst.plfreq),
              receiver_->last_packet_sample_rate_hz());

    // Set VAD on to send DTX. Then check if the "Last Audio codec" returns
    // the expected codec.
    acm_->SetVAD(true, true, VADAggr);

    // Keep encoding until a DTX packet is sent.
    while (last_frame_type_ != kAudioFrameCN) {
      packet_sent_ = false;
      InsertOnePacketOfSilence(c.id);
      ASSERT_TRUE(packet_sent_);
    }
    // The DTX packets must not have changed the reported audio codec/rate.
    EXPECT_EQ(rtc::Optional<int>(c.inst.plfreq),
              receiver_->last_packet_sample_rate_hz());
    EXPECT_EQ(0, receiver_->LastAudioCodec(&codec));
    EXPECT_TRUE(CodecsEqual(c.inst, codec));
  }
}
+
+}  // namespace acm2
+
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/acm_resampler.cc b/webrtc/modules/audio_coding/acm2/acm_resampler.cc
new file mode 100644
index 0000000..e38cd94
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/acm_resampler.cc
@@ -0,0 +1,68 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/acm2/acm_resampler.h"
+
+#include <assert.h>
+#include <string.h>
+
+#include "webrtc/common_audio/resampler/include/resampler.h"
+#include "webrtc/system_wrappers/include/logging.h"
+
+namespace webrtc {
+namespace acm2 {
+
+ACMResampler::ACMResampler() {
+}
+
+ACMResampler::~ACMResampler() {
+}
+
// Resamples 10 ms of interleaved audio from |in_freq_hz| to |out_freq_hz|.
// |in_audio| must hold 10 ms per channel; the output is written to
// |out_audio| (capacity |out_capacity_samples| total samples). Returns the
// number of output samples per channel, or -1 on error.
int ACMResampler::Resample10Msec(const int16_t* in_audio,
                                 int in_freq_hz,
                                 int out_freq_hz,
                                 int num_audio_channels,
                                 size_t out_capacity_samples,
                                 int16_t* out_audio) {
  // Total samples (all channels) in a 10 ms block at each rate.
  size_t in_length = static_cast<size_t>(in_freq_hz * num_audio_channels / 100);
  int out_length = out_freq_hz * num_audio_channels / 100;
  if (in_freq_hz == out_freq_hz) {
    // Same rate: just copy the samples through.
    if (out_capacity_samples < in_length) {
      assert(false);
      return -1;
    }
    memcpy(out_audio, in_audio, in_length * sizeof(int16_t));
    return static_cast<int>(in_length / num_audio_channels);
  }

  if (resampler_.InitializeIfNeeded(in_freq_hz, out_freq_hz,
                                    num_audio_channels) != 0) {
    LOG_FERR3(LS_ERROR, InitializeIfNeeded, in_freq_hz, out_freq_hz,
              num_audio_channels);
    return -1;
  }

  // Resample() returns the total number of output samples, or -1 on error.
  out_length =
      resampler_.Resample(in_audio, in_length, out_audio, out_capacity_samples);
  if (out_length == -1) {
    LOG_FERR4(LS_ERROR,
              Resample,
              in_audio,
              in_length,
              out_audio,
              out_capacity_samples);
    return -1;
  }

  return out_length / num_audio_channels;
}
+
+}  // namespace acm2
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/acm_resampler.h b/webrtc/modules/audio_coding/acm2/acm_resampler.h
new file mode 100644
index 0000000..700fefa
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/acm_resampler.h
@@ -0,0 +1,39 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RESAMPLER_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RESAMPLER_H_
+
+#include "webrtc/common_audio/resampler/include/push_resampler.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+namespace acm2 {
+
// Thin wrapper around PushResampler used by the ACM to resample 10 ms
// blocks of interleaved audio.
class ACMResampler {
 public:
  ACMResampler();
  ~ACMResampler();

  // Resamples 10 ms of audio from |in_freq_hz| to |out_freq_hz|, writing at
  // most |out_capacity_samples| samples to |out_audio|. Returns the number
  // of output samples per channel, or -1 on error.
  int Resample10Msec(const int16_t* in_audio,
                     int in_freq_hz,
                     int out_freq_hz,
                     int num_audio_channels,
                     size_t out_capacity_samples,
                     int16_t* out_audio);

 private:
  PushResampler<int16_t> resampler_;
};
+
+}  // namespace acm2
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_RESAMPLER_H_
diff --git a/webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.cc b/webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.cc
new file mode 100644
index 0000000..3a89a77
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.cc
@@ -0,0 +1,158 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
+
+namespace webrtc {
+namespace test {
+
+AcmSendTestOldApi::AcmSendTestOldApi(InputAudioFile* audio_source,
+                                     int source_rate_hz,
+                                     int test_duration_ms)
+    : clock_(0),
+      acm_(webrtc::AudioCodingModule::Create(0, &clock_)),
+      audio_source_(audio_source),
+      source_rate_hz_(source_rate_hz),
+      input_block_size_samples_(
+          static_cast<size_t>(source_rate_hz_ * kBlockSizeMs / 1000)),
+      codec_registered_(false),
+      test_duration_ms_(test_duration_ms),
+      frame_type_(kAudioFrameSpeech),
+      payload_type_(0),
+      timestamp_(0),
+      sequence_number_(0) {
+  input_frame_.sample_rate_hz_ = source_rate_hz_;
+  input_frame_.num_channels_ = 1;
+  input_frame_.samples_per_channel_ = input_block_size_samples_;
+  assert(input_block_size_samples_ * input_frame_.num_channels_ <=
+         AudioFrame::kMaxDataSizeSamples);
+  acm_->RegisterTransportCallback(this);
+}
+
+bool AcmSendTestOldApi::RegisterCodec(const char* payload_name,
+                                      int sampling_freq_hz,
+                                      int channels,
+                                      int payload_type,
+                                      int frame_size_samples) {
+  CodecInst codec;
+  RTC_CHECK_EQ(0, AudioCodingModule::Codec(payload_name, &codec,
+                                           sampling_freq_hz, channels));
+  codec.pltype = payload_type;
+  codec.pacsize = frame_size_samples;
+  codec_registered_ = (acm_->RegisterSendCodec(codec) == 0);
+  input_frame_.num_channels_ = channels;
+  assert(input_block_size_samples_ * input_frame_.num_channels_ <=
+         AudioFrame::kMaxDataSizeSamples);
+  return codec_registered_;
+}
+
+bool AcmSendTestOldApi::RegisterExternalCodec(
+    AudioEncoder* external_speech_encoder) {
+  acm_->RegisterExternalSendCodec(external_speech_encoder);
+  input_frame_.num_channels_ = external_speech_encoder->NumChannels();
+  assert(input_block_size_samples_ * input_frame_.num_channels_ <=
+         AudioFrame::kMaxDataSizeSamples);
+  return codec_registered_ = true;
+}
+
// Feeds 10 ms blocks of audio into the ACM until the encoder produces a
// packet (delivered through the SendData() callback, which sets
// |data_to_send_|), then returns it as a Packet owned by the caller.
// Returns NULL when the test duration is exceeded or the payload type is
// filtered out.
Packet* AcmSendTestOldApi::NextPacket() {
  assert(codec_registered_);
  // |filter_| is inherited from PacketSource.
  if (filter_.test(static_cast<size_t>(payload_type_))) {
    // This payload type should be filtered out. Since the payload type is the
    // same throughout the whole test run, no packet at all will be delivered.
    // We can just as well signal that the test is over by returning NULL.
    return NULL;
  }
  // Insert audio and process until one packet is produced.
  while (clock_.TimeInMilliseconds() < test_duration_ms_) {
    clock_.AdvanceTimeMilliseconds(kBlockSizeMs);
    RTC_CHECK(
        audio_source_->Read(input_block_size_samples_, input_frame_.data_));
    if (input_frame_.num_channels_ > 1) {
      // Replicate the mono source into all channels (in-place safe).
      InputAudioFile::DuplicateInterleaved(input_frame_.data_,
                                           input_block_size_samples_,
                                           input_frame_.num_channels_,
                                           input_frame_.data_);
    }
    data_to_send_ = false;
    RTC_CHECK_GE(acm_->Add10MsData(input_frame_), 0);
    input_frame_.timestamp_ += static_cast<uint32_t>(input_block_size_samples_);
    if (data_to_send_) {
      // Encoded packet received.
      return CreatePacket();
    }
  }
  // Test ended.
  return NULL;
}
+
+// This method receives the callback from ACM when a new packet is produced.
+int32_t AcmSendTestOldApi::SendData(
+    FrameType frame_type,
+    uint8_t payload_type,
+    uint32_t timestamp,
+    const uint8_t* payload_data,
+    size_t payload_len_bytes,
+    const RTPFragmentationHeader* fragmentation) {
+  // Store the packet locally.
+  frame_type_ = frame_type;
+  payload_type_ = payload_type;
+  timestamp_ = timestamp;
+  last_payload_vec_.assign(payload_data, payload_data + payload_len_bytes);
+  assert(last_payload_vec_.size() == payload_len_bytes);
+  data_to_send_ = true;
+  return 0;
+}
+
+Packet* AcmSendTestOldApi::CreatePacket() {
+  const size_t kRtpHeaderSize = 12;
+  size_t allocated_bytes = last_payload_vec_.size() + kRtpHeaderSize;
+  uint8_t* packet_memory = new uint8_t[allocated_bytes];
+  // Populate the header bytes.
+  packet_memory[0] = 0x80;
+  packet_memory[1] = static_cast<uint8_t>(payload_type_);
+  packet_memory[2] = (sequence_number_ >> 8) & 0xFF;
+  packet_memory[3] = (sequence_number_) & 0xFF;
+  packet_memory[4] = (timestamp_ >> 24) & 0xFF;
+  packet_memory[5] = (timestamp_ >> 16) & 0xFF;
+  packet_memory[6] = (timestamp_ >> 8) & 0xFF;
+  packet_memory[7] = timestamp_ & 0xFF;
+  // Set SSRC to 0x12345678.
+  packet_memory[8] = 0x12;
+  packet_memory[9] = 0x34;
+  packet_memory[10] = 0x56;
+  packet_memory[11] = 0x78;
+
+  ++sequence_number_;
+
+  // Copy the payload data.
+  memcpy(packet_memory + kRtpHeaderSize,
+         &last_payload_vec_[0],
+         last_payload_vec_.size());
+  Packet* packet =
+      new Packet(packet_memory, allocated_bytes, clock_.TimeInMilliseconds());
+  assert(packet);
+  assert(packet->valid_header());
+  return packet;
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.h b/webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.h
new file mode 100644
index 0000000..ce68196
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.h
@@ -0,0 +1,91 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_SEND_TEST_OLDAPI_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_SEND_TEST_OLDAPI_H_
+
+#include <vector>
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
+#include "webrtc/system_wrappers/include/clock.h"
+
+namespace webrtc {
+class AudioEncoder;
+
+namespace test {
+class InputAudioFile;
+class Packet;
+
+class AcmSendTestOldApi : public AudioPacketizationCallback,
+                          public PacketSource {
+ public:
+  AcmSendTestOldApi(InputAudioFile* audio_source,
+                    int source_rate_hz,
+                    int test_duration_ms);
+  virtual ~AcmSendTestOldApi() {}
+
+  // Registers the send codec. Returns true on success, false otherwise.
+  bool RegisterCodec(const char* payload_name,
+                     int sampling_freq_hz,
+                     int channels,
+                     int payload_type,
+                     int frame_size_samples);
+
+  // Registers an external send codec. Returns true on success, false otherwise.
+  bool RegisterExternalCodec(AudioEncoder* external_speech_encoder);
+
+  // Returns the next encoded packet. Returns NULL if the test duration was
+  // exceeded. Ownership of the packet is handed over to the caller.
+  // Inherited from PacketSource.
+  Packet* NextPacket();
+
+  // Inherited from AudioPacketizationCallback.
+  int32_t SendData(FrameType frame_type,
+                   uint8_t payload_type,
+                   uint32_t timestamp,
+                   const uint8_t* payload_data,
+                   size_t payload_len_bytes,
+                   const RTPFragmentationHeader* fragmentation) override;
+
+  AudioCodingModule* acm() { return acm_.get(); }
+
+ private:
+  static const int kBlockSizeMs = 10;
+
+  // Creates a Packet object from the last packet produced by ACM (and received
+  // through the SendData method as a callback). Ownership of the new Packet
+  // object is transferred to the caller.
+  Packet* CreatePacket();
+
+  SimulatedClock clock_;
+  rtc::scoped_ptr<AudioCodingModule> acm_;
+  InputAudioFile* audio_source_;
+  int source_rate_hz_;
+  const size_t input_block_size_samples_;
+  AudioFrame input_frame_;
+  bool codec_registered_;
+  int test_duration_ms_;
+  // The following member variables are set whenever SendData() is called.
+  FrameType frame_type_;
+  int payload_type_;
+  uint32_t timestamp_;
+  uint16_t sequence_number_;
+  std::vector<uint8_t> last_payload_vec_;
+  bool data_to_send_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(AcmSendTestOldApi);
+};
+
+}  // namespace test
+}  // namespace webrtc
+#endif  // WEBRTC_MODULES_AUDIO_CODING_ACM2_ACM_SEND_TEST_OLDAPI_H_
diff --git a/webrtc/modules/audio_coding/acm2/audio_coding_module.cc b/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
new file mode 100644
index 0000000..034de32
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
@@ -0,0 +1,98 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/trace.h"
+
+namespace webrtc {
+
+// Create module
+AudioCodingModule* AudioCodingModule::Create(int id) {
+  Config config;
+  config.id = id;
+  config.clock = Clock::GetRealTimeClock();
+  return Create(config);
+}
+
+AudioCodingModule* AudioCodingModule::Create(int id, Clock* clock) {
+  Config config;
+  config.id = id;
+  config.clock = clock;
+  return Create(config);
+}
+
// Creates the concrete implementation. Ownership of the returned module
// passes to the caller.
AudioCodingModule* AudioCodingModule::Create(const Config& config) {
  return new acm2::AudioCodingModuleImpl(config);
}
+
// Returns the total number of codecs in the rent-a-codec database.
int AudioCodingModule::NumberOfCodecs() {
  return static_cast<int>(acm2::RentACodec::NumberOfCodecs());
}
+
+int AudioCodingModule::Codec(int list_id, CodecInst* codec) {
+  auto codec_id = acm2::RentACodec::CodecIdFromIndex(list_id);
+  if (!codec_id)
+    return -1;
+  auto ci = acm2::RentACodec::CodecInstById(*codec_id);
+  if (!ci)
+    return -1;
+  *codec = *ci;
+  return 0;
+}
+
+int AudioCodingModule::Codec(const char* payload_name,
+                             CodecInst* codec,
+                             int sampling_freq_hz,
+                             int channels) {
+  rtc::Optional<CodecInst> ci = acm2::RentACodec::CodecInstByParams(
+      payload_name, sampling_freq_hz, channels);
+  if (ci) {
+    *codec = *ci;
+    return 0;
+  } else {
+    // We couldn't find a matching codec, so set the parameters to unacceptable
+    // values and return.
+    codec->plname[0] = '\0';
+    codec->pltype = -1;
+    codec->pacsize = 0;
+    codec->rate = 0;
+    codec->plfreq = 0;
+    return -1;
+  }
+}
+
+int AudioCodingModule::Codec(const char* payload_name,
+                             int sampling_freq_hz,
+                             int channels) {
+  rtc::Optional<acm2::RentACodec::CodecId> ci =
+      acm2::RentACodec::CodecIdByParams(payload_name, sampling_freq_hz,
+                                        channels);
+  if (!ci)
+    return -1;
+  rtc::Optional<int> i = acm2::RentACodec::CodecIndexFromId(*ci);
+  return i ? *i : -1;
+}
+
+// Checks the validity of the parameters of the given codec
+bool AudioCodingModule::IsCodecValid(const CodecInst& codec) {
+  bool valid = acm2::RentACodec::IsCodecValid(codec);
+  if (!valid)
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, -1,
+                 "Invalid codec setting");
+  return valid;
+}
+
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.cc b/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.cc
new file mode 100644
index 0000000..5f61ef6
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.cc
@@ -0,0 +1,786 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <vector>
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/safe_conversions.h"
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
+#include "webrtc/modules/audio_coding/acm2/acm_resampler.h"
+#include "webrtc/modules/audio_coding/acm2/call_statistics.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/include/logging.h"
+#include "webrtc/system_wrappers/include/metrics.h"
+#include "webrtc/system_wrappers/include/rw_lock_wrapper.h"
+#include "webrtc/system_wrappers/include/trace.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+namespace {
+
+// TODO(turajs): the same functionality is used in NetEq. If both classes
+// need them, make it a static function in ACMCodecDB.
+bool IsCodecRED(const CodecInst& codec) {
+  return (STR_CASE_CMP(codec.plname, "RED") == 0);
+}
+
+bool IsCodecCN(const CodecInst& codec) {
+  return (STR_CASE_CMP(codec.plname, "CN") == 0);
+}
+
+// Stereo-to-mono can be used as in-place.
+int DownMix(const AudioFrame& frame,
+            size_t length_out_buff,
+            int16_t* out_buff) {
+  if (length_out_buff < frame.samples_per_channel_) {
+    return -1;
+  }
+  for (size_t n = 0; n < frame.samples_per_channel_; ++n)
+    out_buff[n] = (frame.data_[2 * n] + frame.data_[2 * n + 1]) >> 1;
+  return 0;
+}
+
+// Mono-to-stereo can be used as in-place.
+int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) {
+  if (length_out_buff < frame.samples_per_channel_) {
+    return -1;
+  }
+  for (size_t n = frame.samples_per_channel_; n != 0; --n) {
+    size_t i = n - 1;
+    int16_t sample = frame.data_[i];
+    out_buff[2 * i + 1] = sample;
+    out_buff[2 * i] = sample;
+  }
+  return 0;
+}
+
+void ConvertEncodedInfoToFragmentationHeader(
+    const AudioEncoder::EncodedInfo& info,
+    RTPFragmentationHeader* frag) {
+  if (info.redundant.empty()) {
+    frag->fragmentationVectorSize = 0;
+    return;
+  }
+
+  frag->VerifyAndAllocateFragmentationHeader(
+      static_cast<uint16_t>(info.redundant.size()));
+  frag->fragmentationVectorSize = static_cast<uint16_t>(info.redundant.size());
+  size_t offset = 0;
+  for (size_t i = 0; i < info.redundant.size(); ++i) {
+    frag->fragmentationOffset[i] = offset;
+    offset += info.redundant[i].encoded_bytes;
+    frag->fragmentationLength[i] = info.redundant[i].encoded_bytes;
+    frag->fragmentationTimeDiff[i] = rtc::checked_cast<uint16_t>(
+        info.encoded_timestamp - info.redundant[i].encoded_timestamp);
+    frag->fragmentationPlType[i] = info.redundant[i].payload_type;
+  }
+}
+}  // namespace
+
+void AudioCodingModuleImpl::ChangeLogger::MaybeLog(int value) {
+  if (value != last_value_ || first_time_) {
+    first_time_ = false;
+    last_value_ = value;
+    RTC_HISTOGRAM_COUNTS_100(histogram_name_, value);
+  }
+}
+
+AudioCodingModuleImpl::AudioCodingModuleImpl(
+    const AudioCodingModule::Config& config)
+    : acm_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+      id_(config.id),
+      expected_codec_ts_(0xD87F3F9F),
+      expected_in_ts_(0xD87F3F9F),
+      receiver_(config),
+      bitrate_logger_("WebRTC.Audio.TargetBitrateInKbps"),
+      previous_pltype_(255),
+      receiver_initialized_(false),
+      first_10ms_data_(false),
+      first_frame_(true),
+      callback_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+      packetization_callback_(NULL),
+      vad_callback_(NULL) {
+  if (InitializeReceiverSafe() < 0) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
+                 "Cannot initialize receiver");
+  }
+  WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, id_, "Created");
+}
+
+AudioCodingModuleImpl::~AudioCodingModuleImpl() = default;
+
+int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
+  AudioEncoder::EncodedInfo encoded_info;
+  uint8_t previous_pltype;
+
+  // Check if there is an encoder before.
+  if (!HaveValidEncoder("Process"))
+    return -1;
+
+  AudioEncoder* audio_encoder = codec_manager_.CurrentEncoder();
+  // Scale the timestamp to the codec's RTP timestamp rate.
+  uint32_t rtp_timestamp =
+      first_frame_ ? input_data.input_timestamp
+                   : last_rtp_timestamp_ +
+                         rtc::CheckedDivExact(
+                             input_data.input_timestamp - last_timestamp_,
+                             static_cast<uint32_t>(rtc::CheckedDivExact(
+                                 audio_encoder->SampleRateHz(),
+                                 audio_encoder->RtpTimestampRateHz())));
+  last_timestamp_ = input_data.input_timestamp;
+  last_rtp_timestamp_ = rtp_timestamp;
+  first_frame_ = false;
+
+  encode_buffer_.SetSize(audio_encoder->MaxEncodedBytes());
+  encoded_info = audio_encoder->Encode(
+      rtp_timestamp, rtc::ArrayView<const int16_t>(
+                         input_data.audio, input_data.audio_channel *
+                                               input_data.length_per_channel),
+      encode_buffer_.size(), encode_buffer_.data());
+  encode_buffer_.SetSize(encoded_info.encoded_bytes);
+  bitrate_logger_.MaybeLog(audio_encoder->GetTargetBitrate() / 1000);
+  if (encode_buffer_.size() == 0 && !encoded_info.send_even_if_empty) {
+    // Not enough data.
+    return 0;
+  }
+  previous_pltype = previous_pltype_;  // Read it while we have the critsect.
+
+  RTPFragmentationHeader my_fragmentation;
+  ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
+  FrameType frame_type;
+  if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) {
+    frame_type = kEmptyFrame;
+    encoded_info.payload_type = previous_pltype;
+  } else {
+    RTC_DCHECK_GT(encode_buffer_.size(), 0u);
+    frame_type = encoded_info.speech ? kAudioFrameSpeech : kAudioFrameCN;
+  }
+
+  {
+    CriticalSectionScoped lock(callback_crit_sect_.get());
+    if (packetization_callback_) {
+      packetization_callback_->SendData(
+          frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp,
+          encode_buffer_.data(), encode_buffer_.size(),
+          my_fragmentation.fragmentationVectorSize > 0 ? &my_fragmentation
+                                                       : nullptr);
+    }
+
+    if (vad_callback_) {
+      // Callback with VAD decision.
+      vad_callback_->InFrameType(frame_type);
+    }
+  }
+  previous_pltype_ = encoded_info.payload_type;
+  return static_cast<int32_t>(encode_buffer_.size());
+}
+
+/////////////////////////////////////////
+//   Sender
+//
+
+// Can be called multiple times for Codec, CNG, RED.
+int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  return codec_manager_.RegisterEncoder(send_codec);
+}
+
+void AudioCodingModuleImpl::RegisterExternalSendCodec(
+    AudioEncoder* external_speech_encoder) {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  codec_manager_.RegisterEncoder(external_speech_encoder);
+}
+
+// Get current send codec.
+rtc::Optional<CodecInst> AudioCodingModuleImpl::SendCodec() const {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  return codec_manager_.GetCodecInst();
+}
+
+// Get current send frequency.
+int AudioCodingModuleImpl::SendFrequency() const {
+  WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
+               "SendFrequency()");
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+
+  if (!codec_manager_.CurrentEncoder()) {
+    WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
+                 "SendFrequency Failed, no codec is registered");
+    return -1;
+  }
+
+  return codec_manager_.CurrentEncoder()->SampleRateHz();
+}
+
+void AudioCodingModuleImpl::SetBitRate(int bitrate_bps) {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  if (codec_manager_.CurrentEncoder()) {
+    codec_manager_.CurrentEncoder()->SetTargetBitrate(bitrate_bps);
+  }
+}
+
+// Register a transport callback which will be called to deliver
+// the encoded buffers.
+int AudioCodingModuleImpl::RegisterTransportCallback(
+    AudioPacketizationCallback* transport) {
+  CriticalSectionScoped lock(callback_crit_sect_.get());
+  packetization_callback_ = transport;
+  return 0;
+}
+
+// Add 10MS of raw (PCM) audio data to the encoder.
+int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) {
+  InputData input_data;
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  int r = Add10MsDataInternal(audio_frame, &input_data);
+  return r < 0 ? r : Encode(input_data);
+}
+
+int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
+                                               InputData* input_data) {
+  if (audio_frame.samples_per_channel_ == 0) {
+    assert(false);
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
+                 "Cannot Add 10 ms audio, payload length is zero");
+    return -1;
+  }
+
+  if (audio_frame.sample_rate_hz_ > 48000) {
+    assert(false);
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
+                 "Cannot Add 10 ms audio, input frequency not valid");
+    return -1;
+  }
+
+  // If the length and frequency match. We currently just support raw PCM.
+  if (static_cast<size_t>(audio_frame.sample_rate_hz_ / 100) !=
+      audio_frame.samples_per_channel_) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
+                 "Cannot Add 10 ms audio, input frequency and length doesn't"
+                 " match");
+    return -1;
+  }
+
+  if (audio_frame.num_channels_ != 1 && audio_frame.num_channels_ != 2) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
+                 "Cannot Add 10 ms audio, invalid number of channels.");
+    return -1;
+  }
+
+  // Do we have a codec registered?
+  if (!HaveValidEncoder("Add10MsData")) {
+    return -1;
+  }
+
+  const AudioFrame* ptr_frame;
+  // Perform a resampling, also down-mix if it is required and can be
+  // performed before resampling (a down mix prior to resampling will take
+  // place if both primary and secondary encoders are mono and input is in
+  // stereo).
+  if (PreprocessToAddData(audio_frame, &ptr_frame) < 0) {
+    return -1;
+  }
+
+  // Check whether we need an up-mix or a down-mix.
+  bool remix = ptr_frame->num_channels_ !=
+               codec_manager_.CurrentEncoder()->NumChannels();
+
+  if (remix) {
+    if (ptr_frame->num_channels_ == 1) {
+      if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0)
+        return -1;
+    } else {
+      if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0)
+        return -1;
+    }
+  }
+
+  // When adding data to encoders this pointer is pointing to an audio buffer
+  // with correct number of channels.
+  const int16_t* ptr_audio = ptr_frame->data_;
+
+  // For pushing data to primary, point the |ptr_audio| to correct buffer.
+  if (codec_manager_.CurrentEncoder()->NumChannels() !=
+      ptr_frame->num_channels_)
+    ptr_audio = input_data->buffer;
+
+  input_data->input_timestamp = ptr_frame->timestamp_;
+  input_data->audio = ptr_audio;
+  input_data->length_per_channel = ptr_frame->samples_per_channel_;
+  input_data->audio_channel = codec_manager_.CurrentEncoder()->NumChannels();
+
+  return 0;
+}
+
+// Perform a resampling and down-mix if required. We down-mix only if
+// encoder is mono and input is stereo. In case of dual-streaming, both
+// encoders have to be mono for down-mix to take place.
+// |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing
+// is required, |*ptr_out| points to |in_frame|.
+int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
+                                               const AudioFrame** ptr_out) {
+  bool resample = (in_frame.sample_rate_hz_ !=
+                   codec_manager_.CurrentEncoder()->SampleRateHz());
+
+  // This variable is true if primary codec and secondary codec (if exists)
+  // are both mono and input is stereo.
+  bool down_mix = (in_frame.num_channels_ == 2) &&
+                  (codec_manager_.CurrentEncoder()->NumChannels() == 1);
+
+  if (!first_10ms_data_) {
+    expected_in_ts_ = in_frame.timestamp_;
+    expected_codec_ts_ = in_frame.timestamp_;
+    first_10ms_data_ = true;
+  } else if (in_frame.timestamp_ != expected_in_ts_) {
+    // TODO(turajs): Do we need a warning here?
+    expected_codec_ts_ +=
+        (in_frame.timestamp_ - expected_in_ts_) *
+        static_cast<uint32_t>(
+            (static_cast<double>(
+                 codec_manager_.CurrentEncoder()->SampleRateHz()) /
+             static_cast<double>(in_frame.sample_rate_hz_)));
+    expected_in_ts_ = in_frame.timestamp_;
+  }
+
+
+  if (!down_mix && !resample) {
+    // No pre-processing is required.
+    expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
+    expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
+    *ptr_out = &in_frame;
+    return 0;
+  }
+
+  *ptr_out = &preprocess_frame_;
+  preprocess_frame_.num_channels_ = in_frame.num_channels_;
+  int16_t audio[WEBRTC_10MS_PCM_AUDIO];
+  const int16_t* src_ptr_audio = in_frame.data_;
+  int16_t* dest_ptr_audio = preprocess_frame_.data_;
+  if (down_mix) {
+    // If a resampling is required the output of a down-mix is written into a
+    // local buffer, otherwise, it will be written to the output frame.
+    if (resample)
+      dest_ptr_audio = audio;
+    if (DownMix(in_frame, WEBRTC_10MS_PCM_AUDIO, dest_ptr_audio) < 0)
+      return -1;
+    preprocess_frame_.num_channels_ = 1;
+    // Set the input of the resampler to the down-mixed signal.
+    src_ptr_audio = audio;
+  }
+
+  preprocess_frame_.timestamp_ = expected_codec_ts_;
+  preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_;
+  preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_;
+  // If it is required, we have to do a resampling.
+  if (resample) {
+    // The result of the resampler is written to output frame.
+    dest_ptr_audio = preprocess_frame_.data_;
+
+    int samples_per_channel = resampler_.Resample10Msec(
+        src_ptr_audio, in_frame.sample_rate_hz_,
+        codec_manager_.CurrentEncoder()->SampleRateHz(),
+        preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples,
+        dest_ptr_audio);
+
+    if (samples_per_channel < 0) {
+      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
+                   "Cannot add 10 ms audio, resampling failed");
+      return -1;
+    }
+    preprocess_frame_.samples_per_channel_ =
+        static_cast<size_t>(samples_per_channel);
+    preprocess_frame_.sample_rate_hz_ =
+        codec_manager_.CurrentEncoder()->SampleRateHz();
+  }
+
+  expected_codec_ts_ +=
+      static_cast<uint32_t>(preprocess_frame_.samples_per_channel_);
+  expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
+
+  return 0;
+}
+
+/////////////////////////////////////////
+//   (RED) Redundant Coding
+//
+
+bool AudioCodingModuleImpl::REDStatus() const {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  return codec_manager_.red_enabled();
+}
+
+// Configure RED status i.e on/off.
+int AudioCodingModuleImpl::SetREDStatus(
+#ifdef WEBRTC_CODEC_RED
+    bool enable_red) {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  return codec_manager_.SetCopyRed(enable_red) ? 0 : -1;
+#else
+    bool /* enable_red */) {
+  WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
+               "  WEBRTC_CODEC_RED is undefined");
+  return -1;
+#endif
+}
+
+/////////////////////////////////////////
+//   (FEC) Forward Error Correction (codec internal)
+//
+
+bool AudioCodingModuleImpl::CodecFEC() const {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  return codec_manager_.codec_fec_enabled();
+}
+
+int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  return codec_manager_.SetCodecFEC(enable_codec_fec);
+}
+
+int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  if (HaveValidEncoder("SetPacketLossRate")) {
+    codec_manager_.CurrentEncoder()->SetProjectedPacketLossRate(loss_rate /
+                                                                100.0);
+  }
+  return 0;
+}
+
+/////////////////////////////////////////
+//   (VAD) Voice Activity Detection
+//
+int AudioCodingModuleImpl::SetVAD(bool enable_dtx,
+                                  bool enable_vad,
+                                  ACMVADMode mode) {
+  // Note: |enable_vad| is not used; VAD is enabled based on the DTX setting.
+  RTC_DCHECK_EQ(enable_dtx, enable_vad);
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  return codec_manager_.SetVAD(enable_dtx, mode);
+}
+
+// Get VAD/DTX settings.
+int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled,
+                               ACMVADMode* mode) const {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  codec_manager_.VAD(dtx_enabled, vad_enabled, mode);
+  return 0;
+}
+
+/////////////////////////////////////////
+//   Receiver
+//
+
+int AudioCodingModuleImpl::InitializeReceiver() {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  return InitializeReceiverSafe();
+}
+
+// Initialize receiver, resets codec database etc.
+int AudioCodingModuleImpl::InitializeReceiverSafe() {
+  // If the receiver is already initialized then we want to destroy any
+  // existing decoders. After a call to this function, we should have a clean
+  // start-up.
+  if (receiver_initialized_) {
+    if (receiver_.RemoveAllCodecs() < 0)
+      return -1;
+  }
+  receiver_.set_id(id_);
+  receiver_.ResetInitialDelay();
+  receiver_.SetMinimumDelay(0);
+  receiver_.SetMaximumDelay(0);
+  receiver_.FlushBuffers();
+
+  // Register RED and CN.
+  auto db = RentACodec::Database();
+  for (size_t i = 0; i < db.size(); i++) {
+    if (IsCodecRED(db[i]) || IsCodecCN(db[i])) {
+      if (receiver_.AddCodec(static_cast<int>(i),
+                             static_cast<uint8_t>(db[i].pltype), 1,
+                             db[i].plfreq, nullptr) < 0) {
+        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
+                     "Cannot register master codec.");
+        return -1;
+      }
+    }
+  }
+  receiver_initialized_ = true;
+  return 0;
+}
+
+// Get current receive frequency.
+int AudioCodingModuleImpl::ReceiveFrequency() const {
+  const auto last_packet_sample_rate = receiver_.last_packet_sample_rate_hz();
+  return last_packet_sample_rate ? *last_packet_sample_rate
+                                 : receiver_.last_output_sample_rate_hz();
+}
+
+// Get current playout frequency.
+int AudioCodingModuleImpl::PlayoutFrequency() const {
+  WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
+               "PlayoutFrequency()");
+  return receiver_.last_output_sample_rate_hz();
+}
+
+// Register possible receive codecs, can be called multiple times,
+// for codecs, CNG (NB, WB and SWB), DTMF, RED.
+int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  RTC_DCHECK(receiver_initialized_);
+  if (codec.channels > 2 || codec.channels < 0) {
+    LOG_F(LS_ERROR) << "Unsupported number of channels: " << codec.channels;
+    return -1;
+  }
+
+  auto codec_id =
+      RentACodec::CodecIdByParams(codec.plname, codec.plfreq, codec.channels);
+  if (!codec_id) {
+    LOG_F(LS_ERROR) << "Wrong codec params to be registered as receive codec";
+    return -1;
+  }
+  auto codec_index = RentACodec::CodecIndexFromId(*codec_id);
+  RTC_CHECK(codec_index) << "Invalid codec ID: " << static_cast<int>(*codec_id);
+
+  // Check if the payload-type is valid.
+  if (!RentACodec::IsPayloadTypeValid(codec.pltype)) {
+    LOG_F(LS_ERROR) << "Invalid payload type " << codec.pltype << " for "
+                    << codec.plname;
+    return -1;
+  }
+
+  // Get |decoder| associated with |codec|. |decoder| is NULL if |codec| does
+  // not own its decoder.
+  return receiver_.AddCodec(*codec_index, codec.pltype, codec.channels,
+                            codec.plfreq,
+                            codec_manager_.GetAudioDecoder(codec));
+}
+
+int AudioCodingModuleImpl::RegisterExternalReceiveCodec(
+    int rtp_payload_type,
+    AudioDecoder* external_decoder,
+    int sample_rate_hz,
+    int num_channels) {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  RTC_DCHECK(receiver_initialized_);
+  if (num_channels > 2 || num_channels < 0) {
+    LOG_F(LS_ERROR) << "Unsupported number of channels: " << num_channels;
+    return -1;
+  }
+
+  // Check if the payload-type is valid.
+  if (!RentACodec::IsPayloadTypeValid(rtp_payload_type)) {
+    LOG_F(LS_ERROR) << "Invalid payload-type " << rtp_payload_type
+                    << " for external decoder.";
+    return -1;
+  }
+
+  return receiver_.AddCodec(-1 /* external */, rtp_payload_type, num_channels,
+                            sample_rate_hz, external_decoder);
+}
+
+// Get current received codec.
+int AudioCodingModuleImpl::ReceiveCodec(CodecInst* current_codec) const {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  return receiver_.LastAudioCodec(current_codec);
+}
+
+// Incoming packet from network parsed and ready for decode.
+int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload,
+                                          const size_t payload_length,
+                                          const WebRtcRTPHeader& rtp_header) {
+  return receiver_.InsertPacket(
+      rtp_header,
+      rtc::ArrayView<const uint8_t>(incoming_payload, payload_length));
+}
+
+// Minimum playout delay (Used for lip-sync).
+int AudioCodingModuleImpl::SetMinimumPlayoutDelay(int time_ms) {
+  if ((time_ms < 0) || (time_ms > 10000)) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
+                 "Delay must be in the range of 0-1000 milliseconds.");
+    return -1;
+  }
+  return receiver_.SetMinimumDelay(time_ms);
+}
+
+int AudioCodingModuleImpl::SetMaximumPlayoutDelay(int time_ms) {
+  if ((time_ms < 0) || (time_ms > 10000)) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
+                 "Delay must be in the range of 0-1000 milliseconds.");
+    return -1;
+  }
+  return receiver_.SetMaximumDelay(time_ms);
+}
+
+// Get 10 milliseconds of raw audio data to play out.
+// Automatic resample to the requested frequency.
+int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
+                                           AudioFrame* audio_frame) {
+  // GetAudio always returns 10 ms, at the requested sample rate.
+  if (receiver_.GetAudio(desired_freq_hz, audio_frame) != 0) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
+                 "PlayoutData failed, RecOut Failed");
+    return -1;
+  }
+  audio_frame->id_ = id_;
+  return 0;
+}
+
+/////////////////////////////////////////
+//   Statistics
+//
+
+// TODO(turajs) change the return value to void. Also change the corresponding
+// NetEq function.
+int AudioCodingModuleImpl::GetNetworkStatistics(NetworkStatistics* statistics) {
+  receiver_.GetNetworkStatistics(statistics);
+  return 0;
+}
+
+int AudioCodingModuleImpl::RegisterVADCallback(ACMVADCallback* vad_callback) {
+  WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, id_,
+               "RegisterVADCallback()");
+  CriticalSectionScoped lock(callback_crit_sect_.get());
+  vad_callback_ = vad_callback;
+  return 0;
+}
+
+// TODO(kwiberg): Remove this method, and have callers call IncomingPacket
+// instead. The translation logic and state belong with them, not with
+// AudioCodingModuleImpl.
+int AudioCodingModuleImpl::IncomingPayload(const uint8_t* incoming_payload,
+                                           size_t payload_length,
+                                           uint8_t payload_type,
+                                           uint32_t timestamp) {
+  // We are not acquiring any lock when interacting with |aux_rtp_header_|
+  // because no other method uses this member variable.
+  if (!aux_rtp_header_) {
+    // This is the first time that we are using |aux_rtp_header_|
+    // so we have to create it.
+    aux_rtp_header_.reset(new WebRtcRTPHeader);
+    aux_rtp_header_->header.payloadType = payload_type;
+    // Doesn't matter in this case.
+    aux_rtp_header_->header.ssrc = 0;
+    aux_rtp_header_->header.markerBit = false;
+    // Start with random numbers.
+    aux_rtp_header_->header.sequenceNumber = 0x1234;  // Arbitrary.
+    aux_rtp_header_->type.Audio.channel = 1;
+  }
+
+  aux_rtp_header_->header.timestamp = timestamp;
+  IncomingPacket(incoming_payload, payload_length, *aux_rtp_header_);
+  // Get ready for the next payload.
+  aux_rtp_header_->header.sequenceNumber++;
+  return 0;
+}
+
+int AudioCodingModuleImpl::SetOpusApplication(OpusApplicationMode application) {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  if (!HaveValidEncoder("SetOpusApplication")) {
+    return -1;
+  }
+  if (!codec_manager_.CurrentEncoderIsOpus())
+    return -1;
+  AudioEncoder::Application app;
+  switch (application) {
+    case kVoip:
+      app = AudioEncoder::Application::kSpeech;
+      break;
+    case kAudio:
+      app = AudioEncoder::Application::kAudio;
+      break;
+    default:
+      FATAL();
+      return 0;
+  }
+  return codec_manager_.CurrentEncoder()->SetApplication(app) ? 0 : -1;
+}
+
+// Informs Opus encoder of the maximum playback rate the receiver will render.
+int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  if (!HaveValidEncoder("SetOpusMaxPlaybackRate")) {
+    return -1;
+  }
+  if (!codec_manager_.CurrentEncoderIsOpus())
+    return -1;
+  codec_manager_.CurrentEncoder()->SetMaxPlaybackRate(frequency_hz);
+  return 0;
+}
+
+int AudioCodingModuleImpl::EnableOpusDtx() {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  if (!HaveValidEncoder("EnableOpusDtx")) {
+    return -1;
+  }
+  if (!codec_manager_.CurrentEncoderIsOpus())
+    return -1;
+  return codec_manager_.CurrentEncoder()->SetDtx(true) ? 0 : -1;
+}
+
+int AudioCodingModuleImpl::DisableOpusDtx() {
+  CriticalSectionScoped lock(acm_crit_sect_.get());
+  if (!HaveValidEncoder("DisableOpusDtx")) {
+    return -1;
+  }
+  if (!codec_manager_.CurrentEncoderIsOpus())
+    return -1;
+  return codec_manager_.CurrentEncoder()->SetDtx(false) ? 0 : -1;
+}
+
+int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
+  return receiver_.GetPlayoutTimestamp(timestamp) ? 0 : -1;
+}
+
+bool AudioCodingModuleImpl::HaveValidEncoder(const char* caller_name) const {
+  if (!codec_manager_.CurrentEncoder()) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
+                 "%s failed: No send codec is registered.", caller_name);
+    return false;
+  }
+  return true;
+}
+
+int AudioCodingModuleImpl::UnregisterReceiveCodec(uint8_t payload_type) {
+  return receiver_.RemoveCodec(payload_type);
+}
+
+int AudioCodingModuleImpl::EnableNack(size_t max_nack_list_size) {
+  return receiver_.EnableNack(max_nack_list_size);
+}
+
+void AudioCodingModuleImpl::DisableNack() {
+  receiver_.DisableNack();
+}
+
+std::vector<uint16_t> AudioCodingModuleImpl::GetNackList(
+    int64_t round_trip_time_ms) const {
+  return receiver_.GetNackList(round_trip_time_ms);
+}
+
+int AudioCodingModuleImpl::LeastRequiredDelayMs() const {
+  return receiver_.LeastRequiredDelayMs();
+}
+
+void AudioCodingModuleImpl::GetDecodingCallStatistics(
+      AudioDecodingCallStats* call_stats) const {
+  receiver_.GetDecodingCallStatistics(call_stats);
+}
+
+}  // namespace acm2
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h b/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h
new file mode 100644
index 0000000..6006c68
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h
@@ -0,0 +1,280 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_AUDIO_CODING_MODULE_IMPL_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_AUDIO_CODING_MODULE_IMPL_H_
+
+#include <vector>
+
+#include "webrtc/base/buffer.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/thread_annotations.h"
+#include "webrtc/common_types.h"
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_coding/acm2/acm_receiver.h"
+#include "webrtc/modules/audio_coding/acm2/acm_resampler.h"
+#include "webrtc/modules/audio_coding/acm2/codec_manager.h"
+
+namespace webrtc {
+
+class CriticalSectionWrapper;
+class AudioCodingImpl;
+
+namespace acm2 {
+
+// Implementation of the AudioCodingModule interface. Encoding state is
+// protected by |acm_crit_sect_|; the registered callbacks are protected by
+// the separate |callback_crit_sect_|; AcmReceiver does its own locking.
+class AudioCodingModuleImpl final : public AudioCodingModule {
+ public:
+  // Grants the new-style API wrapper access to this implementation.
+  friend webrtc::AudioCodingImpl;
+
+  explicit AudioCodingModuleImpl(const AudioCodingModule::Config& config);
+  ~AudioCodingModuleImpl() override;
+
+  /////////////////////////////////////////
+  //   Sender
+  //
+
+  // Can be called multiple times for Codec, CNG, RED.
+  int RegisterSendCodec(const CodecInst& send_codec) override;
+
+  void RegisterExternalSendCodec(
+      AudioEncoder* external_speech_encoder) override;
+
+  // Get current send codec.
+  rtc::Optional<CodecInst> SendCodec() const override;
+
+  // Get current send frequency.
+  int SendFrequency() const override;
+
+  // Sets the bitrate to the specified value in bits/sec. In case the codec does
+  // not support the requested value it will choose an appropriate value
+  // instead.
+  void SetBitRate(int bitrate_bps) override;
+
+  // Register a transport callback which will be
+  // called to deliver the encoded buffers.
+  int RegisterTransportCallback(AudioPacketizationCallback* transport) override;
+
+  // Add 10 ms of raw (PCM) audio data to the encoder.
+  int Add10MsData(const AudioFrame& audio_frame) override;
+
+  /////////////////////////////////////////
+  // (RED) Redundant Coding
+  //
+
+  // Configure RED status i.e. on/off.
+  int SetREDStatus(bool enable_red) override;
+
+  // Get RED status.
+  bool REDStatus() const override;
+
+  /////////////////////////////////////////
+  // (FEC) Forward Error Correction (codec internal)
+  //
+
+  // Configure FEC status i.e. on/off.
+  int SetCodecFEC(bool enabled_codec_fec) override;
+
+  // Get FEC status.
+  bool CodecFEC() const override;
+
+  // Set target packet loss rate
+  int SetPacketLossRate(int loss_rate) override;
+
+  /////////////////////////////////////////
+  //   (VAD) Voice Activity Detection
+  //   and
+  //   (CNG) Comfort Noise Generation
+  //
+
+  int SetVAD(bool enable_dtx = true,
+             bool enable_vad = false,
+             ACMVADMode mode = VADNormal) override;
+
+  int VAD(bool* dtx_enabled,
+          bool* vad_enabled,
+          ACMVADMode* mode) const override;
+
+  int RegisterVADCallback(ACMVADCallback* vad_callback) override;
+
+  /////////////////////////////////////////
+  //   Receiver
+  //
+
+  // Initialize receiver, resets codec database etc.
+  int InitializeReceiver() override;
+
+  // Get current receive frequency.
+  int ReceiveFrequency() const override;
+
+  // Get current playout frequency.
+  int PlayoutFrequency() const override;
+
+  // Register possible receive codecs, can be called multiple times,
+  // for codecs, CNG, DTMF, RED.
+  int RegisterReceiveCodec(const CodecInst& receive_codec) override;
+
+  int RegisterExternalReceiveCodec(int rtp_payload_type,
+                                   AudioDecoder* external_decoder,
+                                   int sample_rate_hz,
+                                   int num_channels) override;
+
+  // Get current received codec.
+  int ReceiveCodec(CodecInst* current_codec) const override;
+
+  // Incoming packet from network parsed and ready for decode.
+  int IncomingPacket(const uint8_t* incoming_payload,
+                     const size_t payload_length,
+                     const WebRtcRTPHeader& rtp_info) override;
+
+  // Incoming payloads, without rtp-info, the rtp-info will be created in ACM.
+  // One usage for this API is when pre-encoded files are pushed in ACM.
+  int IncomingPayload(const uint8_t* incoming_payload,
+                      const size_t payload_length,
+                      uint8_t payload_type,
+                      uint32_t timestamp) override;
+
+  // Minimum playout delay.
+  int SetMinimumPlayoutDelay(int time_ms) override;
+
+  // Maximum playout delay.
+  int SetMaximumPlayoutDelay(int time_ms) override;
+
+  // Smallest latency NetEq will maintain.
+  int LeastRequiredDelayMs() const override;
+
+  // Get playout timestamp.
+  int PlayoutTimestamp(uint32_t* timestamp) override;
+
+  // Get 10 milliseconds of raw audio data to play out, and
+  // automatic resample to the requested frequency if > 0.
+  int PlayoutData10Ms(int desired_freq_hz, AudioFrame* audio_frame) override;
+
+  /////////////////////////////////////////
+  //   Statistics
+  //
+
+  int GetNetworkStatistics(NetworkStatistics* statistics) override;
+
+  int SetOpusApplication(OpusApplicationMode application) override;
+
+  // If current send codec is Opus, informs it about the maximum playback rate
+  // the receiver will render.
+  int SetOpusMaxPlaybackRate(int frequency_hz) override;
+
+  int EnableOpusDtx() override;
+
+  int DisableOpusDtx() override;
+
+  int UnregisterReceiveCodec(uint8_t payload_type) override;
+
+  int EnableNack(size_t max_nack_list_size) override;
+
+  void DisableNack() override;
+
+  std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms) const override;
+
+  void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const override;
+
+ private:
+  // One 10 ms chunk of input audio, handed from Add10MsDataInternal() to
+  // Encode().
+  struct InputData {
+    uint32_t input_timestamp;
+    const int16_t* audio;
+    size_t length_per_channel;
+    uint8_t audio_channel;
+    // If a re-mix is required (up or down), this buffer will store a re-mixed
+    // version of the input.
+    int16_t buffer[WEBRTC_10MS_PCM_AUDIO];
+  };
+
+  // This member class writes values to the named UMA histogram, but only if
+  // the value has changed since the last time (and always for the first call).
+  class ChangeLogger {
+   public:
+    explicit ChangeLogger(const std::string& histogram_name)
+        : histogram_name_(histogram_name) {}
+    // Logs the new value if it is different from the last logged value, or if
+    // this is the first call.
+    void MaybeLog(int value);
+
+   private:
+    int last_value_ = 0;
+    int first_time_ = true;  // NOTE(review): used as a bool; consider bool.
+    const std::string histogram_name_;
+  };
+
+  int Add10MsDataInternal(const AudioFrame& audio_frame, InputData* input_data)
+      EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
+  int Encode(const InputData& input_data)
+      EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
+
+  int InitializeReceiverSafe() EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
+
+  bool HaveValidEncoder(const char* caller_name) const
+      EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
+
+  // Preprocessing of input audio, including resampling and down-mixing if
+  // required, before pushing audio into encoder's buffer.
+  //
+  // in_frame: input audio-frame
+  // ptr_out: pointer to output audio_frame. If no preprocessing is required
+  //          |ptr_out| will be pointing to |in_frame|, otherwise pointing to
+  //          |preprocess_frame_|.
+  //
+  // Return value:
+  //   -1: if encountering an error.
+  //    0: otherwise.
+  int PreprocessToAddData(const AudioFrame& in_frame,
+                          const AudioFrame** ptr_out)
+      EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
+
+  // Change required states after starting to receive the codec corresponding
+  // to |index|.
+  int UpdateUponReceivingCodec(int index);
+
+  const rtc::scoped_ptr<CriticalSectionWrapper> acm_crit_sect_;
+  rtc::Buffer encode_buffer_ GUARDED_BY(acm_crit_sect_);
+  int id_;  // TODO(henrik.lundin) Make const.
+  uint32_t expected_codec_ts_ GUARDED_BY(acm_crit_sect_);
+  uint32_t expected_in_ts_ GUARDED_BY(acm_crit_sect_);
+  ACMResampler resampler_ GUARDED_BY(acm_crit_sect_);
+  AcmReceiver receiver_;  // AcmReceiver has its own internal lock.
+  ChangeLogger bitrate_logger_ GUARDED_BY(acm_crit_sect_);
+  CodecManager codec_manager_ GUARDED_BY(acm_crit_sect_);
+
+  // This is to keep track of CN instances where we can send DTMFs.
+  uint8_t previous_pltype_ GUARDED_BY(acm_crit_sect_);
+
+  // Used when payloads are pushed into ACM without any RTP info
+  // One example is when pre-encoded bit-stream is pushed from
+  // a file.
+  // IMPORTANT: this variable is only used in IncomingPayload(), therefore,
+  // no lock acquired when interacting with this variable. If it is going to
+  // be used in other methods, locks need to be taken.
+  rtc::scoped_ptr<WebRtcRTPHeader> aux_rtp_header_;
+
+  bool receiver_initialized_ GUARDED_BY(acm_crit_sect_);
+
+  AudioFrame preprocess_frame_ GUARDED_BY(acm_crit_sect_);
+  bool first_10ms_data_ GUARDED_BY(acm_crit_sect_);
+
+  bool first_frame_ GUARDED_BY(acm_crit_sect_);
+  uint32_t last_timestamp_ GUARDED_BY(acm_crit_sect_);
+  uint32_t last_rtp_timestamp_ GUARDED_BY(acm_crit_sect_);
+
+  const rtc::scoped_ptr<CriticalSectionWrapper> callback_crit_sect_;
+  AudioPacketizationCallback* packetization_callback_
+      GUARDED_BY(callback_crit_sect_);
+  ACMVADCallback* vad_callback_ GUARDED_BY(callback_crit_sect_);
+};
+
+}  // namespace acm2
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_CODING_ACM2_AUDIO_CODING_MODULE_IMPL_H_
diff --git a/webrtc/modules/audio_coding/acm2/audio_coding_module_unittest_oldapi.cc b/webrtc/modules/audio_coding/acm2/audio_coding_module_unittest_oldapi.cc
new file mode 100644
index 0000000..39c14a8
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module_unittest_oldapi.cc
@@ -0,0 +1,1777 @@
+/*
+ *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/md5digest.h"
+#include "webrtc/base/platform_thread.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/thread_annotations.h"
+#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
+#include "webrtc/modules/audio_coding/codecs/g711/audio_decoder_pcm.h"
+#include "webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
+#include "webrtc/modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
+#include "webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h"
+#include "webrtc/modules/audio_coding/acm2/acm_receive_test_oldapi.h"
+#include "webrtc/modules/audio_coding/acm2/acm_send_test_oldapi.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/modules/audio_coding/neteq/audio_decoder_impl.h"
+#include "webrtc/modules/audio_coding/neteq/mock/mock_audio_decoder.h"
+#include "webrtc/modules/audio_coding/neteq/tools/audio_checksum.h"
+#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
+#include "webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h"
+#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
+#include "webrtc/modules/audio_coding/neteq/tools/output_audio_file.h"
+#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
+#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
+#include "webrtc/modules/include/module_common_types.h"
+#include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
+#include "webrtc/system_wrappers/include/event_wrapper.h"
+#include "webrtc/system_wrappers/include/sleep.h"
+#include "webrtc/test/testsupport/fileutils.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
+
+using ::testing::AtLeast;
+using ::testing::Invoke;
+using ::testing::_;
+
+namespace webrtc {
+
+namespace {
+const int kSampleRateHz = 16000;
+const int kNumSamples10ms = kSampleRateHz / 100;
+const int kFrameSizeMs = 10;  // Multiple of 10.
+const int kFrameSizeSamples = kFrameSizeMs / 10 * kNumSamples10ms;
+const int kPayloadSizeBytes = kFrameSizeSamples * sizeof(int16_t);
+const uint8_t kPayloadType = 111;
+}  // namespace
+
+// Test helper that fabricates RTP headers for inserted packets and advances
+// the sequence number / timestamp between packets.
+class RtpUtility {
+ public:
+  RtpUtility(int samples_per_packet, uint8_t payload_type)
+      : samples_per_packet_(samples_per_packet), payload_type_(payload_type) {}
+
+  virtual ~RtpUtility() {}
+
+  // Fills |rtp_header| with fixed initial values for the first packet.
+  void Populate(WebRtcRTPHeader* rtp_header) {
+    rtp_header->header.sequenceNumber = 0xABCD;
+    rtp_header->header.timestamp = 0xABCDEF01;
+    rtp_header->header.payloadType = payload_type_;
+    rtp_header->header.markerBit = false;
+    rtp_header->header.ssrc = 0x1234;
+    rtp_header->header.numCSRCs = 0;
+    rtp_header->frameType = kAudioFrameSpeech;
+
+    rtp_header->header.payload_type_frequency = kSampleRateHz;
+    rtp_header->type.Audio.channel = 1;
+    rtp_header->type.Audio.isCNG = false;
+  }
+
+  // Advances |rtp_header| to the next packet: +1 sequence number, and the
+  // timestamp moves forward by one packet's worth of samples.
+  void Forward(WebRtcRTPHeader* rtp_header) {
+    ++rtp_header->header.sequenceNumber;
+    rtp_header->header.timestamp += samples_per_packet_;
+  }
+
+ private:
+  int samples_per_packet_;
+  uint8_t payload_type_;
+};
+
+// AudioPacketizationCallback stub that records the arguments of the most
+// recent SendData() call. All accessors take |crit_sect_| so tests can read
+// the recorded state from a different thread than the one encoding.
+class PacketizationCallbackStubOldApi : public AudioPacketizationCallback {
+ public:
+  PacketizationCallbackStubOldApi()
+      : num_calls_(0),
+        last_frame_type_(kEmptyFrame),
+        last_payload_type_(-1),
+        last_timestamp_(0),
+        crit_sect_(CriticalSectionWrapper::CreateCriticalSection()) {}
+
+  // Records the call and copies the payload; always reports success.
+  int32_t SendData(FrameType frame_type,
+                   uint8_t payload_type,
+                   uint32_t timestamp,
+                   const uint8_t* payload_data,
+                   size_t payload_len_bytes,
+                   const RTPFragmentationHeader* fragmentation) override {
+    CriticalSectionScoped lock(crit_sect_.get());
+    ++num_calls_;
+    last_frame_type_ = frame_type;
+    last_payload_type_ = payload_type;
+    last_timestamp_ = timestamp;
+    last_payload_vec_.assign(payload_data, payload_data + payload_len_bytes);
+    return 0;
+  }
+
+  int num_calls() const {
+    CriticalSectionScoped lock(crit_sect_.get());
+    return num_calls_;
+  }
+
+  // NOTE(review): implicitly narrows size_t to int.
+  int last_payload_len_bytes() const {
+    CriticalSectionScoped lock(crit_sect_.get());
+    return last_payload_vec_.size();
+  }
+
+  FrameType last_frame_type() const {
+    CriticalSectionScoped lock(crit_sect_.get());
+    return last_frame_type_;
+  }
+
+  int last_payload_type() const {
+    CriticalSectionScoped lock(crit_sect_.get());
+    return last_payload_type_;
+  }
+
+  uint32_t last_timestamp() const {
+    CriticalSectionScoped lock(crit_sect_.get());
+    return last_timestamp_;
+  }
+
+  // Swaps the stored payload with |payload|, letting the caller take the data
+  // without holding the lock during subsequent use.
+  void SwapBuffers(std::vector<uint8_t>* payload) {
+    CriticalSectionScoped lock(crit_sect_.get());
+    last_payload_vec_.swap(*payload);
+  }
+
+ private:
+  int num_calls_ GUARDED_BY(crit_sect_);
+  FrameType last_frame_type_ GUARDED_BY(crit_sect_);
+  int last_payload_type_ GUARDED_BY(crit_sect_);
+  uint32_t last_timestamp_ GUARDED_BY(crit_sect_);
+  std::vector<uint8_t> last_payload_vec_ GUARDED_BY(crit_sect_);
+  const rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
+};
+
+// Base fixture for old-API ACM tests. SetUp() creates an ACM, registers the
+// packetization callback and an L16 send/receive codec, and prepares a zeroed
+// 10 ms input frame at 16 kHz.
+class AudioCodingModuleTestOldApi : public ::testing::Test {
+ protected:
+  AudioCodingModuleTestOldApi()
+      : id_(1),
+        rtp_utility_(new RtpUtility(kFrameSizeSamples, kPayloadType)),
+        clock_(Clock::GetRealTimeClock()) {}
+
+  ~AudioCodingModuleTestOldApi() {}
+
+  void TearDown() {}
+
+  void SetUp() {
+    acm_.reset(AudioCodingModule::Create(id_, clock_));
+
+    rtp_utility_->Populate(&rtp_header_);
+
+    input_frame_.sample_rate_hz_ = kSampleRateHz;
+    input_frame_.num_channels_ = 1;
+    input_frame_.samples_per_channel_ = kSampleRateHz * 10 / 1000;  // 10 ms.
+    static_assert(kSampleRateHz * 10 / 1000 <= AudioFrame::kMaxDataSizeSamples,
+                  "audio frame too small");
+    // Zero samples so tests relying on silence (e.g. DTX/CNG) behave.
+    memset(input_frame_.data_,
+           0,
+           input_frame_.samples_per_channel_ * sizeof(input_frame_.data_[0]));
+
+    ASSERT_EQ(0, acm_->RegisterTransportCallback(&packet_cb_));
+
+    SetUpL16Codec();
+  }
+
+  // Set up L16 codec.
+  virtual void SetUpL16Codec() {
+    ASSERT_EQ(0, AudioCodingModule::Codec("L16", &codec_, kSampleRateHz, 1));
+    codec_.pltype = kPayloadType;
+  }
+
+  // Registers |codec_| both as receive and send codec.
+  virtual void RegisterCodec() {
+    ASSERT_EQ(0, acm_->RegisterReceiveCodec(codec_));
+    ASSERT_EQ(0, acm_->RegisterSendCodec(codec_));
+  }
+
+  virtual void InsertPacketAndPullAudio() {
+    InsertPacket();
+    PullAudio();
+  }
+
+  // Inserts one all-zero payload with the current RTP header, then advances
+  // the header for the next packet.
+  virtual void InsertPacket() {
+    const uint8_t kPayload[kPayloadSizeBytes] = {0};
+    ASSERT_EQ(0,
+              acm_->IncomingPacket(kPayload, kPayloadSizeBytes, rtp_header_));
+    rtp_utility_->Forward(&rtp_header_);
+  }
+
+  // Pulls 10 ms of audio at the codec's native rate (-1 = no resampling).
+  virtual void PullAudio() {
+    AudioFrame audio_frame;
+    ASSERT_EQ(0, acm_->PlayoutData10Ms(-1, &audio_frame));
+  }
+
+  // Feeds the prepared 10 ms frame to the encoder and advances its timestamp.
+  virtual void InsertAudio() {
+    ASSERT_GE(acm_->Add10MsData(input_frame_), 0);
+    input_frame_.timestamp_ += kNumSamples10ms;
+  }
+
+  // A packet is expected to be either full-size (16-bit PCM) or empty.
+  virtual void VerifyEncoding() {
+    int last_length = packet_cb_.last_payload_len_bytes();
+    EXPECT_TRUE(last_length == 2 * codec_.pacsize || last_length == 0)
+        << "Last encoded packet was " << last_length << " bytes.";
+  }
+
+  virtual void InsertAudioAndVerifyEncoding() {
+    InsertAudio();
+    VerifyEncoding();
+  }
+
+  const int id_;
+  rtc::scoped_ptr<RtpUtility> rtp_utility_;
+  rtc::scoped_ptr<AudioCodingModule> acm_;
+  PacketizationCallbackStubOldApi packet_cb_;
+  WebRtcRTPHeader rtp_header_;
+  AudioFrame input_frame_;
+  CodecInst codec_;
+  Clock* clock_;
+};
+
+// Check if the statistics are initialized correctly. Before any call to ACM
+// all fields have to be zero.
+TEST_F(AudioCodingModuleTestOldApi, DISABLED_ON_ANDROID(InitializedToZero)) {
+  RegisterCodec();
+  AudioDecodingCallStats stats;
+  acm_->GetDecodingCallStatistics(&stats);
+  // No packets inserted and no audio pulled yet, so every counter is zero.
+  EXPECT_EQ(0, stats.calls_to_neteq);
+  EXPECT_EQ(0, stats.calls_to_silence_generator);
+  EXPECT_EQ(0, stats.decoded_normal);
+  EXPECT_EQ(0, stats.decoded_cng);
+  EXPECT_EQ(0, stats.decoded_plc);
+  EXPECT_EQ(0, stats.decoded_plc_cng);
+}
+
+// Insert some packets and pull audio. Check statistics are valid. Then,
+// simulate packet loss and check if PLC and PLC-to-CNG statistics are
+// correctly updated.
+TEST_F(AudioCodingModuleTestOldApi, DISABLED_ON_ANDROID(NetEqCalls)) {
+  RegisterCodec();
+  AudioDecodingCallStats stats;
+  const int kNumNormalCalls = 10;
+
+  for (int num_calls = 0; num_calls < kNumNormalCalls; ++num_calls) {
+    InsertPacketAndPullAudio();
+  }
+  // Every pull so far had a matching packet, so all decodes are "normal".
+  acm_->GetDecodingCallStatistics(&stats);
+  EXPECT_EQ(kNumNormalCalls, stats.calls_to_neteq);
+  EXPECT_EQ(0, stats.calls_to_silence_generator);
+  EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
+  EXPECT_EQ(0, stats.decoded_cng);
+  EXPECT_EQ(0, stats.decoded_plc);
+  EXPECT_EQ(0, stats.decoded_plc_cng);
+
+  const int kNumPlc = 3;
+  const int kNumPlcCng = 5;
+
+  // Simulate packet-loss. NetEq first performs PLC then PLC fades to CNG.
+  for (int n = 0; n < kNumPlc + kNumPlcCng; ++n) {
+    PullAudio();
+  }
+  acm_->GetDecodingCallStatistics(&stats);
+  EXPECT_EQ(kNumNormalCalls + kNumPlc + kNumPlcCng, stats.calls_to_neteq);
+  EXPECT_EQ(0, stats.calls_to_silence_generator);
+  EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
+  EXPECT_EQ(0, stats.decoded_cng);
+  EXPECT_EQ(kNumPlc, stats.decoded_plc);
+  EXPECT_EQ(kNumPlcCng, stats.decoded_plc_cng);
+}
+
+// Pulls audio at a requested 32 kHz rate and verifies the metadata of the
+// returned frame (id, timestamp, channel count, samples, sample rate).
+TEST_F(AudioCodingModuleTestOldApi, VerifyOutputFrame) {
+  AudioFrame audio_frame;
+  // Shadows the file-scope 16 kHz constant to request 32 kHz playout.
+  const int kSampleRateHz = 32000;
+  EXPECT_EQ(0, acm_->PlayoutData10Ms(kSampleRateHz, &audio_frame));
+  EXPECT_EQ(id_, audio_frame.id_);
+  EXPECT_EQ(0u, audio_frame.timestamp_);
+  EXPECT_GT(audio_frame.num_channels_, 0);
+  EXPECT_EQ(static_cast<size_t>(kSampleRateHz / 100),
+            audio_frame.samples_per_channel_);
+  EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
+}
+
+// PlayoutData10Ms() must reject a desired frequency of zero.
+TEST_F(AudioCodingModuleTestOldApi, FailOnZeroDesiredFrequency) {
+  AudioFrame audio_frame;
+  EXPECT_EQ(-1, acm_->PlayoutData10Ms(0, &audio_frame));
+}
+
+// Checks that the transport callback is invoked once for each speech packet.
+// Also checks that the frame type is kAudioFrameSpeech.
+TEST_F(AudioCodingModuleTestOldApi, TransportCallbackIsInvokedForEachPacket) {
+  const int k10MsBlocksPerPacket = 3;
+  codec_.pacsize = k10MsBlocksPerPacket * kSampleRateHz / 100;
+  RegisterCodec();
+  const int kLoops = 10;
+  for (int i = 0; i < kLoops; ++i) {
+    // A packet is produced only after every |k10MsBlocksPerPacket| inserts.
+    EXPECT_EQ(i / k10MsBlocksPerPacket, packet_cb_.num_calls());
+    if (packet_cb_.num_calls() > 0)
+      EXPECT_EQ(kAudioFrameSpeech, packet_cb_.last_frame_type());
+    InsertAudioAndVerifyEncoding();
+  }
+  EXPECT_EQ(kLoops / k10MsBlocksPerPacket, packet_cb_.num_calls());
+  EXPECT_EQ(kAudioFrameSpeech, packet_cb_.last_frame_type());
+}
+
+// IF_ISAC(x) disables tests that require an iSAC build (float or fixed).
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+#define IF_ISAC(x) x
+#else
+#define IF_ISAC(x) DISABLED_##x
+#endif
+
+// Verifies that the RTP timestamp series is not reset when the codec is
+// changed.
+TEST_F(AudioCodingModuleTestOldApi,
+       IF_ISAC(TimestampSeriesContinuesWhenCodecChanges)) {
+  RegisterCodec();  // This registers the default codec.
+  uint32_t expected_ts = input_frame_.timestamp_;
+  int blocks_per_packet = codec_.pacsize / (kSampleRateHz / 100);
+  // Encode 5 packets of the first codec type.
+  const int kNumPackets1 = 5;
+  for (int j = 0; j < kNumPackets1; ++j) {
+    for (int i = 0; i < blocks_per_packet; ++i) {
+      EXPECT_EQ(j, packet_cb_.num_calls());
+      InsertAudio();
+    }
+    EXPECT_EQ(j + 1, packet_cb_.num_calls());
+    EXPECT_EQ(expected_ts, packet_cb_.last_timestamp());
+    expected_ts += codec_.pacsize;
+  }
+
+  // Change codec.
+  ASSERT_EQ(0, AudioCodingModule::Codec("ISAC", &codec_, kSampleRateHz, 1));
+  RegisterCodec();
+  blocks_per_packet = codec_.pacsize / (kSampleRateHz / 100);
+  // Encode another 5 packets.
+  const int kNumPackets2 = 5;
+  for (int j = 0; j < kNumPackets2; ++j) {
+    for (int i = 0; i < blocks_per_packet; ++i) {
+      EXPECT_EQ(kNumPackets1 + j, packet_cb_.num_calls());
+      InsertAudio();
+    }
+    EXPECT_EQ(kNumPackets1 + j + 1, packet_cb_.num_calls());
+    // Timestamps must continue from where the previous codec left off.
+    EXPECT_EQ(expected_ts, packet_cb_.last_timestamp());
+    expected_ts += codec_.pacsize;
+  }
+}
+
+// Introduce this class to set different expectations on the number of encoded
+// bytes. This class expects all encoded packets to be 9 bytes (matching one
+// CNG SID frame) or 0 bytes. This test depends on |input_frame_| containing
+// (near-)zero values. It also introduces a way to register comfort noise with
+// a custom payload type.
+class AudioCodingModuleTestWithComfortNoiseOldApi
+    : public AudioCodingModuleTestOldApi {
+ protected:
+  // Registers the CN codec on |rtp_payload_type| for both send and receive.
+  void RegisterCngCodec(int rtp_payload_type) {
+    CodecInst codec;
+    AudioCodingModule::Codec("CN", &codec, kSampleRateHz, 1);
+    codec.pltype = rtp_payload_type;
+    ASSERT_EQ(0, acm_->RegisterReceiveCodec(codec));
+    ASSERT_EQ(0, acm_->RegisterSendCodec(codec));
+  }
+
+  // Packets are expected to be a 9-byte CNG SID frame or empty.
+  void VerifyEncoding() override {
+    int last_length = packet_cb_.last_payload_len_bytes();
+    EXPECT_TRUE(last_length == 9 || last_length == 0)
+        << "Last encoded packet was " << last_length << " bytes.";
+  }
+
+  void DoTest(int blocks_per_packet, int cng_pt) {
+    const int kLoops = 40;
+    // This array defines the expected frame types, and when they should arrive.
+    // We expect a frame to arrive each time the speech encoder would have
+    // produced a packet, and once every 100 ms the frame should be non-empty,
+    // that is contain comfort noise.
+    const struct {
+      int ix;
+      FrameType type;
+    } expectation[] = {{2, kAudioFrameCN},
+                       {5, kEmptyFrame},
+                       {8, kEmptyFrame},
+                       {11, kAudioFrameCN},
+                       {14, kEmptyFrame},
+                       {17, kEmptyFrame},
+                       {20, kAudioFrameCN},
+                       {23, kEmptyFrame},
+                       {26, kEmptyFrame},
+                       {29, kEmptyFrame},
+                       {32, kAudioFrameCN},
+                       {35, kEmptyFrame},
+                       {38, kEmptyFrame}};
+    for (int i = 0; i < kLoops; ++i) {
+      int num_calls_before = packet_cb_.num_calls();
+      EXPECT_EQ(i / blocks_per_packet, num_calls_before);
+      InsertAudioAndVerifyEncoding();
+      int num_calls = packet_cb_.num_calls();
+      if (num_calls == num_calls_before + 1) {
+        // A new packet arrived; check it against the expectation table.
+        EXPECT_EQ(expectation[num_calls - 1].ix, i);
+        EXPECT_EQ(expectation[num_calls - 1].type, packet_cb_.last_frame_type())
+            << "Wrong frame type for lap " << i;
+        EXPECT_EQ(cng_pt, packet_cb_.last_payload_type());
+      } else {
+        EXPECT_EQ(num_calls, num_calls_before);
+      }
+    }
+  }
+};
+
+// Checks that the transport callback is invoked once per frame period of the
+// underlying speech encoder, even when comfort noise is produced.
+// Also checks that the frame type is kAudioFrameCN or kEmptyFrame.
+// This test and the next check the same thing, but differ in the order of
+// speech codec and CNG registration.
+TEST_F(AudioCodingModuleTestWithComfortNoiseOldApi,
+       TransportCallbackTestForComfortNoiseRegisterCngLast) {
+  const int k10MsBlocksPerPacket = 3;
+  codec_.pacsize = k10MsBlocksPerPacket * kSampleRateHz / 100;
+  RegisterCodec();
+  const int kCngPayloadType = 105;
+  RegisterCngCodec(kCngPayloadType);
+  ASSERT_EQ(0, acm_->SetVAD(true, true));
+  DoTest(k10MsBlocksPerPacket, kCngPayloadType);
+}
+
+// Same as the previous test, but registers CNG before the speech codec.
+TEST_F(AudioCodingModuleTestWithComfortNoiseOldApi,
+       TransportCallbackTestForComfortNoiseRegisterCngFirst) {
+  const int k10MsBlocksPerPacket = 3;
+  codec_.pacsize = k10MsBlocksPerPacket * kSampleRateHz / 100;
+  const int kCngPayloadType = 105;
+  RegisterCngCodec(kCngPayloadType);
+  RegisterCodec();
+  ASSERT_EQ(0, acm_->SetVAD(true, true));
+  DoTest(k10MsBlocksPerPacket, kCngPayloadType);
+}
+
+// A multi-threaded test for ACM. This base class is using the PCM16b 16 kHz
+// codec, while the derive class AcmIsacMtTest is using iSAC.
+// Three threads run concurrently: one encodes audio, one inserts packets into
+// the receiver, and one pulls audio (advancing the simulated clock).
+class AudioCodingModuleMtTestOldApi : public AudioCodingModuleTestOldApi {
+ protected:
+  static const int kNumPackets = 500;
+  static const int kNumPullCalls = 500;
+
+  AudioCodingModuleMtTestOldApi()
+      : AudioCodingModuleTestOldApi(),
+        send_thread_(PlatformThread::CreateThread(CbSendThread, this, "send")),
+        insert_packet_thread_(PlatformThread::CreateThread(CbInsertPacketThread,
+                                                           this,
+                                                           "insert_packet")),
+        pull_audio_thread_(PlatformThread::CreateThread(CbPullAudioThread,
+                                                        this,
+                                                        "pull_audio")),
+        test_complete_(EventWrapper::Create()),
+        send_count_(0),
+        insert_packet_count_(0),
+        pull_audio_count_(0),
+        crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+        next_insert_packet_time_ms_(0),
+        fake_clock_(new SimulatedClock(0)) {
+    // Replace the real-time clock of the base fixture with the fake clock.
+    clock_ = fake_clock_.get();
+  }
+
+  void SetUp() {
+    AudioCodingModuleTestOldApi::SetUp();
+    RegisterCodec();  // Must be called before the threads start below.
+    StartThreads();
+  }
+
+  void StartThreads() {
+    ASSERT_TRUE(send_thread_->Start());
+    send_thread_->SetPriority(kRealtimePriority);
+    ASSERT_TRUE(insert_packet_thread_->Start());
+    insert_packet_thread_->SetPriority(kRealtimePriority);
+    ASSERT_TRUE(pull_audio_thread_->Start());
+    pull_audio_thread_->SetPriority(kRealtimePriority);
+  }
+
+  void TearDown() {
+    AudioCodingModuleTestOldApi::TearDown();
+    pull_audio_thread_->Stop();
+    send_thread_->Stop();
+    insert_packet_thread_->Stop();
+  }
+
+  EventTypeWrapper RunTest() {
+    return test_complete_->Wait(10 * 60 * 1000);  // 10 minutes' timeout.
+  }
+
+  // The test is done when enough packets have been sent AND enough audio has
+  // been pulled.
+  virtual bool TestDone() {
+    if (packet_cb_.num_calls() > kNumPackets) {
+      CriticalSectionScoped lock(crit_sect_.get());
+      if (pull_audio_count_ > kNumPullCalls) {
+        // Both conditions for completion are met. End the test.
+        return true;
+      }
+    }
+    return false;
+  }
+
+  static bool CbSendThread(void* context) {
+    return reinterpret_cast<AudioCodingModuleMtTestOldApi*>(context)
+        ->CbSendImpl();
+  }
+
+  // The send thread doesn't have to care about the current simulated time,
+  // since only the AcmReceiver is using the clock.
+  bool CbSendImpl() {
+    SleepMs(1);
+    if (HasFatalFailure()) {
+      // End the test early if a fatal failure (ASSERT_*) has occurred.
+      test_complete_->Set();
+    }
+    ++send_count_;
+    InsertAudioAndVerifyEncoding();
+    if (TestDone()) {
+      test_complete_->Set();
+    }
+    return true;
+  }
+
+  static bool CbInsertPacketThread(void* context) {
+    return reinterpret_cast<AudioCodingModuleMtTestOldApi*>(context)
+        ->CbInsertPacketImpl();
+  }
+
+  // Inserts one packet every 10 simulated milliseconds.
+  bool CbInsertPacketImpl() {
+    SleepMs(1);
+    {
+      CriticalSectionScoped lock(crit_sect_.get());
+      if (clock_->TimeInMilliseconds() < next_insert_packet_time_ms_) {
+        return true;
+      }
+      next_insert_packet_time_ms_ += 10;
+    }
+    // Now we're not holding the crit sect when calling ACM.
+    ++insert_packet_count_;
+    InsertPacket();
+    return true;
+  }
+
+  static bool CbPullAudioThread(void* context) {
+    return reinterpret_cast<AudioCodingModuleMtTestOldApi*>(context)
+        ->CbPullAudioImpl();
+  }
+
+  // Pulls 10 ms of audio and advances the fake clock accordingly.
+  bool CbPullAudioImpl() {
+    SleepMs(1);
+    {
+      CriticalSectionScoped lock(crit_sect_.get());
+      // Don't let the insert thread fall behind.
+      if (next_insert_packet_time_ms_ < clock_->TimeInMilliseconds()) {
+        return true;
+      }
+      ++pull_audio_count_;
+    }
+    // Now we're not holding the crit sect when calling ACM.
+    PullAudio();
+    fake_clock_->AdvanceTimeMilliseconds(10);
+    return true;
+  }
+
+  rtc::scoped_ptr<PlatformThread> send_thread_;
+  rtc::scoped_ptr<PlatformThread> insert_packet_thread_;
+  rtc::scoped_ptr<PlatformThread> pull_audio_thread_;
+  const rtc::scoped_ptr<EventWrapper> test_complete_;
+  // NOTE(review): the two counters below are incremented without the lock;
+  // appears each is touched only by its own thread — confirm before reuse.
+  int send_count_;
+  int insert_packet_count_;
+  int pull_audio_count_ GUARDED_BY(crit_sect_);
+  const rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
+  int64_t next_insert_packet_time_ms_ GUARDED_BY(crit_sect_);
+  rtc::scoped_ptr<SimulatedClock> fake_clock_;
+};
+
+// Runs the multi-threaded test until completion (or 10-minute timeout).
+TEST_F(AudioCodingModuleMtTestOldApi, DISABLED_ON_IOS(DoTest)) {
+  EXPECT_EQ(kEventSignaled, RunTest());
+}
+
+// This is a multi-threaded ACM test using iSAC. The test encodes audio
+// from a PCM file. The most recent encoded frame is used as input to the
+// receiving part. Depending on timing, it may happen that the same RTP packet
+// is inserted into the receiver multiple times, but this is a valid use-case,
+// and simplifies the test code a lot.
+class AcmIsacMtTestOldApi : public AudioCodingModuleMtTestOldApi {
+ protected:
+  static const int kNumPackets = 500;
+  static const int kNumPullCalls = 500;
+
+  AcmIsacMtTestOldApi()
+      : AudioCodingModuleMtTestOldApi(), last_packet_number_(0) {}
+
+  ~AcmIsacMtTestOldApi() {}
+
+  void SetUp() {
+    AudioCodingModuleTestOldApi::SetUp();
+    RegisterCodec();  // Must be called before the threads start below.
+
+    // Set up input audio source to read from specified file, loop after 5
+    // seconds, and deliver blocks of 10 ms.
+    const std::string input_file_name =
+        webrtc::test::ResourcePath("audio_coding/speech_mono_16kHz", "pcm");
+    audio_loop_.Init(input_file_name, 5 * kSampleRateHz, kNumSamples10ms);
+
+    // Generate one packet to have something to insert.
+    int loop_counter = 0;
+    while (packet_cb_.last_payload_len_bytes() == 0) {
+      InsertAudio();
+      ASSERT_LT(loop_counter++, 10);
+    }
+    // Set |last_packet_number_| to one less that |num_calls| so that the packet
+    // will be fetched in the next InsertPacket() call.
+    last_packet_number_ = packet_cb_.num_calls() - 1;
+
+    StartThreads();
+  }
+
+  void RegisterCodec() override {
+    static_assert(kSampleRateHz == 16000, "test designed for iSAC 16 kHz");
+    AudioCodingModule::Codec("ISAC", &codec_, kSampleRateHz, 1);
+    codec_.pltype = kPayloadType;
+
+    // Register iSAC codec in ACM, effectively unregistering the PCM16B codec
+    // registered in AudioCodingModuleTestOldApi::SetUp();
+    ASSERT_EQ(0, acm_->RegisterReceiveCodec(codec_));
+    ASSERT_EQ(0, acm_->RegisterSendCodec(codec_));
+  }
+
+  void InsertPacket() {
+    int num_calls = packet_cb_.num_calls();  // Store locally for thread safety.
+    if (num_calls > last_packet_number_) {
+      // Get the new payload out from the callback handler.
+      // Note that since we swap buffers here instead of directly inserting
+      // a pointer to the data in |packet_cb_|, we avoid locking the callback
+      // for the duration of the IncomingPacket() call.
+      packet_cb_.SwapBuffers(&last_payload_vec_);
+      ASSERT_GT(last_payload_vec_.size(), 0u);
+      rtp_utility_->Forward(&rtp_header_);
+      last_packet_number_ = num_calls;
+    }
+    ASSERT_GT(last_payload_vec_.size(), 0u);
+    ASSERT_EQ(
+        0,
+        acm_->IncomingPacket(
+            &last_payload_vec_[0], last_payload_vec_.size(), rtp_header_));
+  }
+
+  void InsertAudio() {
+    // TODO(kwiberg): Use std::copy here. Might be complications because AFAICS
+    // this call confuses the number of samples with the number of bytes, and
+    // ends up copying only half of what it should.
+    memcpy(input_frame_.data_, audio_loop_.GetNextBlock().data(),
+           kNumSamples10ms);
+    AudioCodingModuleTestOldApi::InsertAudio();
+  }
+
+  // Override the verification function with no-op, since iSAC produces variable
+  // payload sizes.
+  void VerifyEncoding() override {}
+
+  // This method is the same as AudioCodingModuleMtTestOldApi::TestDone(), but
+  // here it is using the constants defined in this class (i.e., shorter test
+  // run).
+  virtual bool TestDone() {
+    if (packet_cb_.num_calls() > kNumPackets) {
+      CriticalSectionScoped lock(crit_sect_.get());
+      if (pull_audio_count_ > kNumPullCalls) {
+        // Both conditions for completion are met. End the test.
+        return true;
+      }
+    }
+    return false;
+  }
+
+  int last_packet_number_;
+  std::vector<uint8_t> last_payload_vec_;
+  test::AudioLoop audio_loop_;
+};
+
// Runs the multi-threaded iSAC test; disabled when iSAC is not compiled in.
TEST_F(AcmIsacMtTestOldApi, DISABLED_ON_IOS(IF_ISAC(DoTest))) {
  EXPECT_EQ(kEventSignaled, RunTest());
}
+
+class AcmReRegisterIsacMtTestOldApi : public AudioCodingModuleTestOldApi {
+ protected:
+  static const int kRegisterAfterNumPackets = 5;
+  static const int kNumPackets = 10;
+  static const int kPacketSizeMs = 30;
+  static const int kPacketSizeSamples = kPacketSizeMs * 16;
+
+  AcmReRegisterIsacMtTestOldApi()
+      : AudioCodingModuleTestOldApi(),
+        receive_thread_(
+            PlatformThread::CreateThread(CbReceiveThread, this, "receive")),
+        codec_registration_thread_(
+            PlatformThread::CreateThread(CbCodecRegistrationThread,
+                                         this,
+                                         "codec_registration")),
+        test_complete_(EventWrapper::Create()),
+        crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+        codec_registered_(false),
+        receive_packet_count_(0),
+        next_insert_packet_time_ms_(0),
+        fake_clock_(new SimulatedClock(0)) {
+    AudioEncoderIsac::Config config;
+    config.payload_type = kPayloadType;
+    isac_encoder_.reset(new AudioEncoderIsac(config));
+    clock_ = fake_clock_.get();
+  }
+
+  void SetUp() {
+    AudioCodingModuleTestOldApi::SetUp();
+    // Set up input audio source to read from specified file, loop after 5
+    // seconds, and deliver blocks of 10 ms.
+    const std::string input_file_name =
+        webrtc::test::ResourcePath("audio_coding/speech_mono_16kHz", "pcm");
+    audio_loop_.Init(input_file_name, 5 * kSampleRateHz, kNumSamples10ms);
+    RegisterCodec();  // Must be called before the threads start below.
+    StartThreads();
+  }
+
+  void RegisterCodec() override {
+    static_assert(kSampleRateHz == 16000, "test designed for iSAC 16 kHz");
+    AudioCodingModule::Codec("ISAC", &codec_, kSampleRateHz, 1);
+    codec_.pltype = kPayloadType;
+
+    // Register iSAC codec in ACM, effectively unregistering the PCM16B codec
+    // registered in AudioCodingModuleTestOldApi::SetUp();
+    // Only register the decoder for now. The encoder is registered later.
+    ASSERT_EQ(0, acm_->RegisterReceiveCodec(codec_));
+  }
+
+  void StartThreads() {
+    ASSERT_TRUE(receive_thread_->Start());
+    receive_thread_->SetPriority(kRealtimePriority);
+    ASSERT_TRUE(codec_registration_thread_->Start());
+    codec_registration_thread_->SetPriority(kRealtimePriority);
+  }
+
+  void TearDown() {
+    AudioCodingModuleTestOldApi::TearDown();
+    receive_thread_->Stop();
+    codec_registration_thread_->Stop();
+  }
+
+  EventTypeWrapper RunTest() {
+    return test_complete_->Wait(10 * 60 * 1000);  // 10 minutes' timeout.
+  }
+
+  static bool CbReceiveThread(void* context) {
+    return reinterpret_cast<AcmReRegisterIsacMtTestOldApi*>(context)
+        ->CbReceiveImpl();
+  }
+
+  bool CbReceiveImpl() {
+    SleepMs(1);
+    const size_t max_encoded_bytes = isac_encoder_->MaxEncodedBytes();
+    rtc::scoped_ptr<uint8_t[]> encoded(new uint8_t[max_encoded_bytes]);
+    AudioEncoder::EncodedInfo info;
+    {
+      CriticalSectionScoped lock(crit_sect_.get());
+      if (clock_->TimeInMilliseconds() < next_insert_packet_time_ms_) {
+        return true;
+      }
+      next_insert_packet_time_ms_ += kPacketSizeMs;
+      ++receive_packet_count_;
+
+      // Encode new frame.
+      uint32_t input_timestamp = rtp_header_.header.timestamp;
+      while (info.encoded_bytes == 0) {
+        info =
+            isac_encoder_->Encode(input_timestamp, audio_loop_.GetNextBlock(),
+                                  max_encoded_bytes, encoded.get());
+        input_timestamp += 160;  // 10 ms at 16 kHz.
+      }
+      EXPECT_EQ(rtp_header_.header.timestamp + kPacketSizeSamples,
+                input_timestamp);
+      EXPECT_EQ(rtp_header_.header.timestamp, info.encoded_timestamp);
+      EXPECT_EQ(rtp_header_.header.payloadType, info.payload_type);
+    }
+    // Now we're not holding the crit sect when calling ACM.
+
+    // Insert into ACM.
+    EXPECT_EQ(0, acm_->IncomingPacket(encoded.get(), info.encoded_bytes,
+                                      rtp_header_));
+
+    // Pull audio.
+    for (int i = 0; i < rtc::CheckedDivExact(kPacketSizeMs, 10); ++i) {
+      AudioFrame audio_frame;
+      EXPECT_EQ(0, acm_->PlayoutData10Ms(-1 /* default output frequency */,
+                                         &audio_frame));
+      fake_clock_->AdvanceTimeMilliseconds(10);
+    }
+    rtp_utility_->Forward(&rtp_header_);
+    return true;
+  }
+
+  static bool CbCodecRegistrationThread(void* context) {
+    return reinterpret_cast<AcmReRegisterIsacMtTestOldApi*>(context)
+        ->CbCodecRegistrationImpl();
+  }
+
+  bool CbCodecRegistrationImpl() {
+    SleepMs(1);
+    if (HasFatalFailure()) {
+      // End the test early if a fatal failure (ASSERT_*) has occurred.
+      test_complete_->Set();
+    }
+    CriticalSectionScoped lock(crit_sect_.get());
+    if (!codec_registered_ &&
+        receive_packet_count_ > kRegisterAfterNumPackets) {
+      // Register the iSAC encoder.
+      EXPECT_EQ(0, acm_->RegisterSendCodec(codec_));
+      codec_registered_ = true;
+    }
+    if (codec_registered_ && receive_packet_count_ > kNumPackets) {
+      test_complete_->Set();
+    }
+    return true;
+  }
+
+  rtc::scoped_ptr<PlatformThread> receive_thread_;
+  rtc::scoped_ptr<PlatformThread> codec_registration_thread_;
+  const rtc::scoped_ptr<EventWrapper> test_complete_;
+  const rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
+  bool codec_registered_ GUARDED_BY(crit_sect_);
+  int receive_packet_count_ GUARDED_BY(crit_sect_);
+  int64_t next_insert_packet_time_ms_ GUARDED_BY(crit_sect_);
+  rtc::scoped_ptr<AudioEncoderIsac> isac_encoder_;
+  rtc::scoped_ptr<SimulatedClock> fake_clock_;
+  test::AudioLoop audio_loop_;
+};
+
// Runs the concurrent codec re-registration test; requires iSAC support.
TEST_F(AcmReRegisterIsacMtTestOldApi, DISABLED_ON_IOS(IF_ISAC(DoTest))) {
  EXPECT_EQ(kEventSignaled, RunTest());
}
+
+// Disabling all of these tests on iOS until file support has been added.
+// See https://code.google.com/p/webrtc/issues/detail?id=4752 for details.
+#if !defined(WEBRTC_IOS)
+
+class AcmReceiverBitExactnessOldApi : public ::testing::Test {
+ public:
+  static std::string PlatformChecksum(std::string win64,
+                                      std::string android,
+                                      std::string others) {
+#if defined(_WIN32) && defined(WEBRTC_ARCH_64_BITS)
+    return win64;
+#elif defined(WEBRTC_ANDROID)
+    return android;
+#else
+    return others;
+#endif
+  }
+
+ protected:
+  struct ExternalDecoder {
+    int rtp_payload_type;
+    AudioDecoder* external_decoder;
+    int sample_rate_hz;
+    int num_channels;
+  };
+
+  void Run(int output_freq_hz,
+           const std::string& checksum_ref,
+           const std::vector<ExternalDecoder>& external_decoders) {
+    const std::string input_file_name =
+        webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");
+    rtc::scoped_ptr<test::RtpFileSource> packet_source(
+        test::RtpFileSource::Create(input_file_name));
+#ifdef WEBRTC_ANDROID
+    // Filter out iLBC and iSAC-swb since they are not supported on Android.
+    packet_source->FilterOutPayloadType(102);  // iLBC.
+    packet_source->FilterOutPayloadType(104);  // iSAC-swb.
+#endif
+
+    test::AudioChecksum checksum;
+    const std::string output_file_name =
+        webrtc::test::OutputPath() +
+        ::testing::UnitTest::GetInstance()
+            ->current_test_info()
+            ->test_case_name() +
+        "_" + ::testing::UnitTest::GetInstance()->current_test_info()->name() +
+        "_output.pcm";
+    test::OutputAudioFile output_file(output_file_name);
+    test::AudioSinkFork output(&checksum, &output_file);
+
+    test::AcmReceiveTestOldApi test(
+        packet_source.get(),
+        &output,
+        output_freq_hz,
+        test::AcmReceiveTestOldApi::kArbitraryChannels);
+    ASSERT_NO_FATAL_FAILURE(test.RegisterNetEqTestCodecs());
+    for (const auto& ed : external_decoders) {
+      ASSERT_EQ(0, test.RegisterExternalReceiveCodec(
+                       ed.rtp_payload_type, ed.external_decoder,
+                       ed.sample_rate_hz, ed.num_channels));
+    }
+    test.Run();
+
+    std::string checksum_string = checksum.Finish();
+    EXPECT_EQ(checksum_ref, checksum_string);
+  }
+};
+
// Enabled only when all codecs used by the universal RTP dump are compiled
// in: iSAC (floating- or fixed-point), iLBC and G.722. The original condition
// tested WEBRTC_CODEC_ISAC twice; the second operand must be the fixed-point
// variant, WEBRTC_CODEC_ISACFX.
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) && \
    defined(WEBRTC_CODEC_ILBC) && defined(WEBRTC_CODEC_G722)
#define IF_ALL_CODECS(x) x
#else
#define IF_ALL_CODECS(x) DISABLED_##x
#endif
+
// Receiver bit-exactness at the four supported output rates. Each checksum
// triple is ordered (win64, android, others); see PlatformChecksum().

// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
#define MAYBE_8kHzOutput DISABLED_8kHzOutput
#else
#define MAYBE_8kHzOutput 8kHzOutput
#endif
TEST_F(AcmReceiverBitExactnessOldApi, IF_ALL_CODECS(MAYBE_8kHzOutput)) {
  Run(8000, PlatformChecksum("dcee98c623b147ebe1b40dd30efa896e",
                             "adc92e173f908f93b96ba5844209815a",
                             "908002dc01fc4eb1d2be24eb1d3f354b"),
      std::vector<ExternalDecoder>());
}

// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
#define MAYBE_16kHzOutput DISABLED_16kHzOutput
#else
#define MAYBE_16kHzOutput 16kHzOutput
#endif
TEST_F(AcmReceiverBitExactnessOldApi, IF_ALL_CODECS(MAYBE_16kHzOutput)) {
  Run(16000, PlatformChecksum("f790e7a8cce4e2c8b7bb5e0e4c5dac0d",
                              "8cffa6abcb3e18e33b9d857666dff66a",
                              "a909560b5ca49fa472b17b7b277195e9"),
      std::vector<ExternalDecoder>());
}

// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
#define MAYBE_32kHzOutput DISABLED_32kHzOutput
#else
#define MAYBE_32kHzOutput 32kHzOutput
#endif
TEST_F(AcmReceiverBitExactnessOldApi, IF_ALL_CODECS(MAYBE_32kHzOutput)) {
  Run(32000, PlatformChecksum("306e0d990ee6e92de3fbecc0123ece37",
                              "3e126fe894720c3f85edadcc91964ba5",
                              "441aab4b347fb3db4e9244337aca8d8e"),
      std::vector<ExternalDecoder>());
}

// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
#define MAYBE_48kHzOutput DISABLED_48kHzOutput
#else
#define MAYBE_48kHzOutput 48kHzOutput
#endif
TEST_F(AcmReceiverBitExactnessOldApi, IF_ALL_CODECS(MAYBE_48kHzOutput)) {
  Run(48000, PlatformChecksum("aa7c232f63a67b2a72703593bdd172e0",
                              "0155665e93067c4e89256b944dd11999",
                              "4ee2730fa1daae755e8a8fd3abd779ec"),
      std::vector<ExternalDecoder>());
}
+
+// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
+#if defined(WEBRTC_ANDROID) && defined(__aarch64__)
+#define MAYBE_48kHzOutputExternalDecoder DISABLED_48kHzOutputExternalDecoder
+#else
+#define MAYBE_48kHzOutputExternalDecoder 48kHzOutputExternalDecoder
+#endif
+TEST_F(AcmReceiverBitExactnessOldApi,
+       IF_ALL_CODECS(MAYBE_48kHzOutputExternalDecoder)) {
+  AudioDecoderPcmU decoder(1);
+  MockAudioDecoder mock_decoder;
+  // Set expectations on the mock decoder and also delegate the calls to the
+  // real decoder.
+  EXPECT_CALL(mock_decoder, IncomingPacket(_, _, _, _, _))
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&decoder, &AudioDecoderPcmU::IncomingPacket));
+  EXPECT_CALL(mock_decoder, Channels())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&decoder, &AudioDecoderPcmU::Channels));
+  EXPECT_CALL(mock_decoder, Decode(_, _, _, _, _, _))
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&decoder, &AudioDecoderPcmU::Decode));
+  EXPECT_CALL(mock_decoder, HasDecodePlc())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&decoder, &AudioDecoderPcmU::HasDecodePlc));
+  EXPECT_CALL(mock_decoder, PacketDuration(_, _))
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&decoder, &AudioDecoderPcmU::PacketDuration));
+  ExternalDecoder ed;
+  ed.rtp_payload_type = 0;
+  ed.external_decoder = &mock_decoder;
+  ed.sample_rate_hz = 8000;
+  ed.num_channels = 1;
+  std::vector<ExternalDecoder> external_decoders;
+  external_decoders.push_back(ed);
+
+  Run(48000, PlatformChecksum("aa7c232f63a67b2a72703593bdd172e0",
+                              "0155665e93067c4e89256b944dd11999",
+                              "4ee2730fa1daae755e8a8fd3abd779ec"),
+      external_decoders);
+
+  EXPECT_CALL(mock_decoder, Die());
+}
+
+// This test verifies bit exactness for the send-side of ACM. The test setup is
+// a chain of three different test classes:
+//
+// test::AcmSendTest -> AcmSenderBitExactness -> test::AcmReceiveTest
+//
+// The receiver side is driving the test by requesting new packets from
+// AcmSenderBitExactness::NextPacket(). This method, in turn, asks for the
+// packet from test::AcmSendTest::NextPacket, which inserts audio from the
+// input file until one packet is produced. (The input file loops indefinitely.)
+// Before passing the packet to the receiver, this test class verifies the
+// packet header and updates a payload checksum with the new payload. The
+// decoded output from the receiver is also verified with a (separate) checksum.
+class AcmSenderBitExactnessOldApi : public ::testing::Test,
+                                    public test::PacketSource {
+ protected:
+  static const int kTestDurationMs = 1000;
+
+  AcmSenderBitExactnessOldApi()
+      : frame_size_rtp_timestamps_(0),
+        packet_count_(0),
+        payload_type_(0),
+        last_sequence_number_(0),
+        last_timestamp_(0) {}
+
+  // Sets up the test::AcmSendTest object. Returns true on success, otherwise
+  // false.
+  bool SetUpSender() {
+    const std::string input_file_name =
+        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+    // Note that |audio_source_| will loop forever. The test duration is set
+    // explicitly by |kTestDurationMs|.
+    audio_source_.reset(new test::InputAudioFile(input_file_name));
+    static const int kSourceRateHz = 32000;
+    send_test_.reset(new test::AcmSendTestOldApi(
+        audio_source_.get(), kSourceRateHz, kTestDurationMs));
+    return send_test_.get() != NULL;
+  }
+
+  // Registers a send codec in the test::AcmSendTest object. Returns true on
+  // success, false on failure.
+  bool RegisterSendCodec(const char* payload_name,
+                         int sampling_freq_hz,
+                         int channels,
+                         int payload_type,
+                         int frame_size_samples,
+                         int frame_size_rtp_timestamps) {
+    payload_type_ = payload_type;
+    frame_size_rtp_timestamps_ = frame_size_rtp_timestamps;
+    return send_test_->RegisterCodec(payload_name,
+                                     sampling_freq_hz,
+                                     channels,
+                                     payload_type,
+                                     frame_size_samples);
+  }
+
+  bool RegisterExternalSendCodec(AudioEncoder* external_speech_encoder,
+                                 int payload_type) {
+    payload_type_ = payload_type;
+    frame_size_rtp_timestamps_ =
+        external_speech_encoder->Num10MsFramesInNextPacket() *
+        external_speech_encoder->RtpTimestampRateHz() / 100;
+    return send_test_->RegisterExternalCodec(external_speech_encoder);
+  }
+
+  // Runs the test. SetUpSender() and RegisterSendCodec() must have been called
+  // before calling this method.
+  void Run(const std::string& audio_checksum_ref,
+           const std::string& payload_checksum_ref,
+           int expected_packets,
+           test::AcmReceiveTestOldApi::NumOutputChannels expected_channels) {
+    // Set up the receiver used to decode the packets and verify the decoded
+    // output.
+    test::AudioChecksum audio_checksum;
+    const std::string output_file_name =
+        webrtc::test::OutputPath() +
+        ::testing::UnitTest::GetInstance()
+            ->current_test_info()
+            ->test_case_name() +
+        "_" + ::testing::UnitTest::GetInstance()->current_test_info()->name() +
+        "_output.pcm";
+    test::OutputAudioFile output_file(output_file_name);
+    // Have the output audio sent both to file and to the checksum calculator.
+    test::AudioSinkFork output(&audio_checksum, &output_file);
+    const int kOutputFreqHz = 8000;
+    test::AcmReceiveTestOldApi receive_test(
+        this, &output, kOutputFreqHz, expected_channels);
+    ASSERT_NO_FATAL_FAILURE(receive_test.RegisterDefaultCodecs());
+
+    // This is where the actual test is executed.
+    receive_test.Run();
+
+    // Extract and verify the audio checksum.
+    std::string checksum_string = audio_checksum.Finish();
+    EXPECT_EQ(audio_checksum_ref, checksum_string);
+
+    // Extract and verify the payload checksum.
+    char checksum_result[rtc::Md5Digest::kSize];
+    payload_checksum_.Finish(checksum_result, rtc::Md5Digest::kSize);
+    checksum_string = rtc::hex_encode(checksum_result, rtc::Md5Digest::kSize);
+    EXPECT_EQ(payload_checksum_ref, checksum_string);
+
+    // Verify number of packets produced.
+    EXPECT_EQ(expected_packets, packet_count_);
+  }
+
+  // Returns a pointer to the next packet. Returns NULL if the source is
+  // depleted (i.e., the test duration is exceeded), or if an error occurred.
+  // Inherited from test::PacketSource.
+  test::Packet* NextPacket() override {
+    // Get the next packet from AcmSendTest. Ownership of |packet| is
+    // transferred to this method.
+    test::Packet* packet = send_test_->NextPacket();
+    if (!packet)
+      return NULL;
+
+    VerifyPacket(packet);
+    // TODO(henrik.lundin) Save the packet to file as well.
+
+    // Pass it on to the caller. The caller becomes the owner of |packet|.
+    return packet;
+  }
+
+  // Verifies the packet.
+  void VerifyPacket(const test::Packet* packet) {
+    EXPECT_TRUE(packet->valid_header());
+    // (We can check the header fields even if valid_header() is false.)
+    EXPECT_EQ(payload_type_, packet->header().payloadType);
+    if (packet_count_ > 0) {
+      // This is not the first packet.
+      uint16_t sequence_number_diff =
+          packet->header().sequenceNumber - last_sequence_number_;
+      EXPECT_EQ(1, sequence_number_diff);
+      uint32_t timestamp_diff = packet->header().timestamp - last_timestamp_;
+      EXPECT_EQ(frame_size_rtp_timestamps_, timestamp_diff);
+    }
+    ++packet_count_;
+    last_sequence_number_ = packet->header().sequenceNumber;
+    last_timestamp_ = packet->header().timestamp;
+    // Update the checksum.
+    payload_checksum_.Update(packet->payload(), packet->payload_length_bytes());
+  }
+
+  void SetUpTest(const char* codec_name,
+                 int codec_sample_rate_hz,
+                 int channels,
+                 int payload_type,
+                 int codec_frame_size_samples,
+                 int codec_frame_size_rtp_timestamps) {
+    ASSERT_TRUE(SetUpSender());
+    ASSERT_TRUE(RegisterSendCodec(codec_name,
+                                  codec_sample_rate_hz,
+                                  channels,
+                                  payload_type,
+                                  codec_frame_size_samples,
+                                  codec_frame_size_rtp_timestamps));
+  }
+
+  void SetUpTestExternalEncoder(AudioEncoder* external_speech_encoder,
+                                int payload_type) {
+    ASSERT_TRUE(SetUpSender());
+    ASSERT_TRUE(
+        RegisterExternalSendCodec(external_speech_encoder, payload_type));
+  }
+
+  rtc::scoped_ptr<test::AcmSendTestOldApi> send_test_;
+  rtc::scoped_ptr<test::InputAudioFile> audio_source_;
+  uint32_t frame_size_rtp_timestamps_;
+  int packet_count_;
+  uint8_t payload_type_;
+  uint16_t last_sequence_number_;
+  uint32_t last_timestamp_;
+  rtc::Md5Digest payload_checksum_;
+};
+
// Sender bit-exactness for wideband iSAC at 30 ms and 60 ms frames. The two
// checksum triples are (decoded audio, RTP payloads), each ordered
// (win64, android, others).

// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
#define MAYBE_IsacWb30ms DISABLED_IsacWb30ms
#else
#define MAYBE_IsacWb30ms IsacWb30ms
#endif
TEST_F(AcmSenderBitExactnessOldApi, IF_ISAC(MAYBE_IsacWb30ms)) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 480, 480));
  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "c7e5bdadfa2871df95639fcc297cf23d",
          "0499ca260390769b3172136faad925b9",
          "0b58f9eeee43d5891f5f6c75e77984a3"),
      AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "d42cb5195463da26c8129bbfe73a22e6",
          "83de248aea9c3c2bd680b6952401b4ca",
          "3c79f16f34218271f3dca4e2b1dfe1bb"),
      33,
      test::AcmReceiveTestOldApi::kMonoOutput);
}

// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
#define MAYBE_IsacWb60ms DISABLED_IsacWb60ms
#else
#define MAYBE_IsacWb60ms IsacWb60ms
#endif
TEST_F(AcmSenderBitExactnessOldApi, IF_ISAC(MAYBE_IsacWb60ms)) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 960, 960));
  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "14d63c5f08127d280e722e3191b73bdd",
          "8da003e16c5371af2dc2be79a50f9076",
          "1ad29139a04782a33daad8c2b9b35875"),
      AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "ebe04a819d3a9d83a83a17f271e1139a",
          "97aeef98553b5a4b5a68f8b716e8eaf0",
          "9e0a0ab743ad987b55b8e14802769c56"),
      16,
      test::AcmReceiveTestOldApi::kMonoOutput);
}
+
// IF_ISAC_FLOAT gates tests on the floating-point iSAC implementation only
// (super-wideband iSAC is not available in the fixed-point build).
#ifdef WEBRTC_CODEC_ISAC
#define IF_ISAC_FLOAT(x) x
#else
#define IF_ISAC_FLOAT(x) DISABLED_##x
#endif

// Super-wideband iSAC, 30 ms frames. The Android reference strings are empty
// because the test is disabled on Android.
TEST_F(AcmSenderBitExactnessOldApi,
       DISABLED_ON_ANDROID(IF_ISAC_FLOAT(IsacSwb30ms))) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 32000, 1, 104, 960, 960));
  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "2b3c387d06f00b7b7aad4c9be56fb83d",
          "",
          "5683b58da0fbf2063c7adc2e6bfb3fb8"),
      AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "bcc2041e7744c7ebd9f701866856849c",
          "",
          "ce86106a93419aefb063097108ec94ab"),
      33, test::AcmReceiveTestOldApi::kMonoOutput);
}
+
// PCM16 and G.711 (PCMU/PCMA) sender bit-exactness. These codecs are
// platform-independent, so a single reference checksum pair (audio, payload)
// is used instead of PlatformChecksum().
TEST_F(AcmSenderBitExactnessOldApi, Pcm16_8000khz_10ms) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
  Run("de4a98e1406f8b798d99cd0704e862e2",
      "c1edd36339ce0326cc4550041ad719a0",
      100,
      test::AcmReceiveTestOldApi::kMonoOutput);
}

TEST_F(AcmSenderBitExactnessOldApi, Pcm16_16000khz_10ms) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 16000, 1, 108, 160, 160));
  Run("ae646d7b68384a1269cc080dd4501916",
      "ad786526383178b08d80d6eee06e9bad",
      100,
      test::AcmReceiveTestOldApi::kMonoOutput);
}

TEST_F(AcmSenderBitExactnessOldApi, Pcm16_32000khz_10ms) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 32000, 1, 109, 320, 320));
  Run("7fe325e8fbaf755e3c5df0b11a4774fb",
      "5ef82ea885e922263606c6fdbc49f651",
      100,
      test::AcmReceiveTestOldApi::kMonoOutput);
}

TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_8000khz_10ms) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 2, 111, 80, 80));
  Run("fb263b74e7ac3de915474d77e4744ceb",
      "62ce5adb0d4965d0a52ec98ae7f98974",
      100,
      test::AcmReceiveTestOldApi::kStereoOutput);
}

TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_16000khz_10ms) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 16000, 2, 112, 160, 160));
  Run("d09e9239553649d7ac93e19d304281fd",
      "41ca8edac4b8c71cd54fd9f25ec14870",
      100,
      test::AcmReceiveTestOldApi::kStereoOutput);
}

TEST_F(AcmSenderBitExactnessOldApi, Pcm16_stereo_32000khz_10ms) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 32000, 2, 113, 320, 320));
  Run("5f025d4f390982cc26b3d92fe02e3044",
      "50e58502fb04421bf5b857dda4c96879",
      100,
      test::AcmReceiveTestOldApi::kStereoOutput);
}

TEST_F(AcmSenderBitExactnessOldApi, Pcmu_20ms) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMU", 8000, 1, 0, 160, 160));
  Run("81a9d4c0bb72e9becc43aef124c981e9",
      "8f9b8750bd80fe26b6cbf6659b89f0f9",
      50,
      test::AcmReceiveTestOldApi::kMonoOutput);
}

TEST_F(AcmSenderBitExactnessOldApi, Pcma_20ms) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMA", 8000, 1, 8, 160, 160));
  Run("39611f798969053925a49dc06d08de29",
      "6ad745e55aa48981bfc790d0eeef2dd1",
      50,
      test::AcmReceiveTestOldApi::kMonoOutput);
}

TEST_F(AcmSenderBitExactnessOldApi, Pcmu_stereo_20ms) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMU", 8000, 2, 110, 160, 160));
  Run("437bec032fdc5cbaa0d5175430af7b18",
      "60b6f25e8d1e74cb679cfe756dd9bca5",
      50,
      test::AcmReceiveTestOldApi::kStereoOutput);
}

TEST_F(AcmSenderBitExactnessOldApi, Pcma_stereo_20ms) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("PCMA", 8000, 2, 118, 160, 160));
  Run("a5c6d83c5b7cedbeff734238220a4b0c",
      "92b282c83efd20e7eeef52ba40842cf7",
      50,
      test::AcmReceiveTestOldApi::kStereoOutput);
}
+
// IF_ILBC gates tests on iLBC support being compiled in.
#ifdef WEBRTC_CODEC_ILBC
#define IF_ILBC(x) x
#else
#define IF_ILBC(x) DISABLED_##x
#endif

// iLBC, 30 ms frames. The Android entries are placeholders since the test is
// disabled on Android.
TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(IF_ILBC(Ilbc_30ms))) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("ILBC", 8000, 1, 102, 240, 240));
  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "7b6ec10910debd9af08011d3ed5249f7",
          "android_audio",
          "7b6ec10910debd9af08011d3ed5249f7"),
      AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "cfae2e9f6aba96e145f2bcdd5050ce78",
          "android_payload",
          "cfae2e9f6aba96e145f2bcdd5050ce78"),
      33,
      test::AcmReceiveTestOldApi::kMonoOutput);
}
+
// IF_G722 gates tests on G.722 support being compiled in.
#ifdef WEBRTC_CODEC_G722
#define IF_G722(x) x
#else
#define IF_G722(x) DISABLED_##x
#endif

// G.722 mono, 20 ms frames. Android entries are placeholders (disabled).
TEST_F(AcmSenderBitExactnessOldApi, DISABLED_ON_ANDROID(IF_G722(G722_20ms))) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 1, 9, 320, 160));
  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "7d759436f2533582950d148b5161a36c",
          "android_audio",
          "7d759436f2533582950d148b5161a36c"),
      AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "fc68a87e1380614e658087cb35d5ca10",
          "android_payload",
          "fc68a87e1380614e658087cb35d5ca10"),
      50,
      test::AcmReceiveTestOldApi::kMonoOutput);
}

// G.722 stereo, 20 ms frames. Android entries are placeholders (disabled).
TEST_F(AcmSenderBitExactnessOldApi,
       DISABLED_ON_ANDROID(IF_G722(G722_stereo_20ms))) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("G722", 16000, 2, 119, 320, 160));
  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "7190ee718ab3d80eca181e5f7140c210",
          "android_audio",
          "7190ee718ab3d80eca181e5f7140c210"),
      AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "66516152eeaa1e650ad94ff85f668dac",
          "android_payload",
          "66516152eeaa1e650ad94ff85f668dac"),
      50,
      test::AcmReceiveTestOldApi::kStereoOutput);
}
+
// Opus stereo, 20 ms frames, with default and kVoip application modes.

// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
#define MAYBE_Opus_stereo_20ms DISABLED_Opus_stereo_20ms
#else
#define MAYBE_Opus_stereo_20ms Opus_stereo_20ms
#endif
TEST_F(AcmSenderBitExactnessOldApi, MAYBE_Opus_stereo_20ms) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 2, 120, 960, 960));
  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "855041f2490b887302bce9d544731849",
          "1e1a0fce893fef2d66886a7f09e2ebce",
          "855041f2490b887302bce9d544731849"),
      AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "d781cce1ab986b618d0da87226cdde30",
          "1a1fe04dd12e755949987c8d729fb3e0",
          "d781cce1ab986b618d0da87226cdde30"),
      50,
      test::AcmReceiveTestOldApi::kStereoOutput);
}

// Fails Android ARM64. https://code.google.com/p/webrtc/issues/detail?id=4199
#if defined(WEBRTC_ANDROID) && defined(WEBRTC_ARCH_ARM64)
#define MAYBE_Opus_stereo_20ms_voip DISABLED_Opus_stereo_20ms_voip
#else
#define MAYBE_Opus_stereo_20ms_voip Opus_stereo_20ms_voip
#endif
TEST_F(AcmSenderBitExactnessOldApi, MAYBE_Opus_stereo_20ms_voip) {
  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 2, 120, 960, 960));
  // If not set, default will be kAudio in case of stereo.
  EXPECT_EQ(0, send_test_->acm()->SetOpusApplication(kVoip));
  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "9b9e12bc3cc793740966e11cbfa8b35b",
          "57412a4b5771d19ff03ec35deffe7067",
          "9b9e12bc3cc793740966e11cbfa8b35b"),
      AcmReceiverBitExactnessOldApi::PlatformChecksum(
          "c7340b1189652ab6b5e80dade7390cb4",
          "cdfe85939c411d12b61701c566e22d26",
          "c7340b1189652ab6b5e80dade7390cb4"),
      50,
      test::AcmReceiveTestOldApi::kStereoOutput);
}
+
+// This test is for verifying the SetBitRate function. The bitrate is changed
+// at the beginning, and the number of generated bytes is checked.
+class AcmSetBitRateOldApi : public ::testing::Test {
+ protected:
+  static const int kTestDurationMs = 1000;
+
+  // Sets up the test::AcmSendTest object. Returns true on success, otherwise
+  // false.
+  bool SetUpSender() {
+    const std::string input_file_name =
+        webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
+    // Note that |audio_source_| will loop forever. The test duration is set
+    // explicitly by |kTestDurationMs|.
+    audio_source_.reset(new test::InputAudioFile(input_file_name));
+    static const int kSourceRateHz = 32000;
+    send_test_.reset(new test::AcmSendTestOldApi(
+        audio_source_.get(), kSourceRateHz, kTestDurationMs));
+    // Implicit pointer-to-bool conversion: true iff allocation succeeded.
+    return send_test_.get();
+  }
+
+  // Registers a send codec in the test::AcmSendTest object. Returns true on
+  // success, false on failure. |frame_size_rtp_timestamps| is unused here but
+  // kept in the signature so subclasses (e.g. AcmChangeBitRateOldApi) can
+  // record it.
+  virtual bool RegisterSendCodec(const char* payload_name,
+                                 int sampling_freq_hz,
+                                 int channels,
+                                 int payload_type,
+                                 int frame_size_samples,
+                                 int frame_size_rtp_timestamps) {
+    return send_test_->RegisterCodec(payload_name, sampling_freq_hz, channels,
+                                     payload_type, frame_size_samples);
+  }
+
+  // Runs the test. SetUpSender() and RegisterSendCodec() must have been called
+  // before calling this method. Sets the target bitrate, drains every packet
+  // from the sender, and checks the total payload size in bits.
+  void Run(int target_bitrate_bps, int expected_total_bits) {
+    ASSERT_TRUE(send_test_->acm());
+    send_test_->acm()->SetBitRate(target_bitrate_bps);
+    int nr_bytes = 0;
+    while (test::Packet* next_packet = send_test_->NextPacket()) {
+      nr_bytes += next_packet->payload_length_bytes();
+      delete next_packet;  // NextPacket() transfers ownership to the caller.
+    }
+    EXPECT_EQ(expected_total_bits, nr_bytes * 8);
+  }
+
+  // Convenience helper: sets up the sender and registers the send codec,
+  // failing the current test fatally on error.
+  void SetUpTest(const char* codec_name,
+                 int codec_sample_rate_hz,
+                 int channels,
+                 int payload_type,
+                 int codec_frame_size_samples,
+                 int codec_frame_size_rtp_timestamps) {
+    ASSERT_TRUE(SetUpSender());
+    ASSERT_TRUE(RegisterSendCodec(codec_name, codec_sample_rate_hz, channels,
+                                  payload_type, codec_frame_size_samples,
+                                  codec_frame_size_rtp_timestamps));
+  }
+
+  rtc::scoped_ptr<test::AcmSendTestOldApi> send_test_;
+  rtc::scoped_ptr<test::InputAudioFile> audio_source_;
+};
+
+// Expected totals differ per platform because the Opus encoder output is not
+// bit-exact across architectures.
+TEST_F(AcmSetBitRateOldApi, Opus_48khz_20ms_10kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 1, 107, 960, 960));
+#if defined(WEBRTC_ANDROID)
+  Run(10000, 9328);
+#else
+  Run(10000, 9072);
+#endif  // WEBRTC_ANDROID
+}
+
+TEST_F(AcmSetBitRateOldApi, Opus_48khz_20ms_50kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 1, 107, 960, 960));
+#if defined(WEBRTC_ANDROID)
+  Run(50000, 47952);
+#else
+  Run(50000, 49600);
+#endif  // WEBRTC_ANDROID
+}
+
+// The result on the Android platforms is inconsistent for this test case.
+// On android_rel the result is different from android and android arm64 rel.
+TEST_F(AcmSetBitRateOldApi, DISABLED_ON_ANDROID(Opus_48khz_20ms_100kbps)) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 1, 107, 960, 960));
+  Run(100000, 100888);
+}
+
+// These next 2 tests ensure that the SetBitRate function has no effect on PCM.
+TEST_F(AcmSetBitRateOldApi, Pcm16_8khz_10ms_8kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
+  Run(8000, 128000);
+}
+
+TEST_F(AcmSetBitRateOldApi, Pcm16_8khz_10ms_32kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
+  Run(32000, 128000);
+}
+
+// This test is for verifying the SetBitRate function. The bitrate is changed
+// in the middle of the run, and the number of generated bytes before and
+// after the change is checked.
+class AcmChangeBitRateOldApi : public AcmSetBitRateOldApi {
+ protected:
+  AcmChangeBitRateOldApi() : sampling_freq_hz_(0), frame_size_samples_(0) {}
+
+  // Registers a send codec in the test::AcmSendTest object. Returns true on
+  // success, false on failure. Also records the sample rate and frame size,
+  // which Run() needs to compute the packet count at which to switch bitrate.
+  bool RegisterSendCodec(const char* payload_name,
+                         int sampling_freq_hz,
+                         int channels,
+                         int payload_type,
+                         int frame_size_samples,
+                         int frame_size_rtp_timestamps) override {
+    frame_size_samples_ = frame_size_samples;
+    sampling_freq_hz_ = sampling_freq_hz;
+    return AcmSetBitRateOldApi::RegisterSendCodec(
+        payload_name, sampling_freq_hz, channels, payload_type,
+        frame_size_samples, frame_size_rtp_timestamps);
+  }
+
+  // Runs the test. SetUpSender() and RegisterSendCodec() must have been called
+  // before calling this method. Switches to |target_bitrate_bps| halfway
+  // through the expected packet count and checks the payload bit totals on
+  // each side of the switch.
+  void Run(int target_bitrate_bps,
+           int expected_before_switch_bits,
+           int expected_after_switch_bits) {
+    ASSERT_TRUE(send_test_->acm());
+    int nr_packets =
+        sampling_freq_hz_ * kTestDurationMs / (frame_size_samples_ * 1000);
+    int nr_bytes_before = 0, nr_bytes_after = 0;
+    int packet_counter = 0;
+    while (test::Packet* next_packet = send_test_->NextPacket()) {
+      if (packet_counter == nr_packets / 2)
+        send_test_->acm()->SetBitRate(target_bitrate_bps);
+      if (packet_counter < nr_packets / 2)
+        nr_bytes_before += next_packet->payload_length_bytes();
+      else
+        nr_bytes_after += next_packet->payload_length_bytes();
+      packet_counter++;
+      delete next_packet;  // NextPacket() transfers ownership to the caller.
+    }
+    EXPECT_EQ(expected_before_switch_bits, nr_bytes_before * 8);
+    EXPECT_EQ(expected_after_switch_bits, nr_bytes_after * 8);
+  }
+
+  // Recorded by RegisterSendCodec(); assigned from its int parameters.
+  uint32_t sampling_freq_hz_;
+  uint32_t frame_size_samples_;
+};
+
+// Mid-stream bitrate switches for Opus. Per-platform expectations: Opus
+// output is not bit-exact across architectures.
+TEST_F(AcmChangeBitRateOldApi, Opus_48khz_20ms_10kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 1, 107, 960, 960));
+#if defined(WEBRTC_ANDROID)
+  Run(10000, 32200, 5496);
+#else
+  Run(10000, 32200, 5432);
+#endif // WEBRTC_ANDROID
+}
+
+TEST_F(AcmChangeBitRateOldApi, Opus_48khz_20ms_50kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 1, 107, 960, 960));
+#if defined(WEBRTC_ANDROID)
+  Run(50000, 32200, 24912);
+#else
+  Run(50000, 32200, 24792);
+#endif // WEBRTC_ANDROID
+}
+
+TEST_F(AcmChangeBitRateOldApi, Opus_48khz_20ms_100kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 1, 107, 960, 960));
+#if defined(WEBRTC_ANDROID)
+  Run(100000, 32200, 51480);
+#else
+  Run(100000, 32200, 50584);
+#endif // WEBRTC_ANDROID
+}
+
+// These next 2 tests ensure that the SetBitRate function has no effect on PCM
+TEST_F(AcmChangeBitRateOldApi, Pcm16_8khz_10ms_8kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
+  Run(8000, 64000, 64000);
+}
+
+TEST_F(AcmChangeBitRateOldApi, Pcm16_8khz_10ms_32kbps) {
+  ASSERT_NO_FATAL_FAILURE(SetUpTest("L16", 8000, 1, 107, 80, 80));
+  Run(32000, 64000, 64000);
+}
+
+// Verifies bit-exactness when registering an external encoder: a mock
+// AudioEncoder that delegates every call to a real AudioEncoderPcmU, so the
+// output must match plain PCMu while proving the external-encoder path is
+// exercised (via the AtLeast(1) expectations).
+TEST_F(AcmSenderBitExactnessOldApi, External_Pcmu_20ms) {
+  CodecInst codec_inst;
+  codec_inst.channels = 1;
+  codec_inst.pacsize = 160;
+  codec_inst.pltype = 0;
+  AudioEncoderPcmU encoder(codec_inst);
+  MockAudioEncoder mock_encoder;
+  // Set expectations on the mock encoder and also delegate the calls to the
+  // real encoder.
+  EXPECT_CALL(mock_encoder, MaxEncodedBytes())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::MaxEncodedBytes));
+  EXPECT_CALL(mock_encoder, SampleRateHz())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::SampleRateHz));
+  EXPECT_CALL(mock_encoder, NumChannels())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::NumChannels));
+  EXPECT_CALL(mock_encoder, RtpTimestampRateHz())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::RtpTimestampRateHz));
+  EXPECT_CALL(mock_encoder, Num10MsFramesInNextPacket())
+      .Times(AtLeast(1))
+      .WillRepeatedly(
+          Invoke(&encoder, &AudioEncoderPcmU::Num10MsFramesInNextPacket));
+  EXPECT_CALL(mock_encoder, Max10MsFramesInAPacket())
+      .Times(AtLeast(1))
+      .WillRepeatedly(
+          Invoke(&encoder, &AudioEncoderPcmU::Max10MsFramesInAPacket));
+  EXPECT_CALL(mock_encoder, GetTargetBitrate())
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::GetTargetBitrate));
+  EXPECT_CALL(mock_encoder, EncodeInternal(_, _, _, _))
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::EncodeInternal));
+  EXPECT_CALL(mock_encoder, SetFec(_))
+      .Times(AtLeast(1))
+      .WillRepeatedly(Invoke(&encoder, &AudioEncoderPcmU::SetFec));
+  ASSERT_NO_FATAL_FAILURE(
+      SetUpTestExternalEncoder(&mock_encoder, codec_inst.pltype));
+  Run("81a9d4c0bb72e9becc43aef124c981e9", "8f9b8750bd80fe26b6cbf6659b89f0f9",
+      50, test::AcmReceiveTestOldApi::kMonoOutput);
+}
+
+// This test fixture is implemented to run ACM and change the desired output
+// frequency during the call. The input packets are simply PCM16b-wb encoded
+// payloads with a constant value of |kSampleValue|. The test fixture itself
+// acts as PacketSource in between the receive test class and the constant-
+// payload packet source class. The output is both written to file, and analyzed
+// in this test fixture.
+class AcmSwitchingOutputFrequencyOldApi : public ::testing::Test,
+                                          public test::PacketSource,
+                                          public test::AudioSink {
+ protected:
+  static const size_t kTestNumPackets = 50;
+  static const int kEncodedSampleRateHz = 16000;
+  static const size_t kPayloadLenSamples = 30 * kEncodedSampleRateHz / 1000;
+  static const int kPayloadType = 108;  // Default payload type for PCM16b-wb.
+
+  AcmSwitchingOutputFrequencyOldApi()
+      : first_output_(true),
+        num_packets_(0),
+        packet_source_(kPayloadLenSamples,
+                       kSampleValue,
+                       kEncodedSampleRateHz,
+                       kPayloadType),
+        output_freq_2_(0),
+        has_toggled_(false) {}
+
+  // Runs the receive test, toggling the output frequency between
+  // |output_freq_1| and |output_freq_2| every |toggle_period_ms|. Output is
+  // written to a per-test file and verified sample-by-sample via WriteArray().
+  void Run(int output_freq_1, int output_freq_2, int toggle_period_ms) {
+    // Set up the receiver used to decode the packets and verify the decoded
+    // output.
+    const std::string output_file_name =
+        webrtc::test::OutputPath() +
+        ::testing::UnitTest::GetInstance()
+            ->current_test_info()
+            ->test_case_name() +
+        "_" + ::testing::UnitTest::GetInstance()->current_test_info()->name() +
+        "_output.pcm";
+    test::OutputAudioFile output_file(output_file_name);
+    // Have the output audio sent both to file and to the WriteArray method in
+    // this class.
+    test::AudioSinkFork output(this, &output_file);
+    test::AcmReceiveTestToggleOutputFreqOldApi receive_test(
+        this,
+        &output,
+        output_freq_1,
+        output_freq_2,
+        toggle_period_ms,
+        test::AcmReceiveTestOldApi::kMonoOutput);
+    ASSERT_NO_FATAL_FAILURE(receive_test.RegisterDefaultCodecs());
+    output_freq_2_ = output_freq_2;
+
+    // This is where the actual test is executed.
+    receive_test.Run();
+  }
+
+  // Inherited from test::PacketSource.
+  test::Packet* NextPacket() override {
+    // Check if it is time to terminate the test. The packet source is of type
+    // ConstantPcmPacketSource, which is infinite, so we must end the test
+    // "manually".
+    if (num_packets_++ > kTestNumPackets) {
+      EXPECT_TRUE(has_toggled_);
+      return NULL;  // Test ended.
+    }
+
+    // Get the next packet from the source.
+    return packet_source_.NextPacket();
+  }
+
+  // Inherited from test::AudioSink.
+  bool WriteArray(const int16_t* audio, size_t num_samples) override {
+    // Skip checking the first output frame, since it has a number of zeros
+    // due to how NetEq is initialized.
+    if (first_output_) {
+      first_output_ = false;
+      return true;
+    }
+    for (size_t i = 0; i < num_samples; ++i) {
+      EXPECT_EQ(kSampleValue, audio[i]);
+    }
+    if (num_samples ==
+        static_cast<size_t>(output_freq_2_ / 100))  // Size of 10 ms frame.
+      has_toggled_ = true;
+    // The return value does not say if the values match the expectation, just
+    // that the method could process the samples.
+    return true;
+  }
+
+  const int16_t kSampleValue = 1000;
+  bool first_output_;
+  size_t num_packets_;
+  test::ConstantPcmPacketSource packet_source_;
+  int output_freq_2_;
+  bool has_toggled_;
+};
+
+// Sanity case: both frequencies equal, so no actual toggling happens.
+TEST_F(AcmSwitchingOutputFrequencyOldApi, TestWithoutToggling) {
+  Run(16000, 16000, 1000);
+}
+
+// Toggle the output frequency in both directions, including down-sampling to
+// 8 kHz, switching once per second.
+TEST_F(AcmSwitchingOutputFrequencyOldApi, Toggle16KhzTo32Khz) {
+  Run(16000, 32000, 1000);
+}
+
+TEST_F(AcmSwitchingOutputFrequencyOldApi, Toggle32KhzTo16Khz) {
+  Run(32000, 16000, 1000);
+}
+
+TEST_F(AcmSwitchingOutputFrequencyOldApi, Toggle16KhzTo8Khz) {
+  Run(16000, 8000, 1000);
+}
+
+TEST_F(AcmSwitchingOutputFrequencyOldApi, Toggle8KhzTo16Khz) {
+  Run(8000, 16000, 1000);
+}
+
+#endif
+
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/call_statistics.cc b/webrtc/modules/audio_coding/acm2/call_statistics.cc
new file mode 100644
index 0000000..4441932
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/call_statistics.cc
@@ -0,0 +1,55 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/acm2/call_statistics.h"
+
+#include <assert.h>
+
+namespace webrtc {
+
+namespace acm2 {
+
+// Tallies one NetEq decode call, bucketed by the decoded speech type.
+void CallStatistics::DecodedByNetEq(AudioFrame::SpeechType speech_type) {
+  ++decoding_stat_.calls_to_neteq;
+  switch (speech_type) {
+    case AudioFrame::kNormalSpeech: {
+      ++decoding_stat_.decoded_normal;
+      break;
+    }
+    case AudioFrame::kPLC: {
+      ++decoding_stat_.decoded_plc;
+      break;
+    }
+    case AudioFrame::kCNG: {
+      ++decoding_stat_.decoded_cng;
+      break;
+    }
+    case AudioFrame::kPLCCNG: {
+      ++decoding_stat_.decoded_plc_cng;
+      break;
+    }
+    case AudioFrame::kUndefined: {
+      // If the audio is decoded by NetEq, |kUndefined| is not an option.
+      assert(false);
+    }
+  }
+}
+
+// Tallies one decode call that was answered by the silence generator, i.e.
+// NetEq was bypassed and the output audio is all zeros.
+void CallStatistics::DecodedBySilenceGenerator() {
+  ++decoding_stat_.calls_to_silence_generator;
+}
+
+// Returns a reference to the accumulated counters; the reference stays valid
+// for the lifetime of this object.
+const AudioDecodingCallStats& CallStatistics::GetDecodingStatistics() const {
+  return decoding_stat_;
+}
+
+}  // namespace acm2
+
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/call_statistics.h b/webrtc/modules/audio_coding/acm2/call_statistics.h
new file mode 100644
index 0000000..888afea
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/call_statistics.h
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_CALL_STATISTICS_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_CALL_STATISTICS_H_
+
+#include "webrtc/common_types.h"
+#include "webrtc/modules/include/module_common_types.h"
+
+//
+// This class is for book keeping of calls to ACM. It is not useful to log API
+// calls which are supposed to be called every 10ms, e.g. PlayoutData10Ms(),
+// however, it is useful to know the number of such calls in a given time
+// interval. The current implementation covers calls to PlayoutData10Ms() with
+// detailed accounting of the decoded speech type.
+//
+// Thread Safety
+// =============
+// Please note that this class is not thread safe. The class must be protected
+// if different APIs are called from different threads.
+//
+
+namespace webrtc {
+
+namespace acm2 {
+
+class CallStatistics {
+ public:
+  CallStatistics() {}
+  ~CallStatistics() {}
+
+  // Call this method to indicate that NetEq engaged in decoding. |speech_type|
+  // is the audio-type according to NetEq.
+  void DecodedByNetEq(AudioFrame::SpeechType speech_type);
+
+  // Call this method to indicate that a decoding call resulted in generating
+  // silence, i.e. call to NetEq is bypassed and the output audio is zero.
+  void DecodedBySilenceGenerator();
+
+  // Get statistics for decoding. The statistics include the number of calls to
+  // NetEq and silence generator, as well as the type of speech pulled off of
+  // NetEq, c.f. declaration of AudioDecodingCallStats for detailed description.
+  const AudioDecodingCallStats& GetDecodingStatistics() const;
+
+ private:
+  // Reset the decoding statistics.
+  // NOTE(review): declared but no definition appears in call_statistics.cc and
+  // nothing visible calls it — confirm whether it is still needed.
+  void ResetDecodingStatistics();
+
+  AudioDecodingCallStats decoding_stat_;
+};
+
+}  // namespace acm2
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_CODING_ACM2_CALL_STATISTICS_H_
diff --git a/webrtc/modules/audio_coding/acm2/call_statistics_unittest.cc b/webrtc/modules/audio_coding/acm2/call_statistics_unittest.cc
new file mode 100644
index 0000000..9ba0774
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/call_statistics_unittest.cc
@@ -0,0 +1,55 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_coding/acm2/call_statistics.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+// A fresh CallStatistics object must report all-zero counters.
+TEST(CallStatisticsTest, InitializedZero) {
+  CallStatistics call_stats;
+  AudioDecodingCallStats stats;
+
+  stats = call_stats.GetDecodingStatistics();
+  EXPECT_EQ(0, stats.calls_to_neteq);
+  EXPECT_EQ(0, stats.calls_to_silence_generator);
+  EXPECT_EQ(0, stats.decoded_normal);
+  EXPECT_EQ(0, stats.decoded_cng);
+  EXPECT_EQ(0, stats.decoded_plc);
+  EXPECT_EQ(0, stats.decoded_plc_cng);
+}
+
+// One call per category: the NetEq counter aggregates the four speech types,
+// while each per-type counter and the silence-generator counter is 1.
+TEST(CallStatisticsTest, AllCalls) {
+  CallStatistics call_stats;
+  AudioDecodingCallStats stats;
+
+  call_stats.DecodedBySilenceGenerator();
+  call_stats.DecodedByNetEq(AudioFrame::kNormalSpeech);
+  call_stats.DecodedByNetEq(AudioFrame::kPLC);
+  call_stats.DecodedByNetEq(AudioFrame::kPLCCNG);
+  call_stats.DecodedByNetEq(AudioFrame::kCNG);
+
+  stats = call_stats.GetDecodingStatistics();
+  EXPECT_EQ(4, stats.calls_to_neteq);
+  EXPECT_EQ(1, stats.calls_to_silence_generator);
+  EXPECT_EQ(1, stats.decoded_normal);
+  EXPECT_EQ(1, stats.decoded_cng);
+  EXPECT_EQ(1, stats.decoded_plc);
+  EXPECT_EQ(1, stats.decoded_plc_cng);
+}
+
+}  // namespace acm2
+
+}  // namespace webrtc
+
+
+
diff --git a/webrtc/modules/audio_coding/acm2/codec_manager.cc b/webrtc/modules/audio_coding/acm2/codec_manager.cc
new file mode 100644
index 0000000..a5a9e09
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/codec_manager.cc
@@ -0,0 +1,313 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/acm2/codec_manager.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/engine_configurations.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
+#include "webrtc/system_wrappers/include/trace.h"
+
+namespace webrtc {
+namespace acm2 {
+
+namespace {
+
+// Checks if the given codec is valid to be registered as a send codec.
+// Returns the codec's database index on success, or -1 (with an error trace)
+// if the setting is invalid.
+int IsValidSendCodec(const CodecInst& send_codec) {
+  int dummy_id = 0;
+  if ((send_codec.channels != 1) && (send_codec.channels != 2)) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
+                 "Wrong number of channels (%d, only mono and stereo are "
+                 "supported)",
+                 send_codec.channels);
+    return -1;
+  }
+
+  auto maybe_codec_id = RentACodec::CodecIdByInst(send_codec);
+  if (!maybe_codec_id) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
+                 "Invalid codec setting for the send codec.");
+    return -1;
+  }
+
+  // Telephone-event cannot be a send codec.
+  if (!STR_CASE_CMP(send_codec.plname, "telephone-event")) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
+                 "telephone-event cannot be a send codec");
+    return -1;
+  }
+
+  if (!RentACodec::IsSupportedNumChannels(*maybe_codec_id, send_codec.channels)
+           .value_or(false)) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
+                 "%d number of channels not supported for %s.",
+                 send_codec.channels, send_codec.plname);
+    return -1;
+  }
+  return RentACodec::CodecIndexFromId(*maybe_codec_id).value_or(-1);
+}
+
+// Codec-identity helpers: each returns true iff |codec|'s payload name matches
+// (case-insensitive). For codecs behind an #ifdef, the check collapses to
+// |false| when the codec is not compiled in.
+bool IsIsac(const CodecInst& codec) {
+  return
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
+      !STR_CASE_CMP(codec.plname, "isac") ||
+#endif
+      false;
+}
+
+bool IsOpus(const CodecInst& codec) {
+  return
+#ifdef WEBRTC_CODEC_OPUS
+      !STR_CASE_CMP(codec.plname, "opus") ||
+#endif
+      false;
+}
+
+bool IsPcmU(const CodecInst& codec) {
+  return !STR_CASE_CMP(codec.plname, "pcmu");
+}
+
+bool IsPcmA(const CodecInst& codec) {
+  return !STR_CASE_CMP(codec.plname, "pcma");
+}
+
+bool IsPcm16B(const CodecInst& codec) {
+  return !STR_CASE_CMP(codec.plname, "l16");
+}
+
+bool IsIlbc(const CodecInst& codec) {
+  return
+#ifdef WEBRTC_CODEC_ILBC
+      !STR_CASE_CMP(codec.plname, "ilbc") ||
+#endif
+      false;
+}
+
+bool IsG722(const CodecInst& codec) {
+  return
+#ifdef WEBRTC_CODEC_G722
+      !STR_CASE_CMP(codec.plname, "g722") ||
+#endif
+      false;
+}
+
+// True iff |codec| is one of the speech codecs known to this manager.
+bool CodecSupported(const CodecInst& codec) {
+  return IsOpus(codec) || IsPcmU(codec) || IsPcmA(codec) || IsPcm16B(codec) ||
+         IsIlbc(codec) || IsG722(codec) || IsIsac(codec);
+}
+
+// Placeholder CodecInst used until a real send codec has been registered.
+const CodecInst kEmptyCodecInst = {-1, "noCodecRegistered", 0, 0, 0, 0};
+}  // namespace
+
+// Starts with the placeholder codec and detaches the thread checker so the
+// manager may be constructed on one thread and used on another.
+CodecManager::CodecManager()
+    : send_codec_inst_(kEmptyCodecInst), encoder_is_opus_(false) {
+  thread_checker_.DetachFromThread();
+}
+
+CodecManager::~CodecManager() = default;
+
+// Registers |send_codec| as the send codec. Returns 0 on success and -1 on
+// failure. RED and CNG registrations only record the payload type and return
+// early without touching the speech encoder. If the same codec is already
+// registered, it is re-created only when a parameter changed.
+int CodecManager::RegisterEncoder(const CodecInst& send_codec) {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  int codec_id = IsValidSendCodec(send_codec);
+
+  // Check for reported errors from function IsValidSendCodec().
+  if (codec_id < 0) {
+    return -1;
+  }
+
+  int dummy_id = 0;
+  switch (RentACodec::RegisterRedPayloadType(
+      &codec_stack_params_.red_payload_types, send_codec)) {
+    case RentACodec::RegistrationResult::kOk:
+      return 0;
+    case RentACodec::RegistrationResult::kBadFreq:
+      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
+                   "RegisterSendCodec() failed, invalid frequency for RED"
+                   " registration");
+      return -1;
+    case RentACodec::RegistrationResult::kSkip:
+      break;
+  }
+  switch (RentACodec::RegisterCngPayloadType(
+      &codec_stack_params_.cng_payload_types, send_codec)) {
+    case RentACodec::RegistrationResult::kOk:
+      return 0;
+    case RentACodec::RegistrationResult::kBadFreq:
+      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, dummy_id,
+                   "RegisterSendCodec() failed, invalid frequency for CNG"
+                   " registration");
+      return -1;
+    case RentACodec::RegistrationResult::kSkip:
+      break;
+  }
+
+  // Check if the codec is already registered as send codec.
+  bool new_codec = true;
+  if (CurrentEncoder()) {
+    // |send_codec_inst_| holds the currently registered codec; |codec_id| is
+    // the index of the codec being requested now.
+    auto registered_codec_id = RentACodec::CodecIdByInst(send_codec_inst_);
+    RTC_DCHECK(registered_codec_id);
+    auto requested_codec_id = RentACodec::CodecIdFromIndex(codec_id);
+    new_codec =
+        !requested_codec_id || *registered_codec_id != *requested_codec_id;
+  }
+
+  encoder_is_opus_ = IsOpus(send_codec);
+
+  if (new_codec) {
+    // This is a new codec. Register it and return.
+    RTC_DCHECK(CodecSupported(send_codec));
+    if (IsOpus(send_codec)) {
+      // VAD/DTX not supported.
+      codec_stack_params_.use_cng = false;
+    }
+    AudioEncoder* enc = rent_a_codec_.RentEncoder(send_codec);
+    if (!enc)
+      return -1;
+    rent_a_codec_.RentEncoderStack(enc, &codec_stack_params_);
+    RTC_DCHECK(CurrentEncoder());
+
+    send_codec_inst_ = send_codec;
+    return 0;
+  }
+
+  // This is an existing codec; re-create it if any parameters have changed.
+  if (send_codec_inst_.plfreq != send_codec.plfreq ||
+      send_codec_inst_.pacsize != send_codec.pacsize ||
+      send_codec_inst_.channels != send_codec.channels) {
+    AudioEncoder* enc = rent_a_codec_.RentEncoder(send_codec);
+    if (!enc)
+      return -1;
+    rent_a_codec_.RentEncoderStack(enc, &codec_stack_params_);
+    RTC_DCHECK(CurrentEncoder());
+  }
+  send_codec_inst_.plfreq = send_codec.plfreq;
+  send_codec_inst_.pacsize = send_codec.pacsize;
+  send_codec_inst_.channels = send_codec.channels;
+  send_codec_inst_.pltype = send_codec.pltype;
+
+  // Check if a change in Rate is required.
+  if (send_codec.rate != send_codec_inst_.rate) {
+    CurrentEncoder()->SetTargetBitrate(send_codec.rate);
+    send_codec_inst_.rate = send_codec.rate;
+  }
+
+  return 0;
+}
+
+// Registers an externally created and externally owned speech encoder. A
+// synthetic CodecInst is fabricated from the encoder's reported parameters;
+// pltype and rate are set to -1 since they are not known here.
+void CodecManager::RegisterEncoder(AudioEncoder* external_speech_encoder) {
+  // Make up a CodecInst.
+  send_codec_inst_.channels = external_speech_encoder->NumChannels();
+  send_codec_inst_.plfreq = external_speech_encoder->SampleRateHz();
+  send_codec_inst_.pacsize = rtc::CheckedDivExact(
+      static_cast<int>(external_speech_encoder->Max10MsFramesInAPacket() *
+                       send_codec_inst_.plfreq),
+      100);
+  send_codec_inst_.pltype = -1;  // Not valid.
+  send_codec_inst_.rate = -1;    // Not valid.
+  static const char kName[] = "external";
+  memcpy(send_codec_inst_.plname, kName, sizeof(kName));
+
+  rent_a_codec_.RentEncoderStack(external_speech_encoder, &codec_stack_params_);
+}
+
+// Returns the registered send codec, or an empty Optional if no encoder has
+// been registered yet.
+rtc::Optional<CodecInst> CodecManager::GetCodecInst() const {
+  int dummy_id = 0;
+  WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, dummy_id,
+               "SendCodec()");
+
+  if (!CurrentEncoder()) {
+    WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, dummy_id,
+                 "SendCodec Failed, no codec is registered");
+    return rtc::Optional<CodecInst>();
+  }
+  return rtc::Optional<CodecInst>(send_codec_inst_);
+}
+
+// Enables/disables RED. Returns false if RED cannot be enabled: it is mutually
+// exclusive with codec-internal FEC, and requires a RED payload type to have
+// been registered for the current codec's sample rate.
+bool CodecManager::SetCopyRed(bool enable) {
+  if (enable && codec_stack_params_.use_codec_fec) {
+    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, 0,
+                 "Codec internal FEC and RED cannot be co-enabled.");
+    return false;
+  }
+  if (enable &&
+      codec_stack_params_.red_payload_types.count(send_codec_inst_.plfreq) <
+          1) {
+    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, 0,
+                 "Cannot enable RED at %i Hz.", send_codec_inst_.plfreq);
+    return false;
+  }
+  if (codec_stack_params_.use_red != enable) {
+    codec_stack_params_.use_red = enable;
+    // Rebuild the encoder stack so the change takes effect.
+    if (CurrentEncoder())
+      rent_a_codec_.RentEncoderStack(rent_a_codec_.GetEncoder(),
+                                     &codec_stack_params_);
+  }
+  return true;
+}
+
+// Enables/disables VAD/DTX with the given aggressiveness |mode|. Returns 0 on
+// success, -1 if VAD was requested for a stereo send codec (unsupported).
+int CodecManager::SetVAD(bool enable, ACMVADMode mode) {
+  // Sanity check of the mode.
+  RTC_DCHECK(mode == VADNormal || mode == VADLowBitrate || mode == VADAggr ||
+             mode == VADVeryAggr);
+
+  // Check that the send codec is mono. We don't support VAD/DTX for stereo
+  // sending.
+  auto* enc = rent_a_codec_.GetEncoder();
+  const bool stereo_send = enc ? (enc->NumChannels() != 1) : false;
+  if (enable && stereo_send) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, 0,
+                 "VAD/DTX not supported for stereo sending");
+    codec_stack_params_.use_cng = false;
+    return -1;
+  }
+
+  // If a send codec is registered, set VAD/DTX for the codec.
+  if (IsOpus(send_codec_inst_)) {
+    // VAD/DTX not supported.
+    codec_stack_params_.use_cng = false;
+    return 0;
+  }
+
+  if (codec_stack_params_.use_cng != enable ||
+      codec_stack_params_.vad_mode != mode) {
+    codec_stack_params_.use_cng = enable;
+    codec_stack_params_.vad_mode = mode;
+    // Rebuild the encoder stack so the change takes effect.
+    if (enc)
+      rent_a_codec_.RentEncoderStack(enc, &codec_stack_params_);
+  }
+  return 0;
+}
+
+// Reports the current VAD/DTX state. DTX and VAD are reported identically
+// since both are driven by the single use_cng flag.
+void CodecManager::VAD(bool* dtx_enabled,
+                       bool* vad_enabled,
+                       ACMVADMode* mode) const {
+  *dtx_enabled = *vad_enabled = codec_stack_params_.use_cng;
+  *mode = codec_stack_params_.vad_mode;
+}
+
+// Enables/disables codec-internal FEC. Returns 0 on success, -1 if the request
+// could not be honored (RED already enabled, or the encoder rejected FEC).
+// Requires a registered encoder (RTC_CHECK).
+int CodecManager::SetCodecFEC(bool enable_codec_fec) {
+  if (enable_codec_fec && codec_stack_params_.use_red) {
+    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, 0,
+                 "Codec internal FEC and RED cannot be co-enabled.");
+    return -1;
+  }
+
+  RTC_CHECK(CurrentEncoder());
+  codec_stack_params_.use_codec_fec =
+      CurrentEncoder()->SetFec(enable_codec_fec) && enable_codec_fec;
+  return codec_stack_params_.use_codec_fec == enable_codec_fec ? 0 : -1;
+}
+
+// Returns the shared decoder instance for codecs where encoder and decoder
+// must share state (only iSAC); null for all other codecs.
+AudioDecoder* CodecManager::GetAudioDecoder(const CodecInst& codec) {
+  return IsIsac(codec) ? rent_a_codec_.RentIsacDecoder() : nullptr;
+}
+
+}  // namespace acm2
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/codec_manager.h b/webrtc/modules/audio_coding/acm2/codec_manager.h
new file mode 100644
index 0000000..61832e4
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/codec_manager.h
@@ -0,0 +1,81 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
+
+#include <map>
+
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/optional.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/thread_checker.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/common_types.h"
+
+namespace webrtc {
+
+class AudioDecoder;
+class AudioEncoder;
+
+namespace acm2 {
+
+class CodecManager final {
+ public:
+  CodecManager();
+  ~CodecManager();
+
+  int RegisterEncoder(const CodecInst& send_codec);
+
+  void RegisterEncoder(AudioEncoder* external_speech_encoder);
+
+  rtc::Optional<CodecInst> GetCodecInst() const;
+
+  bool SetCopyRed(bool enable);
+
+  int SetVAD(bool enable, ACMVADMode mode);
+
+  void VAD(bool* dtx_enabled, bool* vad_enabled, ACMVADMode* mode) const;
+
+  int SetCodecFEC(bool enable_codec_fec);
+
+  // Returns a pointer to AudioDecoder of the given codec. For iSAC, encoding
+  // and decoding have to be performed on a shared codec instance. By calling
+  // this method, we get the codec instance that ACM owns.
+  // If |codec| does not share an instance between encoder and decoder, returns
+  // null.
+  AudioDecoder* GetAudioDecoder(const CodecInst& codec);
+
+  bool red_enabled() const { return codec_stack_params_.use_red; }
+
+  bool codec_fec_enabled() const { return codec_stack_params_.use_codec_fec; }
+
+  AudioEncoder* CurrentEncoder() { return rent_a_codec_.GetEncoderStack(); }
+  const AudioEncoder* CurrentEncoder() const {
+    return rent_a_codec_.GetEncoderStack();
+  }
+
+  bool CurrentEncoderIsOpus() const { return encoder_is_opus_; }
+
+ private:
+  rtc::ThreadChecker thread_checker_;
+  CodecInst send_codec_inst_;
+  RentACodec rent_a_codec_;
+  RentACodec::StackParameters codec_stack_params_;
+
+  bool encoder_is_opus_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(CodecManager);
+};
+
+}  // namespace acm2
+}  // namespace webrtc
+#endif  // WEBRTC_MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
diff --git a/webrtc/modules/audio_coding/acm2/codec_manager_unittest.cc b/webrtc/modules/audio_coding/acm2/codec_manager_unittest.cc
new file mode 100644
index 0000000..c09f256
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/codec_manager_unittest.cc
@@ -0,0 +1,66 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h"
+#include "webrtc/modules/audio_coding/acm2/codec_manager.h"
+
+namespace webrtc {
+namespace acm2 {
+
+using ::testing::Return;
+
+namespace {
+
+// Create a MockAudioEncoder with some reasonable default behavior.
+rtc::scoped_ptr<MockAudioEncoder> CreateMockEncoder() {
+  auto enc = rtc_make_scoped_ptr(new MockAudioEncoder);
+  EXPECT_CALL(*enc, SampleRateHz()).WillRepeatedly(Return(8000));
+  EXPECT_CALL(*enc, NumChannels()).WillRepeatedly(Return(1));
+  EXPECT_CALL(*enc, Max10MsFramesInAPacket()).WillRepeatedly(Return(1));
+  EXPECT_CALL(*enc, Die());
+  return enc;
+}
+
+}  // namespace
+
+TEST(CodecManagerTest, ExternalEncoderFec) {
+  auto enc0 = CreateMockEncoder();
+  auto enc1 = CreateMockEncoder();
+  {
+    ::testing::InSequence s;
+    EXPECT_CALL(*enc0, SetFec(false)).WillOnce(Return(true));
+    EXPECT_CALL(*enc0, Mark("A"));
+    EXPECT_CALL(*enc0, SetFec(true)).WillOnce(Return(true));
+    EXPECT_CALL(*enc1, SetFec(true)).WillOnce(Return(true));
+    EXPECT_CALL(*enc1, SetFec(false)).WillOnce(Return(true));
+    EXPECT_CALL(*enc0, Mark("B"));
+    EXPECT_CALL(*enc0, SetFec(false)).WillOnce(Return(true));
+  }
+
+  CodecManager cm;
+  EXPECT_FALSE(cm.codec_fec_enabled());
+  cm.RegisterEncoder(enc0.get());
+  EXPECT_FALSE(cm.codec_fec_enabled());
+  enc0->Mark("A");
+  EXPECT_EQ(0, cm.SetCodecFEC(true));
+  EXPECT_TRUE(cm.codec_fec_enabled());
+  cm.RegisterEncoder(enc1.get());
+  EXPECT_TRUE(cm.codec_fec_enabled());
+
+  EXPECT_EQ(0, cm.SetCodecFEC(false));
+  enc0->Mark("B");
+  EXPECT_FALSE(cm.codec_fec_enabled());
+  cm.RegisterEncoder(enc0.get());
+  EXPECT_FALSE(cm.codec_fec_enabled());
+}
+
+}  // namespace acm2
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/initial_delay_manager.cc b/webrtc/modules/audio_coding/acm2/initial_delay_manager.cc
new file mode 100644
index 0000000..0c31b83
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/initial_delay_manager.cc
@@ -0,0 +1,242 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/acm2/initial_delay_manager.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+InitialDelayManager::InitialDelayManager(int initial_delay_ms,
+                                         int late_packet_threshold)
+    : last_packet_type_(kUndefinedPacket),
+      last_receive_timestamp_(0),
+      timestamp_step_(0),
+      audio_payload_type_(kInvalidPayloadType),
+      initial_delay_ms_(initial_delay_ms),
+      buffered_audio_ms_(0),
+      buffering_(true),
+      playout_timestamp_(0),
+      late_packet_threshold_(late_packet_threshold) {
+  last_packet_rtp_info_.header.payloadType = kInvalidPayloadType;
+  last_packet_rtp_info_.header.ssrc = 0;
+  last_packet_rtp_info_.header.sequenceNumber = 0;
+  last_packet_rtp_info_.header.timestamp = 0;
+}
+
+void InitialDelayManager::UpdateLastReceivedPacket(
+    const WebRtcRTPHeader& rtp_info,
+    uint32_t receive_timestamp,
+    PacketType type,
+    bool new_codec,
+    int sample_rate_hz,
+    SyncStream* sync_stream) {
+  assert(sync_stream);
+
+  // If the payload type of audio packets is changing, |new_codec| has to be
+  assert(!(!new_codec && type == kAudioPacket &&
+         rtp_info.header.payloadType != audio_payload_type_));
+
+  // Just shorthands.
+  const RTPHeader* current_header = &rtp_info.header;
+  RTPHeader* last_header = &last_packet_rtp_info_.header;
+
+  // Don't do anything if getting DTMF. The chance of DTMF in applications where
+  // initial delay is required is very low (we don't know of any). This avoids a
+  // lot of corner cases. The effect of ignoring DTMF packet is minimal. Note
+  // that DTMFs are inserted into NetEq just not accounted here.
+  if (type == kAvtPacket ||
+      (last_packet_type_ != kUndefinedPacket &&
+      !IsNewerSequenceNumber(current_header->sequenceNumber,
+                             last_header->sequenceNumber))) {
+    sync_stream->num_sync_packets = 0;
+    return;
+  }
+
+  // If this is either a new codec or the first packet, record it and set
+  if (new_codec ||
+      last_packet_rtp_info_.header.payloadType == kInvalidPayloadType) {
+    timestamp_step_ = 0;
+    if (type == kAudioPacket)
+      audio_payload_type_ = rtp_info.header.payloadType;
+    else
+      audio_payload_type_ = kInvalidPayloadType;  // Invalid.
+
+    RecordLastPacket(rtp_info, receive_timestamp, type);
+    sync_stream->num_sync_packets = 0;
+    buffered_audio_ms_ = 0;
+    buffering_ = true;
+
+    // If |buffering_| is set then |playout_timestamp_| should have correct
+    // value.
+    UpdatePlayoutTimestamp(*current_header, sample_rate_hz);
+    return;
+  }
+
+  uint32_t timestamp_increase = current_header->timestamp -
+      last_header->timestamp;
+
+  // |timestamp_increase| is invalid if this is the first packet. The effect is
+  // that |buffered_audio_ms_| is not increased.
+  if (last_packet_type_ == kUndefinedPacket) {
+    timestamp_increase = 0;
+  }
+
+  if (buffering_) {
+    buffered_audio_ms_ += timestamp_increase * 1000 / sample_rate_hz;
+
+    // A timestamp that reflects the initial delay, while buffering.
+    UpdatePlayoutTimestamp(*current_header, sample_rate_hz);
+
+    if (buffered_audio_ms_ >= initial_delay_ms_)
+      buffering_ = false;
+  }
+
+  if (current_header->sequenceNumber == last_header->sequenceNumber + 1) {
+    // Two consecutive audio packets, the previous packet-type is audio, so we
+    // can update |timestamp_step_|.
+    if (last_packet_type_ == kAudioPacket)
+      timestamp_step_ = timestamp_increase;
+    RecordLastPacket(rtp_info, receive_timestamp, type);
+    sync_stream->num_sync_packets = 0;
+    return;
+  }
+
+  uint16_t packet_gap = current_header->sequenceNumber -
+      last_header->sequenceNumber - 1;
+
+  // For smooth transitions leave a gap between audio and sync packets.
+  sync_stream->num_sync_packets = last_packet_type_ == kSyncPacket ?
+      packet_gap - 1 : packet_gap - 2;
+
+  // Do nothing if we haven't received any audio packet.
+  if (sync_stream->num_sync_packets > 0 &&
+      audio_payload_type_ != kInvalidPayloadType) {
+    if (timestamp_step_ == 0) {
+      // Make an estimate for |timestamp_step_| if it is not updated, yet.
+      assert(packet_gap > 0);
+      timestamp_step_ = timestamp_increase / (packet_gap + 1);
+    }
+    sync_stream->timestamp_step = timestamp_step_;
+
+    // Build the first sync-packet based on the current received packet.
+    memcpy(&sync_stream->rtp_info, &rtp_info, sizeof(rtp_info));
+    sync_stream->rtp_info.header.payloadType = audio_payload_type_;
+
+    uint16_t sequence_number_update = sync_stream->num_sync_packets + 1;
+    uint32_t timestamp_update = timestamp_step_ * sequence_number_update;
+
+    // Rewind sequence number and timestamps. This will give a more accurate
+    // description of the missing packets.
+    //
+    // Note that we leave a gap between the last packet in sync-stream and the
+    // current received packet, so it should be compensated for in the following
+    // computation of timestamps and sequence number.
+    sync_stream->rtp_info.header.sequenceNumber -= sequence_number_update;
+    sync_stream->receive_timestamp = receive_timestamp - timestamp_update;
+    sync_stream->rtp_info.header.timestamp -= timestamp_update;
+    sync_stream->rtp_info.header.payloadType = audio_payload_type_;
+  } else {
+    sync_stream->num_sync_packets = 0;
+  }
+
+  RecordLastPacket(rtp_info, receive_timestamp, type);
+  return;
+}
+
+void InitialDelayManager::RecordLastPacket(const WebRtcRTPHeader& rtp_info,
+                                           uint32_t receive_timestamp,
+                                           PacketType type) {
+  last_packet_type_ = type;
+  last_receive_timestamp_ = receive_timestamp;
+  memcpy(&last_packet_rtp_info_, &rtp_info, sizeof(rtp_info));
+}
+
+void InitialDelayManager::LatePackets(
+    uint32_t timestamp_now, SyncStream* sync_stream) {
+  assert(sync_stream);
+  sync_stream->num_sync_packets = 0;
+
+  // If there is no estimate of timestamp increment, |timestamp_step_|, then
+  // we cannot estimate the number of late packets.
+  // If the last packet has been CNG, estimating late packets is not meaningful,
+  // as a CNG packet is of unknown length.
+  // We can set a higher threshold if the last packet is CNG and continue
+  // execution, but this is how ACM1 code was written.
+  if (timestamp_step_ <= 0 ||
+      last_packet_type_ == kCngPacket ||
+      last_packet_type_ == kUndefinedPacket ||
+      audio_payload_type_ == kInvalidPayloadType)  // No audio packet received.
+    return;
+
+  int num_late_packets = (timestamp_now - last_receive_timestamp_) /
+      timestamp_step_;
+
+  if (num_late_packets < late_packet_threshold_)
+    return;
+
+  int sync_offset = 1;  // One gap at the end of the sync-stream.
+  if (last_packet_type_ != kSyncPacket) {
+    ++sync_offset;  // One more gap at the beginning of the sync-stream.
+    --num_late_packets;
+  }
+  uint32_t timestamp_update = sync_offset * timestamp_step_;
+
+  sync_stream->num_sync_packets = num_late_packets;
+  if (num_late_packets == 0)
+    return;
+
+  // Build the first sync-packet in the sync-stream.
+  memcpy(&sync_stream->rtp_info, &last_packet_rtp_info_,
+         sizeof(last_packet_rtp_info_));
+
+  // Increase sequence number and timestamps.
+  sync_stream->rtp_info.header.sequenceNumber += sync_offset;
+  sync_stream->rtp_info.header.timestamp += timestamp_update;
+  sync_stream->receive_timestamp = last_receive_timestamp_ + timestamp_update;
+  sync_stream->timestamp_step = timestamp_step_;
+
+  // Sync-packets have audio payload-type.
+  sync_stream->rtp_info.header.payloadType = audio_payload_type_;
+
+  uint16_t sequence_number_update = num_late_packets + sync_offset - 1;
+  timestamp_update = sequence_number_update * timestamp_step_;
+
+  // Fake the last RTP, assuming the caller will inject the whole sync-stream.
+  last_packet_rtp_info_.header.timestamp += timestamp_update;
+  last_packet_rtp_info_.header.sequenceNumber += sequence_number_update;
+  last_packet_rtp_info_.header.payloadType = audio_payload_type_;
+  last_receive_timestamp_ += timestamp_update;
+
+  last_packet_type_ = kSyncPacket;
+  return;
+}
+
+bool InitialDelayManager::GetPlayoutTimestamp(uint32_t* playout_timestamp) {
+  if (!buffering_) {
+    return false;
+  }
+  *playout_timestamp = playout_timestamp_;
+  return true;
+}
+
+void InitialDelayManager::DisableBuffering() {
+  buffering_ = false;
+}
+
+void InitialDelayManager::UpdatePlayoutTimestamp(
+    const RTPHeader& current_header, int sample_rate_hz) {
+  playout_timestamp_ = current_header.timestamp - static_cast<uint32_t>(
+      initial_delay_ms_ * sample_rate_hz / 1000);
+}
+
+}  // namespace acm2
+
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/initial_delay_manager.h b/webrtc/modules/audio_coding/acm2/initial_delay_manager.h
new file mode 100644
index 0000000..32dd126
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/initial_delay_manager.h
@@ -0,0 +1,120 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_INITIAL_DELAY_MANAGER_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_INITIAL_DELAY_MANAGER_H_
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/include/module_common_types.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+class InitialDelayManager {
+ public:
+  enum PacketType {
+    kUndefinedPacket, kCngPacket, kAvtPacket, kAudioPacket, kSyncPacket };
+
+  // Specifies a stream of sync-packets.
+  struct SyncStream {
+    SyncStream()
+        : num_sync_packets(0),
+          receive_timestamp(0),
+          timestamp_step(0) {
+      memset(&rtp_info, 0, sizeof(rtp_info));
+    }
+
+    int num_sync_packets;
+
+    // RTP header of the first sync-packet in the sequence.
+    WebRtcRTPHeader rtp_info;
+
+    // Received timestamp of the first sync-packet in the sequence.
+    uint32_t receive_timestamp;
+
+    // Samples per packet.
+    uint32_t timestamp_step;
+  };
+
+  InitialDelayManager(int initial_delay_ms, int late_packet_threshold);
+
+  // Update with the last received RTP header, |header|, and received timestamp,
+  // |received_timestamp|. |type| indicates the packet type. If codec is changed
+  // since the last time |new_codec| should be true. |sample_rate_hz| is the
+  // decoder's sampling rate in Hz. |header| has a field to store sampling rate
+  // but we are not sure if that is properly set at the send side, and |header|
+  // is declared constant in the caller of this function
+  // (AcmReceiver::InsertPacket()). |sync_stream| contains information required
+  // to generate a stream of sync packets.
+  void UpdateLastReceivedPacket(const WebRtcRTPHeader& header,
+                                uint32_t receive_timestamp,
+                                PacketType type,
+                                bool new_codec,
+                                int sample_rate_hz,
+                                SyncStream* sync_stream);
+
+  // Based on the last received timestamp and given the current timestamp,
+  // sequence of late (or perhaps missing) packets is computed.
+  void LatePackets(uint32_t timestamp_now, SyncStream* sync_stream);
+
+  // Get playout timestamp.
+  // Returns true if the timestamp is valid (when buffering), otherwise false.
+  bool GetPlayoutTimestamp(uint32_t* playout_timestamp);
+
+  // True if buffered audio is less than the given initial delay (specified at
+  // the constructor). Buffering might be disabled by the client of this class.
+  bool buffering() { return buffering_; }
+
+  // Disable buffering in the class.
+  void DisableBuffering();
+
+  // True if any packet received for buffering.
+  bool PacketBuffered() { return last_packet_type_ != kUndefinedPacket; }
+
+ private:
+  static const uint8_t kInvalidPayloadType = 0xFF;
+
+  // Update playout timestamps. While buffering, this is about
+  // |initial_delay_ms| millisecond behind the latest received timestamp.
+  void UpdatePlayoutTimestamp(const RTPHeader& current_header,
+                              int sample_rate_hz);
+
+  // Record an RTP header and related parameters.
+  void RecordLastPacket(const WebRtcRTPHeader& rtp_info,
+                        uint32_t receive_timestamp,
+                        PacketType type);
+
+  PacketType last_packet_type_;
+  WebRtcRTPHeader last_packet_rtp_info_;
+  uint32_t last_receive_timestamp_;
+  uint32_t timestamp_step_;
+  uint8_t audio_payload_type_;
+  const int initial_delay_ms_;
+  int buffered_audio_ms_;
+  bool buffering_;
+
+  // During the initial phase where packets are being accumulated and silence
+  // is played out, |playout_timestamp_| is a timestamp which is equal to
+  // |initial_delay_ms_| milliseconds earlier than the most recently received
+  // RTP timestamp.
+  uint32_t playout_timestamp_;
+
+  // If the number of late packets exceed this value (computed based on current
+  // timestamp and last received timestamp), sequence of sync-packets is
+  // specified.
+  const int late_packet_threshold_;
+};
+
+}  // namespace acm2
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_CODING_ACM2_INITIAL_DELAY_MANAGER_H_
diff --git a/webrtc/modules/audio_coding/acm2/initial_delay_manager_unittest.cc b/webrtc/modules/audio_coding/acm2/initial_delay_manager_unittest.cc
new file mode 100644
index 0000000..d86d221
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/initial_delay_manager_unittest.cc
@@ -0,0 +1,376 @@
+/*
+ *  Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/modules/audio_coding/acm2/initial_delay_manager.h"
+
+namespace webrtc {
+
+namespace acm2 {
+
+namespace {
+
+const uint8_t kAudioPayloadType = 0;
+const uint8_t kCngPayloadType = 1;
+const uint8_t kAvtPayloadType = 2;
+
+const int kSamplingRateHz = 16000;
+const int kInitDelayMs = 200;
+const int kFrameSizeMs = 20;
+const uint32_t kTimestampStep = kFrameSizeMs * kSamplingRateHz / 1000;
+const int kLatePacketThreshold = 5;
+
+void InitRtpInfo(WebRtcRTPHeader* rtp_info) {
+  memset(rtp_info, 0, sizeof(*rtp_info));
+  rtp_info->header.markerBit = false;
+  rtp_info->header.payloadType = kAudioPayloadType;
+  rtp_info->header.sequenceNumber = 1234;
+  rtp_info->header.timestamp = 0xFFFFFFFD;  // Close to wrap around.
+  rtp_info->header.ssrc = 0x87654321;  // Arbitrary.
+  rtp_info->header.numCSRCs = 0;  // Arbitrary.
+  rtp_info->header.paddingLength = 0;
+  rtp_info->header.headerLength = sizeof(RTPHeader);
+  rtp_info->header.payload_type_frequency = kSamplingRateHz;
+  rtp_info->header.extension.absoluteSendTime = 0;
+  rtp_info->header.extension.transmissionTimeOffset = 0;
+  rtp_info->frameType = kAudioFrameSpeech;
+}
+
+void ForwardRtpHeader(int n,
+                      WebRtcRTPHeader* rtp_info,
+                      uint32_t* rtp_receive_timestamp) {
+  rtp_info->header.sequenceNumber += n;
+  rtp_info->header.timestamp += n * kTimestampStep;
+  *rtp_receive_timestamp += n * kTimestampStep;
+}
+
+void NextRtpHeader(WebRtcRTPHeader* rtp_info,
+                   uint32_t* rtp_receive_timestamp) {
+  ForwardRtpHeader(1, rtp_info, rtp_receive_timestamp);
+}
+
+}  // namespace
+
+class InitialDelayManagerTest : public ::testing::Test {
+ protected:
+  InitialDelayManagerTest()
+      : manager_(new InitialDelayManager(kInitDelayMs, kLatePacketThreshold)),
+        rtp_receive_timestamp_(1111) { }  // Arbitrary starting point.
+
+  virtual void SetUp() {
+    ASSERT_TRUE(manager_.get() != NULL);
+    InitRtpInfo(&rtp_info_);
+  }
+
+  void GetNextRtpHeader(WebRtcRTPHeader* rtp_info,
+                        uint32_t* rtp_receive_timestamp) const {
+    memcpy(rtp_info, &rtp_info_, sizeof(*rtp_info));
+    *rtp_receive_timestamp = rtp_receive_timestamp_;
+    NextRtpHeader(rtp_info, rtp_receive_timestamp);
+  }
+
+  rtc::scoped_ptr<InitialDelayManager> manager_;
+  WebRtcRTPHeader rtp_info_;
+  uint32_t rtp_receive_timestamp_;
+};
+
+TEST_F(InitialDelayManagerTest, Init) {
+  EXPECT_TRUE(manager_->buffering());
+  EXPECT_FALSE(manager_->PacketBuffered());
+  manager_->DisableBuffering();
+  EXPECT_FALSE(manager_->buffering());
+  InitialDelayManager::SyncStream sync_stream;
+
+  // Call before any packet inserted.
+  manager_->LatePackets(0x6789ABCD, &sync_stream);  // Arbitrary but large
+                                                    // receive timestamp.
+  EXPECT_EQ(0, sync_stream.num_sync_packets);
+
+  // Insert non-audio packets, a CNG and DTMF.
+  rtp_info_.header.payloadType = kCngPayloadType;
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kCngPacket, false,
+                                     kSamplingRateHz, &sync_stream);
+  EXPECT_EQ(0, sync_stream.num_sync_packets);
+  ForwardRtpHeader(5, &rtp_info_, &rtp_receive_timestamp_);
+  rtp_info_.header.payloadType = kAvtPayloadType;
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAvtPacket, false,
+                                     kSamplingRateHz, &sync_stream);
+  // Gap in sequence numbers but no audio received, sync-stream should be empty.
+  EXPECT_EQ(0, sync_stream.num_sync_packets);
+  manager_->LatePackets(0x45678987, &sync_stream);  // Large arbitrary receive
+                                                    // timestamp.
+  // |manager_| has no estimate of timestamp-step and has not received any
+  // audio packet.
+  EXPECT_EQ(0, sync_stream.num_sync_packets);
+
+
+  NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
+  rtp_info_.header.payloadType = kAudioPayloadType;
+  // First packet.
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAudioPacket, true,
+                                     kSamplingRateHz, &sync_stream);
+  EXPECT_EQ(0, sync_stream.num_sync_packets);
+
+  // Call LatePackets() after only one packet inserted.
+  manager_->LatePackets(0x6789ABCD, &sync_stream);  // Arbitrary but large
+                                                    // receive timestamp.
+  EXPECT_EQ(0, sync_stream.num_sync_packets);
+
+  // Gap in timestamp, but this packet is also flagged as "new," therefore,
+  // expecting empty sync-stream.
+  ForwardRtpHeader(5, &rtp_info_, &rtp_receive_timestamp_);
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAudioPacket, true,
+                                     kSamplingRateHz, &sync_stream);
+}
+
+TEST_F(InitialDelayManagerTest, MissingPacket) {
+  InitialDelayManager::SyncStream sync_stream;
+  // First packet.
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAudioPacket, true,
+                                     kSamplingRateHz, &sync_stream);
+  ASSERT_EQ(0, sync_stream.num_sync_packets);
+
+  // Second packet.
+  NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAudioPacket, false,
+                                     kSamplingRateHz, &sync_stream);
+  ASSERT_EQ(0, sync_stream.num_sync_packets);
+
+  // Third packet, missing packets start from here.
+  NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
+
+  // First sync-packet in sync-stream is one after the above packet.
+  WebRtcRTPHeader expected_rtp_info;
+  uint32_t expected_receive_timestamp;
+  GetNextRtpHeader(&expected_rtp_info, &expected_receive_timestamp);
+
+  const int kNumMissingPackets = 10;
+  ForwardRtpHeader(kNumMissingPackets, &rtp_info_, &rtp_receive_timestamp_);
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAudioPacket, false,
+                                     kSamplingRateHz, &sync_stream);
+  EXPECT_EQ(kNumMissingPackets - 2, sync_stream.num_sync_packets);
+  EXPECT_EQ(0, memcmp(&expected_rtp_info, &sync_stream.rtp_info,
+                      sizeof(expected_rtp_info)));
+  EXPECT_EQ(kTimestampStep, sync_stream.timestamp_step);
+  EXPECT_EQ(expected_receive_timestamp, sync_stream.receive_timestamp);
+}
+
+// There hasn't been any consecutive packets to estimate timestamp-step.
+TEST_F(InitialDelayManagerTest, MissingPacketEstimateTimestamp) {
+  InitialDelayManager::SyncStream sync_stream;
+  // First packet.
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAudioPacket, true,
+                                     kSamplingRateHz, &sync_stream);
+  ASSERT_EQ(0, sync_stream.num_sync_packets);
+
+  // Second packet, missing packets start here.
+  NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
+
+  // First sync-packet in sync-stream is one after the above.
+  WebRtcRTPHeader expected_rtp_info;
+  uint32_t expected_receive_timestamp;
+  GetNextRtpHeader(&expected_rtp_info, &expected_receive_timestamp);
+
+  const int kNumMissingPackets = 10;
+  ForwardRtpHeader(kNumMissingPackets, &rtp_info_, &rtp_receive_timestamp_);
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAudioPacket, false,
+                                     kSamplingRateHz, &sync_stream);
+  EXPECT_EQ(kNumMissingPackets - 2, sync_stream.num_sync_packets);
+  EXPECT_EQ(0, memcmp(&expected_rtp_info, &sync_stream.rtp_info,
+                      sizeof(expected_rtp_info)));
+}
+
+TEST_F(InitialDelayManagerTest, MissingPacketWithCng) {
+  InitialDelayManager::SyncStream sync_stream;
+
+  // First packet.
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAudioPacket, true,
+                                     kSamplingRateHz, &sync_stream);
+  ASSERT_EQ(0, sync_stream.num_sync_packets);
+
+  // Second packet as CNG.
+  NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
+  rtp_info_.header.payloadType = kCngPayloadType;
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kCngPacket, false,
+                                     kSamplingRateHz, &sync_stream);
+  ASSERT_EQ(0, sync_stream.num_sync_packets);
+
+  // Audio packet after CNG. Missing packets start from this packet.
+  rtp_info_.header.payloadType = kAudioPayloadType;
+  NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
+
+  // Timestamps are increased higher than regular packet.
+  const uint32_t kCngTimestampStep = 5 * kTimestampStep;
+  rtp_info_.header.timestamp += kCngTimestampStep;
+  rtp_receive_timestamp_ += kCngTimestampStep;
+
+  // First sync-packet in sync-stream is the one after the above packet.
+  WebRtcRTPHeader expected_rtp_info;
+  uint32_t expected_receive_timestamp;
+  GetNextRtpHeader(&expected_rtp_info, &expected_receive_timestamp);
+
+  const int kNumMissingPackets = 10;
+  ForwardRtpHeader(kNumMissingPackets, &rtp_info_, &rtp_receive_timestamp_);
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAudioPacket, false,
+                                     kSamplingRateHz, &sync_stream);
+  EXPECT_EQ(kNumMissingPackets - 2, sync_stream.num_sync_packets);
+  EXPECT_EQ(0, memcmp(&expected_rtp_info, &sync_stream.rtp_info,
+                      sizeof(expected_rtp_info)));
+  EXPECT_EQ(kTimestampStep, sync_stream.timestamp_step);
+  EXPECT_EQ(expected_receive_timestamp, sync_stream.receive_timestamp);
+}
+
+TEST_F(InitialDelayManagerTest, LatePacket) {
+  InitialDelayManager::SyncStream sync_stream;
+  // First packet.
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAudioPacket, true,
+                                     kSamplingRateHz, &sync_stream);
+  ASSERT_EQ(0, sync_stream.num_sync_packets);
+
+  // Second packet.
+  NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAudioPacket, false,
+                                     kSamplingRateHz, &sync_stream);
+  ASSERT_EQ(0, sync_stream.num_sync_packets);
+
+  // Timestamp increment for 10 ms.
+  const uint32_t kTimestampStep10Ms = kSamplingRateHz / 100;
+
+  // 10 ms after the second packet is inserted.
+  uint32_t timestamp_now = rtp_receive_timestamp_ + kTimestampStep10Ms;
+
+  // Third packet, late packets start from this packet.
+  NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
+
+  // First sync-packet in sync-stream, which is one after the above packet.
+  WebRtcRTPHeader expected_rtp_info;
+  uint32_t expected_receive_timestamp;
+  GetNextRtpHeader(&expected_rtp_info, &expected_receive_timestamp);
+
+  const int kLatePacketThreshold = 5;
+
+  int expected_num_late_packets = kLatePacketThreshold - 1;
+  for (int k = 0; k < 2; ++k) {
+    for (int n = 1; n < kLatePacketThreshold * kFrameSizeMs / 10; ++n) {
+      manager_->LatePackets(timestamp_now, &sync_stream);
+      EXPECT_EQ(0, sync_stream.num_sync_packets) <<
+          "try " << k << " loop number " << n;
+      timestamp_now += kTimestampStep10Ms;
+    }
+    manager_->LatePackets(timestamp_now, &sync_stream);
+
+    EXPECT_EQ(expected_num_late_packets, sync_stream.num_sync_packets) <<
+        "try " << k;
+    EXPECT_EQ(kTimestampStep, sync_stream.timestamp_step) <<
+        "try " << k;
+    EXPECT_EQ(expected_receive_timestamp, sync_stream.receive_timestamp) <<
+        "try " << k;
+    EXPECT_EQ(0, memcmp(&expected_rtp_info, &sync_stream.rtp_info,
+                        sizeof(expected_rtp_info)));
+
+    timestamp_now += kTimestampStep10Ms;
+
+    // |manger_| assumes the |sync_stream| obtained by LatePacket() is fully
+    // injected. The last injected packet is sync-packet, therefore, there will
+    // not be any gap between sync stream of this and the next iteration.
+    ForwardRtpHeader(sync_stream.num_sync_packets, &expected_rtp_info,
+        &expected_receive_timestamp);
+    expected_num_late_packets = kLatePacketThreshold;
+  }
+
+  // Test "no-gap" for missing packet after late packet.
+  // |expected_rtp_info| is the expected sync-packet if any packet is missing.
+  memcpy(&rtp_info_, &expected_rtp_info, sizeof(rtp_info_));
+  rtp_receive_timestamp_ = expected_receive_timestamp;
+
+  int kNumMissingPackets = 3;  // Arbitrary.
+  ForwardRtpHeader(kNumMissingPackets, &rtp_info_, &rtp_receive_timestamp_);
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAudioPacket, false,
+                                     kSamplingRateHz, &sync_stream);
+
+  // Note that there is one packet gap between the last sync-packet and the
+  // latest inserted packet.
+  EXPECT_EQ(kNumMissingPackets - 1, sync_stream.num_sync_packets);
+  EXPECT_EQ(kTimestampStep, sync_stream.timestamp_step);
+  EXPECT_EQ(expected_receive_timestamp, sync_stream.receive_timestamp);
+  EXPECT_EQ(0, memcmp(&expected_rtp_info, &sync_stream.rtp_info,
+                      sizeof(expected_rtp_info)));
+}
+
+TEST_F(InitialDelayManagerTest, NoLatePacketAfterCng) {  // CNG suppresses late-packet sync stream.
+  InitialDelayManager::SyncStream sync_stream;
+
+  // First packet.
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAudioPacket, true,
+                                     kSamplingRateHz, &sync_stream);
+  ASSERT_EQ(0, sync_stream.num_sync_packets);
+
+  // Second packet as CNG.
+  NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
+  rtp_info_.header.payloadType = kCngPayloadType;
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kCngPacket, false,
+                                     kSamplingRateHz, &sync_stream);
+  ASSERT_EQ(0, sync_stream.num_sync_packets);
+
+  // Forward the time more than |kLatePacketThreshold| packets.
+  uint32_t timestamp_now = rtp_receive_timestamp_ + kTimestampStep * (3 +
+      kLatePacketThreshold);
+
+  manager_->LatePackets(timestamp_now, &sync_stream);
+  EXPECT_EQ(0, sync_stream.num_sync_packets);  // No sync packets during a CNG period.
+}
+
+TEST_F(InitialDelayManagerTest, BufferingAudio) {  // Buffering lasts until kInitDelayMs of audio arrived.
+  InitialDelayManager::SyncStream sync_stream;
+
+  // Very first packet is not counted in calculation of buffered audio.
+  for (int n = 0; n < kInitDelayMs / kFrameSizeMs; ++n) {
+    manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                       InitialDelayManager::kAudioPacket,
+                                       n == 0, kSamplingRateHz, &sync_stream);
+    EXPECT_EQ(0, sync_stream.num_sync_packets);
+    EXPECT_TRUE(manager_->buffering());  // Still accumulating the initial delay.
+    const uint32_t expected_playout_timestamp = rtp_info_.header.timestamp -
+        kInitDelayMs * kSamplingRateHz / 1000;  // Playout lags by the initial delay.
+    uint32_t actual_playout_timestamp = 0;
+    EXPECT_TRUE(manager_->GetPlayoutTimestamp(&actual_playout_timestamp));
+    EXPECT_EQ(expected_playout_timestamp, actual_playout_timestamp);
+    NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
+  }
+
+  manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
+                                     InitialDelayManager::kAudioPacket,
+                                     false, kSamplingRateHz, &sync_stream);
+  EXPECT_EQ(0, sync_stream.num_sync_packets);
+  EXPECT_FALSE(manager_->buffering());  // One more packet completes the initial delay.
+}
+
+}  // namespace acm2
+
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/rent_a_codec.cc b/webrtc/modules/audio_coding/acm2/rent_a_codec.cc
new file mode 100644
index 0000000..4800249
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/rent_a_codec.cc
@@ -0,0 +1,306 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
+
+#include "webrtc/base/logging.h"
+#include "webrtc/modules/audio_coding/codecs/cng/audio_encoder_cng.h"
+#include "webrtc/modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
+#ifdef WEBRTC_CODEC_G722
+#include "webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.h"
+#endif
+#ifdef WEBRTC_CODEC_ILBC
+#include "webrtc/modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"
+#endif
+#ifdef WEBRTC_CODEC_ISACFX
+#include "webrtc/modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"
+#include "webrtc/modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+#include "webrtc/modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"
+#include "webrtc/modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+#include "webrtc/modules/audio_coding/codecs/opus/audio_encoder_opus.h"
+#endif
+#include "webrtc/modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
+#ifdef WEBRTC_CODEC_RED
+#include "webrtc/modules/audio_coding/codecs/red/audio_encoder_copy_red.h"
+#endif
+#include "webrtc/modules/audio_coding/acm2/acm_codec_database.h"
+#include "webrtc/modules/audio_coding/acm2/acm_common_defs.h"
+
+namespace webrtc {
+namespace acm2 {
+
+rtc::Optional<RentACodec::CodecId> RentACodec::CodecIdByParams(
+    const char* payload_name,
+    int sampling_freq_hz,
+    int channels) {
+  return CodecIdFromIndex(
+      ACMCodecDB::CodecId(payload_name, sampling_freq_hz, channels));  // Out-of-range index -> empty Optional.
+}
+
+rtc::Optional<CodecInst> RentACodec::CodecInstById(CodecId codec_id) {
+  rtc::Optional<int> mi = CodecIndexFromId(codec_id);  // Valid ids map to a database index.
+  return mi ? rtc::Optional<CodecInst>(Database()[*mi])
+            : rtc::Optional<CodecInst>();
+}
+
+rtc::Optional<RentACodec::CodecId> RentACodec::CodecIdByInst(
+    const CodecInst& codec_inst) {
+  return CodecIdFromIndex(ACMCodecDB::CodecNumber(codec_inst));  // Empty Optional if the inst is unknown.
+}
+
+rtc::Optional<CodecInst> RentACodec::CodecInstByParams(const char* payload_name,
+                                                       int sampling_freq_hz,
+                                                       int channels) {  // Database lookup + channel override.
+  rtc::Optional<CodecId> codec_id =
+      CodecIdByParams(payload_name, sampling_freq_hz, channels);
+  if (!codec_id)
+    return rtc::Optional<CodecInst>();
+  rtc::Optional<CodecInst> ci = CodecInstById(*codec_id);
+  RTC_DCHECK(ci);  // A valid id must have a database entry.
+
+  // Keep the number of channels from the function call. For most codecs it
+  // will be the same value as in default codec settings, but not for all.
+  ci->channels = channels;
+
+  return ci;
+}
+
+bool RentACodec::IsCodecValid(const CodecInst& codec_inst) {
+  return ACMCodecDB::CodecNumber(codec_inst) >= 0;  // Valid iff the database assigns a non-negative number.
+}
+
+rtc::Optional<bool> RentACodec::IsSupportedNumChannels(CodecId codec_id,
+                                                       int num_channels) {  // Empty Optional for invalid ids.
+  auto i = CodecIndexFromId(codec_id);
+  return i ? rtc::Optional<bool>(
+                 ACMCodecDB::codec_settings_[*i].channel_support >=
+                 num_channels)
+           : rtc::Optional<bool>();
+}
+
+rtc::ArrayView<const CodecInst> RentACodec::Database() {
+  return rtc::ArrayView<const CodecInst>(ACMCodecDB::database_,
+                                         NumberOfCodecs());  // Non-owning view of the static codec table.
+}
+
+rtc::Optional<NetEqDecoder> RentACodec::NetEqDecoderFromCodecId(
+    CodecId codec_id,
+    int num_channels) {
+  rtc::Optional<int> i = CodecIndexFromId(codec_id);
+  if (!i)
+    return rtc::Optional<NetEqDecoder>();  // Unknown codec id.
+  const NetEqDecoder ned = ACMCodecDB::neteq_decoders_[*i];
+  return rtc::Optional<NetEqDecoder>(
+      (ned == NetEqDecoder::kDecoderOpus && num_channels == 2)
+          ? NetEqDecoder::kDecoderOpus_2ch  // Stereo Opus has a dedicated decoder kind.
+          : ned);
+}
+
+RentACodec::RegistrationResult RentACodec::RegisterCngPayloadType(
+    std::map<int, int>* pt_map,
+    const CodecInst& codec_inst) {
+  if (STR_CASE_CMP(codec_inst.plname, "CN") != 0)
+    return RegistrationResult::kSkip;  // Not a CN codec.
+  switch (codec_inst.plfreq) {
+    case 8000:
+    case 16000:
+    case 32000:
+    case 48000:
+      (*pt_map)[codec_inst.plfreq] = codec_inst.pltype;  // Map RTP clock rate -> payload type.
+      return RegistrationResult::kOk;
+    default:
+      return RegistrationResult::kBadFreq;  // Unsupported CN clock rate.
+  }
+}
+
+RentACodec::RegistrationResult RentACodec::RegisterRedPayloadType(
+    std::map<int, int>* pt_map,
+    const CodecInst& codec_inst) {
+  if (STR_CASE_CMP(codec_inst.plname, "RED") != 0)
+    return RegistrationResult::kSkip;  // Not a RED codec.
+  switch (codec_inst.plfreq) {
+    case 8000:
+      (*pt_map)[codec_inst.plfreq] = codec_inst.pltype;  // Only 8 kHz RED is accepted.
+      return RegistrationResult::kOk;
+    default:
+      return RegistrationResult::kBadFreq;
+  }
+}
+
+namespace {
+
+// Returns a new speech encoder, or null on error.
+// TODO(kwiberg): Don't handle errors here (bug 5033)
+rtc::scoped_ptr<AudioEncoder> CreateEncoder(
+    const CodecInst& speech_inst,
+    LockedIsacBandwidthInfo* bwinfo) {
+#if defined(WEBRTC_CODEC_ISACFX)
+  if (STR_CASE_CMP(speech_inst.plname, "isac") == 0)  // Fixed-point iSAC wins if both variants are built.
+    return rtc_make_scoped_ptr(new AudioEncoderIsacFix(speech_inst, bwinfo));
+#endif
+#if defined(WEBRTC_CODEC_ISAC)
+  if (STR_CASE_CMP(speech_inst.plname, "isac") == 0)
+    return rtc_make_scoped_ptr(new AudioEncoderIsac(speech_inst, bwinfo));
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+  if (STR_CASE_CMP(speech_inst.plname, "opus") == 0)
+    return rtc_make_scoped_ptr(new AudioEncoderOpus(speech_inst));
+#endif
+  if (STR_CASE_CMP(speech_inst.plname, "pcmu") == 0)
+    return rtc_make_scoped_ptr(new AudioEncoderPcmU(speech_inst));
+  if (STR_CASE_CMP(speech_inst.plname, "pcma") == 0)
+    return rtc_make_scoped_ptr(new AudioEncoderPcmA(speech_inst));
+  if (STR_CASE_CMP(speech_inst.plname, "l16") == 0)
+    return rtc_make_scoped_ptr(new AudioEncoderPcm16B(speech_inst));
+#ifdef WEBRTC_CODEC_ILBC
+  if (STR_CASE_CMP(speech_inst.plname, "ilbc") == 0)
+    return rtc_make_scoped_ptr(new AudioEncoderIlbc(speech_inst));
+#endif
+#ifdef WEBRTC_CODEC_G722
+  if (STR_CASE_CMP(speech_inst.plname, "g722") == 0)
+    return rtc_make_scoped_ptr(new AudioEncoderG722(speech_inst));
+#endif
+  LOG_F(LS_ERROR) << "Could not create encoder of type " << speech_inst.plname;
+  return rtc::scoped_ptr<AudioEncoder>();  // Null signals failure to the caller.
+}
+
+rtc::scoped_ptr<AudioEncoder> CreateRedEncoder(AudioEncoder* encoder,
+                                               int red_payload_type) {  // Wraps |encoder|; does not take ownership.
+#ifdef WEBRTC_CODEC_RED
+  AudioEncoderCopyRed::Config config;
+  config.payload_type = red_payload_type;
+  config.speech_encoder = encoder;
+  return rtc::scoped_ptr<AudioEncoder>(new AudioEncoderCopyRed(config));
+#else
+  return rtc::scoped_ptr<AudioEncoder>();  // Null when RED support is compiled out.
+#endif
+}
+
+rtc::scoped_ptr<AudioEncoder> CreateCngEncoder(AudioEncoder* encoder,
+                                               int payload_type,
+                                               ACMVADMode vad_mode) {  // Wraps |encoder|; does not take ownership.
+  AudioEncoderCng::Config config;
+  config.num_channels = encoder->NumChannels();
+  config.payload_type = payload_type;
+  config.speech_encoder = encoder;
+  switch (vad_mode) {  // Translate ACM VAD mode to the common VAD enum.
+    case VADNormal:
+      config.vad_mode = Vad::kVadNormal;
+      break;
+    case VADLowBitrate:
+      config.vad_mode = Vad::kVadLowBitrate;
+      break;
+    case VADAggr:
+      config.vad_mode = Vad::kVadAggressive;
+      break;
+    case VADVeryAggr:
+      config.vad_mode = Vad::kVadVeryAggressive;
+      break;
+    default:
+      FATAL();  // Unknown VAD mode is a programming error.
+  }
+  return rtc::scoped_ptr<AudioEncoder>(new AudioEncoderCng(config));
+}
+
+rtc::scoped_ptr<AudioDecoder> CreateIsacDecoder(
+    LockedIsacBandwidthInfo* bwinfo) {  // Picks the iSAC build variant at compile time.
+#if defined(WEBRTC_CODEC_ISACFX)
+  return rtc_make_scoped_ptr(new AudioDecoderIsacFix(bwinfo));
+#elif defined(WEBRTC_CODEC_ISAC)
+  return rtc_make_scoped_ptr(new AudioDecoderIsac(bwinfo));
+#else
+  FATAL() << "iSAC is not supported.";
+  return rtc::scoped_ptr<AudioDecoder>();
+#endif
+}
+
+}  // namespace
+
+RentACodec::RentACodec() = default;
+RentACodec::~RentACodec() = default;
+
+AudioEncoder* RentACodec::RentEncoder(const CodecInst& codec_inst) {
+  rtc::scoped_ptr<AudioEncoder> enc =
+      CreateEncoder(codec_inst, &isac_bandwidth_info_);
+  if (!enc)
+    return nullptr;  // Unknown/unsupported codec; old encoder is kept.
+  speech_encoder_ = enc.Pass();  // Replaces (and destroys) any previously rented encoder.
+  return speech_encoder_.get();
+}
+
+RentACodec::StackParameters::StackParameters() {
+  // Register the default payload types for RED and CNG.
+  for (const CodecInst& ci : RentACodec::Database()) {
+    RentACodec::RegisterCngPayloadType(&cng_payload_types, ci);
+    RentACodec::RegisterRedPayloadType(&red_payload_types, ci);
+  }
+}
+
+RentACodec::StackParameters::~StackParameters() = default;
+
+AudioEncoder* RentACodec::RentEncoderStack(AudioEncoder* speech_encoder,
+                                           StackParameters* param) {  // Builds speech -> RED -> CNG stack.
+  RTC_DCHECK(speech_encoder);
+
+  if (param->use_codec_fec) {
+    // Switch FEC on. On failure, remember that FEC is off.
+    if (!speech_encoder->SetFec(true))
+      param->use_codec_fec = false;
+  } else {
+    // Switch FEC off. This shouldn't fail.
+    const bool success = speech_encoder->SetFec(false);
+    RTC_DCHECK(success);
+  }
+
+  auto pt = [&speech_encoder](const std::map<int, int>& m) {  // Payload type for the encoder's sample rate.
+    auto it = m.find(speech_encoder->SampleRateHz());
+    return it == m.end() ? rtc::Optional<int>()
+                         : rtc::Optional<int>(it->second);
+  };
+  auto cng_pt = pt(param->cng_payload_types);
+  param->use_cng =
+      param->use_cng && cng_pt && speech_encoder->NumChannels() == 1;  // CNG requires mono.
+  auto red_pt = pt(param->red_payload_types);
+  param->use_red = param->use_red && red_pt;
+
+  if (param->use_cng || param->use_red) {
+    // The RED and CNG encoders need to be in sync with the speech encoder, so
+    // reset the latter to ensure its buffer is empty.
+    speech_encoder->Reset();
+  }
+  encoder_stack_ = speech_encoder;
+  if (param->use_red) {
+    red_encoder_ = CreateRedEncoder(encoder_stack_, *red_pt);
+    if (red_encoder_)  // May be null when RED is compiled out; stack is left unchanged.
+      encoder_stack_ = red_encoder_.get();
+  } else {
+    red_encoder_.reset();
+  }
+  if (param->use_cng) {
+    cng_encoder_ = CreateCngEncoder(encoder_stack_, *cng_pt, param->vad_mode);  // CNG wraps whatever is below it.
+    encoder_stack_ = cng_encoder_.get();
+  } else {
+    cng_encoder_.reset();
+  }
+  return encoder_stack_;  // Non-owning pointer to the top of the stack.
+}
+
+AudioDecoder* RentACodec::RentIsacDecoder() {
+  if (!isac_decoder_)  // Lazily created once, then cached for the object's lifetime.
+    isac_decoder_ = CreateIsacDecoder(&isac_bandwidth_info_);
+  return isac_decoder_.get();
+}
+
+}  // namespace acm2
+}  // namespace webrtc
diff --git a/webrtc/modules/audio_coding/acm2/rent_a_codec.h b/webrtc/modules/audio_coding/acm2/rent_a_codec.h
new file mode 100644
index 0000000..7035104
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/rent_a_codec.h
@@ -0,0 +1,249 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_ACM2_RENT_A_CODEC_H_
+#define WEBRTC_MODULES_AUDIO_CODING_ACM2_RENT_A_CODEC_H_
+
+#include <stddef.h>
+#include <map>
+
+#include "webrtc/base/array_view.h"
+#include "webrtc/base/constructormagic.h"
+#include "webrtc/base/optional.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_coding/codecs/audio_decoder.h"
+#include "webrtc/modules/audio_coding/codecs/audio_encoder.h"
+#include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h"
+#include "webrtc/typedefs.h"
+
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+#include "webrtc/modules/audio_coding/codecs/isac/locked_bandwidth_info.h"
+#else
+// Dummy implementation, for when we don't have iSAC.
+namespace webrtc {
+class LockedIsacBandwidthInfo {};  // Empty stand-in so the member below can be declared unconditionally.
+}
+#endif
+
+namespace webrtc {
+
+struct CodecInst;
+
+namespace acm2 {
+
+class RentACodec {  // Creates and owns codec objects that are handed out ("rented") to callers.
+ public:
+  enum class CodecId {  // Index into the codec database; compiled-out codecs are set to -1 below.
+#if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
+    kISAC,
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+    kISACSWB,
+#endif
+    // Mono
+    kPCM16B,
+    kPCM16Bwb,
+    kPCM16Bswb32kHz,
+    // Stereo
+    kPCM16B_2ch,
+    kPCM16Bwb_2ch,
+    kPCM16Bswb32kHz_2ch,
+    // Mono
+    kPCMU,
+    kPCMA,
+    // Stereo
+    kPCMU_2ch,
+    kPCMA_2ch,
+#ifdef WEBRTC_CODEC_ILBC
+    kILBC,
+#endif
+#ifdef WEBRTC_CODEC_G722
+    kG722,      // Mono
+    kG722_2ch,  // Stereo
+#endif
+#ifdef WEBRTC_CODEC_OPUS
+    kOpus,  // Mono and stereo
+#endif
+    kCNNB,
+    kCNWB,
+    kCNSWB,
+#ifdef ENABLE_48000_HZ
+    kCNFB,
+#endif
+    kAVT,
+#ifdef WEBRTC_CODEC_RED
+    kRED,
+#endif
+    kNumCodecs,  // Implementation detail. Don't use.
+
+// Set unsupported codecs to -1.
+#if !defined(WEBRTC_CODEC_ISAC) && !defined(WEBRTC_CODEC_ISACFX)
+    kISAC = -1,
+#endif
+#ifndef WEBRTC_CODEC_ISAC
+    kISACSWB = -1,
+#endif
+    // 48 kHz not supported, always set to -1.
+    kPCM16Bswb48kHz = -1,
+#ifndef WEBRTC_CODEC_ILBC
+    kILBC = -1,
+#endif
+#ifndef WEBRTC_CODEC_G722
+    kG722 = -1,      // Mono
+    kG722_2ch = -1,  // Stereo
+#endif
+#ifndef WEBRTC_CODEC_OPUS
+    kOpus = -1,  // Mono and stereo
+#endif
+#ifndef WEBRTC_CODEC_RED
+    kRED = -1,
+#endif
+#ifndef ENABLE_48000_HZ
+    kCNFB = -1,
+#endif
+
+    kNone = -1
+  };
+
+  enum class NetEqDecoder {  // Decoder kinds understood by NetEq.
+    kDecoderPCMu,
+    kDecoderPCMa,
+    kDecoderPCMu_2ch,
+    kDecoderPCMa_2ch,
+    kDecoderILBC,
+    kDecoderISAC,
+    kDecoderISACswb,
+    kDecoderPCM16B,
+    kDecoderPCM16Bwb,
+    kDecoderPCM16Bswb32kHz,
+    kDecoderPCM16Bswb48kHz,
+    kDecoderPCM16B_2ch,
+    kDecoderPCM16Bwb_2ch,
+    kDecoderPCM16Bswb32kHz_2ch,
+    kDecoderPCM16Bswb48kHz_2ch,
+    kDecoderPCM16B_5ch,
+    kDecoderG722,
+    kDecoderG722_2ch,
+    kDecoderRED,
+    kDecoderAVT,
+    kDecoderCNGnb,
+    kDecoderCNGwb,
+    kDecoderCNGswb32kHz,
+    kDecoderCNGswb48kHz,
+    kDecoderArbitrary,
+    kDecoderOpus,
+    kDecoderOpus_2ch,
+  };
+
+  static inline size_t NumberOfCodecs() {  // Count of codecs compiled into this build.
+    return static_cast<size_t>(CodecId::kNumCodecs);
+  }
+
+  static inline rtc::Optional<int> CodecIndexFromId(CodecId codec_id) {  // Empty Optional for -1 / out-of-range ids.
+    const int i = static_cast<int>(codec_id);
+    return i >= 0 && i < static_cast<int>(NumberOfCodecs())
+               ? rtc::Optional<int>(i)
+               : rtc::Optional<int>();
+  }
+
+  static inline rtc::Optional<CodecId> CodecIdFromIndex(int codec_index) {  // Inverse of CodecIndexFromId.
+    return static_cast<size_t>(codec_index) < NumberOfCodecs()
+               ? rtc::Optional<RentACodec::CodecId>(
+                     static_cast<RentACodec::CodecId>(codec_index))
+               : rtc::Optional<RentACodec::CodecId>();
+  }
+
+  static rtc::Optional<CodecId> CodecIdByParams(const char* payload_name,
+                                                int sampling_freq_hz,
+                                                int channels);
+  static rtc::Optional<CodecInst> CodecInstById(CodecId codec_id);
+  static rtc::Optional<CodecId> CodecIdByInst(const CodecInst& codec_inst);
+  static rtc::Optional<CodecInst> CodecInstByParams(const char* payload_name,
+                                                    int sampling_freq_hz,
+                                                    int channels);
+  static bool IsCodecValid(const CodecInst& codec_inst);
+
+  static inline bool IsPayloadTypeValid(int payload_type) {  // RTP payload types occupy 7 bits: 0-127.
+    return payload_type >= 0 && payload_type <= 127;
+  }
+
+  static rtc::ArrayView<const CodecInst> Database();
+
+  static rtc::Optional<bool> IsSupportedNumChannels(CodecId codec_id,
+                                                    int num_channels);
+
+  static rtc::Optional<NetEqDecoder> NetEqDecoderFromCodecId(CodecId codec_id,
+                                                             int num_channels);
+
+  // Parse codec_inst and extract payload types. If the given CodecInst was for
+  // the wrong sort of codec, return kSkip; otherwise, if the rate was illegal,
+  // return kBadFreq; otherwise, update the given RTP timestamp rate (Hz) ->
+  // payload type map and return kOk.
+  enum class RegistrationResult { kOk, kSkip, kBadFreq };
+  static RegistrationResult RegisterCngPayloadType(std::map<int, int>* pt_map,
+                                                   const CodecInst& codec_inst);
+  static RegistrationResult RegisterRedPayloadType(std::map<int, int>* pt_map,
+                                                   const CodecInst& codec_inst);
+
+  RentACodec();
+  ~RentACodec();
+
+  // Creates and returns an audio encoder built to the given specification.
+  // Returns null in case of error. The returned encoder is live until the next
+  // successful call to this function, or until the Rent-A-Codec is destroyed.
+  AudioEncoder* RentEncoder(const CodecInst& codec_inst);
+
+  struct StackParameters {
+    StackParameters();
+    ~StackParameters();
+
+    bool use_codec_fec = false;
+    bool use_red = false;
+    bool use_cng = false;
+    ACMVADMode vad_mode = VADNormal;
+
+    // Maps from RTP timestamp rate (in Hz) to payload type.
+    std::map<int, int> cng_payload_types;
+    std::map<int, int> red_payload_types;
+  };
+
+  // Creates and returns an audio encoder stack constructed to the given
+  // specification. If the specification isn't compatible with the encoder, it
+  // will be changed to match (things will be switched off). The returned
+  // encoder is live until the next successful call to this function, or until
+  // the Rent-A-Codec is destroyed.
+  AudioEncoder* RentEncoderStack(AudioEncoder* speech_encoder,
+                                 StackParameters* param);
+
+  // Get the last return values of RentEncoder and RentEncoderStack, or null if
+  // they haven't been called.
+  AudioEncoder* GetEncoder() const { return speech_encoder_.get(); }
+  AudioEncoder* GetEncoderStack() const { return encoder_stack_; }
+
+  // Creates and returns an iSAC decoder, which will remain live until the
+  // Rent-A-Codec is destroyed. Subsequent calls will simply return the same
+  // object.
+  AudioDecoder* RentIsacDecoder();
+
+ private:
+  rtc::scoped_ptr<AudioEncoder> speech_encoder_;
+  rtc::scoped_ptr<AudioEncoder> cng_encoder_;
+  rtc::scoped_ptr<AudioEncoder> red_encoder_;
+  rtc::scoped_ptr<AudioDecoder> isac_decoder_;
+  AudioEncoder* encoder_stack_ = nullptr;  // Non-owning; top of the stack built by RentEncoderStack().
+  LockedIsacBandwidthInfo isac_bandwidth_info_;  // Shared by the iSAC encoder and decoder.
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(RentACodec);
+};
+
+}  // namespace acm2
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_CODING_ACM2_RENT_A_CODEC_H_
diff --git a/webrtc/modules/audio_coding/acm2/rent_a_codec_unittest.cc b/webrtc/modules/audio_coding/acm2/rent_a_codec_unittest.cc
new file mode 100644
index 0000000..11c4bcb
--- /dev/null
+++ b/webrtc/modules/audio_coding/acm2/rent_a_codec_unittest.cc
@@ -0,0 +1,209 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/arraysize.h"
+#include "webrtc/modules/audio_coding/codecs/mock/mock_audio_encoder.h"
+#include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
+
+namespace webrtc {
+namespace acm2 {
+
+using ::testing::Return;
+
+namespace {
+const int kDataLengthSamples = 80;  // 10 ms at the 8 kHz rate of kDefaultCodecInst.
+const int kPacketSizeSamples = 2 * kDataLengthSamples;  // 20 ms packets.
+const int16_t kZeroData[kDataLengthSamples] = {0};
+const CodecInst kDefaultCodecInst = {0, "pcmu", 8000, kPacketSizeSamples,
+                                     1, 64000};
+const int kCngPt = 13;  // Expected payload type for CNG frames.
+}  // namespace
+
+class RentACodecTestF : public ::testing::Test {  // Fixture: PCMU encoder wrapped in a CNG stack.
+ protected:
+  void CreateCodec() {
+    speech_encoder_ = rent_a_codec_.RentEncoder(kDefaultCodecInst);
+    ASSERT_TRUE(speech_encoder_);
+    RentACodec::StackParameters param;
+    param.use_cng = true;  // Stack the speech encoder under a CNG encoder.
+    encoder_ = rent_a_codec_.RentEncoderStack(speech_encoder_, &param);
+  }
+
+  // Encodes one block of zeros and checks the EncodedInfo fields; negative
+  // expected values mean "don't check".
+  void EncodeAndVerify(size_t expected_out_length,
+                       uint32_t expected_timestamp,
+                       int expected_payload_type,
+                       int expected_send_even_if_empty) {
+    uint8_t out[kPacketSizeSamples];
+    AudioEncoder::EncodedInfo encoded_info;
+    encoded_info =
+        encoder_->Encode(timestamp_, kZeroData, kPacketSizeSamples, out);
+    timestamp_ += kDataLengthSamples;
+    EXPECT_TRUE(encoded_info.redundant.empty());
+    EXPECT_EQ(expected_out_length, encoded_info.encoded_bytes);
+    EXPECT_EQ(expected_timestamp, encoded_info.encoded_timestamp);
+    if (expected_payload_type >= 0)
+      EXPECT_EQ(expected_payload_type, encoded_info.payload_type);
+    if (expected_send_even_if_empty >= 0)
+      EXPECT_EQ(static_cast<bool>(expected_send_even_if_empty),
+                encoded_info.send_even_if_empty);
+  }
+
+  RentACodec rent_a_codec_;
+  AudioEncoder* speech_encoder_ = nullptr;  // Owned by rent_a_codec_.
+  AudioEncoder* encoder_ = nullptr;  // Top of the rented stack; owned by rent_a_codec_.
+  uint32_t timestamp_ = 0;
+};
+
+// This test verifies that CNG frames are delivered as expected. Since the frame
+// size is set to 20 ms, we expect the first encode call to produce no output
+// (which is signaled as 0 bytes output of type kNoEncoding). The next encode
+// call should produce one SID frame of 9 bytes. The third call should not
+// result in any output (just like the first one). The fourth and final encode
+// call should produce an "empty frame", which is like no output, but with
+// AudioEncoder::EncodedInfo::send_even_if_empty set to true. (The reason to
+// produce an empty frame is to drive sending of DTMF packets in the RTP/RTCP
+// module.)
+TEST_F(RentACodecTestF, VerifyCngFrames) {
+  CreateCodec();
+  uint32_t expected_timestamp = timestamp_;
+  // Verify no frame.
+  {
+    SCOPED_TRACE("First encoding");
+    EncodeAndVerify(0, expected_timestamp, -1, -1);
+  }
+
+  // Verify SID frame delivered.
+  {
+    SCOPED_TRACE("Second encoding");
+    EncodeAndVerify(9, expected_timestamp, kCngPt, 1);  // 9-byte SID frame with the CNG payload type.
+  }
+
+  // Verify no frame.
+  {
+    SCOPED_TRACE("Third encoding");
+    EncodeAndVerify(0, expected_timestamp, -1, -1);
+  }
+
+  // Verify an empty frame: no payload bytes, but send_even_if_empty is set.
+  expected_timestamp += 2 * kDataLengthSamples;
+  {
+    SCOPED_TRACE("Fourth encoding");
+    EncodeAndVerify(0, expected_timestamp, kCngPt, 1);
+  }
+}
+
+TEST(RentACodecTest, ExternalEncoder) {  // Switch external -> internal -> external encoder.
+  const int kSampleRateHz = 8000;
+  MockAudioEncoder external_encoder;
+  EXPECT_CALL(external_encoder, SampleRateHz())
+      .WillRepeatedly(Return(kSampleRateHz));
+  EXPECT_CALL(external_encoder, NumChannels()).WillRepeatedly(Return(1));
+  EXPECT_CALL(external_encoder, SetFec(false)).WillRepeatedly(Return(true));
+
+  RentACodec rac;
+  RentACodec::StackParameters param;
+  EXPECT_EQ(&external_encoder, rac.RentEncoderStack(&external_encoder, &param));
+  const int kPacketSizeSamples = kSampleRateHz / 100;  // 10 ms; shadows the file-scope constant.
+  int16_t audio[kPacketSizeSamples] = {0};
+  uint8_t encoded[kPacketSizeSamples];
+  AudioEncoder::EncodedInfo info;
+
+  {
+    ::testing::InSequence s;  // The Mark() calls pin where each encode must happen.
+    info.encoded_timestamp = 0;
+    EXPECT_CALL(external_encoder,
+                EncodeInternal(0, rtc::ArrayView<const int16_t>(audio),
+                               arraysize(encoded), encoded))
+        .WillOnce(Return(info));
+    EXPECT_CALL(external_encoder, Mark("A"));
+    EXPECT_CALL(external_encoder, Mark("B"));
+    info.encoded_timestamp = 2;
+    EXPECT_CALL(external_encoder,
+                EncodeInternal(2, rtc::ArrayView<const int16_t>(audio),
+                               arraysize(encoded), encoded))
+        .WillOnce(Return(info));
+    EXPECT_CALL(external_encoder, Die());
+  }
+
+  info = rac.GetEncoderStack()->Encode(0, audio, arraysize(encoded), encoded);
+  EXPECT_EQ(0u, info.encoded_timestamp);
+  external_encoder.Mark("A");
+
+  // Change to internal encoder.
+  CodecInst codec_inst = kDefaultCodecInst;
+  codec_inst.pacsize = kPacketSizeSamples;
+  AudioEncoder* enc = rac.RentEncoder(codec_inst);
+  ASSERT_TRUE(enc);
+  EXPECT_EQ(enc, rac.RentEncoderStack(enc, &param));
+
+  // Don't expect any more calls to the external encoder.
+  info = rac.GetEncoderStack()->Encode(1, audio, arraysize(encoded), encoded);
+  external_encoder.Mark("B");
+
+  // Change back to external encoder again.
+  EXPECT_EQ(&external_encoder, rac.RentEncoderStack(&external_encoder, &param));
+  info = rac.GetEncoderStack()->Encode(2, audio, arraysize(encoded), encoded);
+  EXPECT_EQ(2u, info.encoded_timestamp);
+}
+
+// Verify that the speech encoder's Reset method is called when CNG or RED
+// (or both) are switched on, but not when they're switched off.
+void TestCngAndRedResetSpeechEncoder(bool use_cng, bool use_red) {
+  MockAudioEncoder speech_encoder;
+  EXPECT_CALL(speech_encoder, NumChannels()).WillRepeatedly(Return(1));
+  EXPECT_CALL(speech_encoder, Max10MsFramesInAPacket())
+      .WillRepeatedly(Return(2));
+  EXPECT_CALL(speech_encoder, SampleRateHz()).WillRepeatedly(Return(8000));
+  EXPECT_CALL(speech_encoder, SetFec(false)).WillRepeatedly(Return(true));
+  {
+    ::testing::InSequence s;  // Reset(), if any, must fall between the two Mark() checkpoints.
+    EXPECT_CALL(speech_encoder, Mark("disabled"));
+    EXPECT_CALL(speech_encoder, Mark("enabled"));
+    if (use_cng || use_red)
+      EXPECT_CALL(speech_encoder, Reset());
+    EXPECT_CALL(speech_encoder, Die());
+  }
+
+  RentACodec::StackParameters param1, param2;  // param1: both off; param2: as requested.
+  param2.use_cng = use_cng;
+  param2.use_red = use_red;
+  speech_encoder.Mark("disabled");
+  RentACodec rac;
+  rac.RentEncoderStack(&speech_encoder, &param1);
+  speech_encoder.Mark("enabled");
+  rac.RentEncoderStack(&speech_encoder, &param2);
+}
+
+TEST(RentACodecTest, CngResetsSpeechEncoder) {  // CNG only.
+  TestCngAndRedResetSpeechEncoder(true, false);
+}
+
+TEST(RentACodecTest, RedResetsSpeechEncoder) {  // RED only.
+  TestCngAndRedResetSpeechEncoder(false, true);
+}
+
+TEST(RentACodecTest, CngAndRedResetsSpeechEncoder) {  // Both on.
+  TestCngAndRedResetSpeechEncoder(true, true);
+}
+
+TEST(RentACodecTest, NoCngAndRedNoSpeechEncoderReset) {  // Both off: no Reset() expected.
+  TestCngAndRedResetSpeechEncoder(false, false);
+}
+
+TEST(RentACodecTest, RentEncoderError) {  // Unknown codec name must yield a null encoder.
+  const CodecInst codec_inst = {
+      0, "Robert'); DROP TABLE Students;", 8000, 160, 1, 64000};
+  RentACodec rent_a_codec;
+  EXPECT_FALSE(rent_a_codec.RentEncoder(codec_inst));
+}
+
+}  // namespace acm2
+}  // namespace webrtc