git-svn-id: http://webrtc.googlecode.com/svn/trunk@10 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/voice_engine/OWNERS b/voice_engine/OWNERS
new file mode 100644
index 0000000..1d503d2
--- /dev/null
+++ b/voice_engine/OWNERS
@@ -0,0 +1,4 @@
+grunell@google.com
+henrika@google.com
+niklase@google.com
+xians@google.com
diff --git a/voice_engine/main/interface/voe_audio_processing.h b/voice_engine/main/interface/voe_audio_processing.h
new file mode 100644
index 0000000..799d4f9
--- /dev/null
+++ b/voice_engine/main/interface/voe_audio_processing.h
@@ -0,0 +1,192 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+//  - Noise Suppression (NS).
+//  - Automatic Gain Control (AGC).
+//  - Echo Control (EC).
+//  - Receiving side VAD, NS and AGC.
+//  - Measurements of instantaneous speech, noise and echo levels.
+//  - Generation of AP debug recordings.
+//  - Detection of keyboard typing which can disrupt a voice conversation.
+//
+// Usage example, omitting error checking:
+//
+//  using namespace webrtc;
+//  VoiceEngine* voe = VoiceEngine::Create();
+//  VoEBase* base = VoEBase::GetInterface(voe);
+//  VoEAudioProcessing* ap = VoEAudioProcessing::GetInterface(voe);
+//  base->Init();
+//  ap->SetEcStatus(true, kEcAec);
+//  ...
+//  base->Terminate();
+//  base->Release();
+//  ap->Release();
+//  VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_H
+#define WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+// VoERxVadCallback
+class WEBRTC_DLLEXPORT VoERxVadCallback
+{
+public:
+    virtual void OnRxVad(int channel, int vadDecision) = 0;
+
+protected:
+    virtual ~VoERxVadCallback() {}
+};
+
+// VoEAudioProcessing
+class WEBRTC_DLLEXPORT VoEAudioProcessing
+{
+public:
+    // Factory for the VoEAudioProcessing sub-API. Increases an internal
+    // reference counter if successful. Returns NULL if the API is not
+    // supported or if construction fails.
+    static VoEAudioProcessing* GetInterface(VoiceEngine* voiceEngine);
+
+    // Releases the VoEAudioProcessing sub-API and decreases an internal
+    // reference counter. Returns the new reference count. This value should
+    // be zero for all sub-APIs before the VoiceEngine object can be safely
+    // deleted.
+    virtual int Release() = 0;
+
+    // Sets Noise Suppression (NS) status and mode.
+    // The NS reduces noise in the microphone signal.
+    virtual int SetNsStatus(bool enable, NsModes mode = kNsUnchanged) = 0;
+
+    // Gets the NS status and mode.
+    virtual int GetNsStatus(bool& enabled, NsModes& mode) = 0;
+
+    // Sets the Automatic Gain Control (AGC) status and mode.
+    // The AGC adjusts the microphone signal to an appropriate level.
+    virtual int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged) = 0;
+
+    // Gets the AGC status and mode.
+    virtual int GetAgcStatus(bool& enabled, AgcModes& mode) = 0;
+
+    // Sets the AGC configuration.
+    // Should only be used in situations where the working environment
+    // is well known.
+    virtual int SetAgcConfig(const AgcConfig config) = 0;
+
+    // Gets the AGC configuration.
+    virtual int GetAgcConfig(AgcConfig& config) = 0;
+
+    // Sets the Echo Control (EC) status and mode.
+    // The EC mitigates acoustic echo where a user can hear their own
+    // speech repeated back due to an acoustic coupling between the
+    // speaker and the microphone at the remote end.
+    virtual int SetEcStatus(bool enable, EcModes mode = kEcUnchanged) = 0;
+
+    // Gets the EC status and mode.
+    virtual int GetEcStatus(bool& enabled, EcModes& mode) = 0;
+
+    // Modifies settings for the AEC designed for mobile devices (AECM).
+    virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
+                            bool enableCNG = true) = 0;
+
+    // Gets settings for the AECM.
+    virtual int GetAecmMode(AecmModes& mode, bool& enabledCNG) = 0;
+
+    // Sets status and mode of the receiving-side (Rx) NS.
+    // The Rx NS reduces noise in the received signal for the specified
+    // |channel|. Intended for advanced usage only.
+    virtual int SetRxNsStatus(int channel,
+                              bool enable,
+                              NsModes mode = kNsUnchanged) = 0;
+
+    // Gets status and mode of the receiving-side NS.
+    virtual int GetRxNsStatus(int channel,
+                              bool& enabled,
+                              NsModes& mode) = 0;
+
+    // Sets status and mode of the receiving-side (Rx) AGC.
+    // The Rx AGC adjusts the received signal to an appropriate level
+    // for the specified |channel|. Intended for advanced usage only.
+    virtual int SetRxAgcStatus(int channel,
+                               bool enable,
+                               AgcModes mode = kAgcUnchanged) = 0;
+
+    // Gets status and mode of the receiving-side AGC.
+    virtual int GetRxAgcStatus(int channel,
+                               bool& enabled,
+                               AgcModes& mode) = 0;
+
+    // Modifies the AGC configuration on the receiving side for the
+    // specified |channel|.
+    virtual int SetRxAgcConfig(int channel, const AgcConfig config) = 0;
+
+    // Gets the AGC configuration on the receiving side.
+    virtual int GetRxAgcConfig(int channel, AgcConfig& config) = 0;
+
+    // Registers a VoERxVadCallback |observer| instance and enables Rx VAD
+    // notifications for the specified |channel|.
+    virtual int RegisterRxVadObserver(int channel,
+                                      VoERxVadCallback &observer) = 0;
+
+    // Deregisters the VoERxVadCallback |observer| and disables Rx VAD
+    // notifications for the specified |channel|.
+    virtual int DeRegisterRxVadObserver(int channel) = 0;
+
+    // Gets the VAD/DTX activity for the specified |channel|.
+    // The return value is 1 if the audio contains speech and 0 if it
+    // contains silence. The output is always 1 if VAD is disabled.
+    virtual int VoiceActivityIndicator(int channel) = 0;
+
+    // Enables or disables the possibility to retrieve instantaneous
+    // speech, noise and echo metrics during an active call.
+    virtual int SetMetricsStatus(bool enable) = 0;
+
+    // Gets the current speech, noise and echo metric status.
+    virtual int GetMetricsStatus(bool& enabled) = 0;
+
+    // Gets the instantaneous speech level metrics for the transmitted
+    // and received signals.
+    virtual int GetSpeechMetrics(int& levelTx, int& levelRx) = 0;
+
+    // Gets the instantaneous noise level metrics for the transmitted
+    // and received signals.
+    virtual int GetNoiseMetrics(int& levelTx, int& levelRx) = 0;
+
+    // Gets the instantaneous echo level metrics for the near-end and
+    // far-end signals.
+    virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP) = 0;
+
+    // Enables recording of Audio Processing (AP) debugging information.
+    // The file can later be used for off-line analysis of the AP performance.
+    virtual int StartDebugRecording(const char* fileNameUTF8) = 0;
+
+    // Disables recording of AP debugging information.
+    virtual int StopDebugRecording() = 0;
+
+    // Enables or disables detection of disturbing keyboard typing.
+    // An error notification will be given as a callback upon detection.
+    virtual int SetTypingDetectionStatus(bool enable) = 0;
+
+    // Gets the current typing detection status.
+    virtual int GetTypingDetectionStatus(bool& enabled) = 0;
+
+protected:
+    VoEAudioProcessing() {}
+    virtual ~VoEAudioProcessing() {}
+};
+
+}  //  namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_H
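A minimal sketch of enabling the processing components together, using only the methods declared in this header; include paths are abbreviated, error checking is omitted and the chosen modes are illustrative:

#include "voe_audio_processing.h"
#include "voe_base.h"

void EnableAudioProcessing(webrtc::VoiceEngine* voe) {
  webrtc::VoEBase* base = webrtc::VoEBase::GetInterface(voe);
  webrtc::VoEAudioProcessing* ap =
      webrtc::VoEAudioProcessing::GetInterface(voe);
  base->Init();

  ap->SetNsStatus(true);                               // keep current NS mode
  ap->SetAgcStatus(true, webrtc::kAgcAdaptiveAnalog);  // adaptive analog AGC
  ap->SetEcStatus(true);                               // keep current EC mode

  // Verify what was actually applied.
  bool enabled = false;
  webrtc::AgcModes agcMode = webrtc::kAgcUnchanged;
  ap->GetAgcStatus(enabled, agcMode);

  ap->Release();
  base->Release();
}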
diff --git a/voice_engine/main/interface/voe_base.h b/voice_engine/main/interface/voe_base.h
new file mode 100644
index 0000000..546f925
--- /dev/null
+++ b/voice_engine/main/interface/voe_base.h
@@ -0,0 +1,217 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+//  - Enables full duplex VoIP sessions via RTP using G.711 (mu-Law or A-Law).
+//  - Initialization and termination.
+//  - Trace information to text files or via callbacks.
+//  - Multi-channel support (mixing, sending to multiple destinations etc.).
+//  - Call setup (port and address) for receiving and sending sides.
+//
+// To support codecs other than G.711, the VoECodec sub-API must be used.
+//
+// Usage example, omitting error checking:
+//
+//  using namespace webrtc;
+//  VoiceEngine* voe = VoiceEngine::Create();
+//  VoEBase* base = VoEBase::GetInterface(voe);
+//  base->Init();
+//  int ch = base->CreateChannel();
+//  base->StartPlayout(ch);
+//  ...
+//  base->DeleteChannel(ch);
+//  base->Terminate();
+//  base->Release();
+//  VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_BASE_H
+#define WEBRTC_VOICE_ENGINE_VOE_BASE_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class AudioDeviceModule;
+
+const int kVoEDefault = -1;
+
+// VoiceEngineObserver
+class WEBRTC_DLLEXPORT VoiceEngineObserver
+{
+public:
+    // This method will be called after the occurrence of any runtime error
+    // code, or warning notification, when the observer interface has been
+    // installed using VoEBase::RegisterVoiceEngineObserver().
+    virtual void CallbackOnError(const int channel, const int errCode) = 0;
+
+protected:
+    virtual ~VoiceEngineObserver() {}
+};
+
+// VoiceEngine
+class WEBRTC_DLLEXPORT VoiceEngine
+{
+public:
+    // Creates a VoiceEngine object, which can then be used to acquire
+    // sub-APIs. Returns NULL on failure.
+    static VoiceEngine* Create();
+
+    // Deletes a created VoiceEngine object and releases the utilized resources.
+    // If |ignoreRefCounters| is set to false, all reference counters must be
+    // zero to enable a valid release of the allocated resources. When set to
+    // true, a release of all resources allocated by the VoE is performed
+    // without checking the reference counter state.
+    static bool Delete(VoiceEngine*& voiceEngine,
+                       bool ignoreRefCounters = false);
+
+    // Specifies the amount and type of trace information which will be
+    // created by the VoiceEngine.
+    static int SetTraceFilter(const unsigned int filter);
+
+    // Sets the name of the trace file and enables non-encrypted trace messages.
+    static int SetTraceFile(const char* fileNameUTF8,
+                            const bool addFileCounter = false);
+
+    // Installs the TraceCallback implementation to ensure that the user
+    // receives callbacks for generated trace messages.
+    static int SetTraceCallback(TraceCallback* callback);
+
+    // Passes the Android Java VM, environment and context objects to the
+    // VoiceEngine (used on the Android platform only).
+    static int SetAndroidObjects(void* javaVM, void* env, void* context);
+
+protected:
+    VoiceEngine() {}
+    virtual ~VoiceEngine() {}
+};
+
+// VoEBase
+class WEBRTC_DLLEXPORT VoEBase
+{
+public:
+    // Factory for the VoEBase sub-API. Increases an internal reference
+    // counter if successful. Returns NULL if the API is not supported or if
+    // construction fails.
+    static VoEBase* GetInterface(VoiceEngine* voiceEngine);
+
+    // Releases the VoEBase sub-API and decreases an internal reference
+    // counter. Returns the new reference count. This value should be zero
+    // for all sub-APIs before the VoiceEngine object can be safely deleted.
+    virtual int Release() = 0;
+
+    // Installs the observer class to enable runtime error control and
+    // warning notifications.
+    virtual int RegisterVoiceEngineObserver(VoiceEngineObserver& observer) = 0;
+
+    // Removes and disables the observer class for runtime error control
+    // and warning notifications.
+    virtual int DeRegisterVoiceEngineObserver() = 0;
+
+    // Installs and enables a user-defined external audio device module
+    // which implements all the audio layer functionality.
+    virtual int RegisterAudioDeviceModule(AudioDeviceModule& adm) = 0;
+
+    // Removes and disables the external audio device module.
+    virtual int DeRegisterAudioDeviceModule() = 0;
+
+    // Initializes all common parts of the VoiceEngine, e.g. all
+    // encoders/decoders, the sound card and core receiving components.
+    virtual int Init() = 0;
+
+    // Terminates all VoiceEngine functions and releases allocated resources.
+    virtual int Terminate() = 0;
+
+    // Retrieves the maximum number of channels that can be created.
+    virtual int MaxNumOfChannels() = 0;
+
+    // Creates a new channel and allocates the required resources for it.
+    virtual int CreateChannel() = 0;
+
+    // Deletes an existing channel and releases the utilized resources.
+    virtual int DeleteChannel(int channel) = 0;
+
+    // Sets the local receiver port and address for a specified
+    // |channel| number.
+    virtual int SetLocalReceiver(int channel, int port,
+                                 int RTCPport = kVoEDefault,
+                                 const char ipAddr[64] = NULL,
+                                 const char multiCastAddr[64] = NULL) = 0;
+
+    // Gets the local receiver port and address for a specified
+    // |channel| number.
+    virtual int GetLocalReceiver(int channel, int& port, int& RTCPport,
+                                 char ipAddr[64]) = 0;
+
+    // Sets the destination port and address for a specified |channel| number.
+    virtual int SetSendDestination(int channel, int port,
+                                   const char ipAddr[64],
+                                   int sourcePort = kVoEDefault,
+                                   int RTCPport = kVoEDefault) = 0;
+
+    // Gets the destination port and address for a specified |channel| number.
+    virtual int GetSendDestination(int channel, int& port, char ipAddr[64],
+                                   int& sourcePort, int& RTCPport) = 0;
+
+    // Prepares and initiates the VoiceEngine for reception of
+    // incoming RTP/RTCP packets on the specified |channel|.
+    virtual int StartReceive(int channel) = 0;
+
+    // Stops receiving incoming RTP/RTCP packets on the specified |channel|.
+    virtual int StopReceive(int channel) = 0;
+
+    // Starts forwarding the packets to the mixer/soundcard for a
+    // specified |channel|.
+    virtual int StartPlayout(int channel) = 0;
+
+    // Stops forwarding the packets to the mixer/soundcard for a
+    // specified |channel|.
+    virtual int StopPlayout(int channel) = 0;
+
+    // Starts sending packets to an already specified IP address and
+    // port number for a specified |channel|.
+    virtual int StartSend(int channel) = 0;
+
+    // Stops sending packets from a specified |channel|.
+    virtual int StopSend(int channel) = 0;
+
+    // Gets the version information for VoiceEngine and its components.
+    virtual int GetVersion(char version[1024]) = 0;
+
+    // Gets the last VoiceEngine error code.
+    virtual int LastError() = 0;
+
+
+    // Stops or resumes playout and transmission on a temporary basis.
+    virtual int SetOnHoldStatus(int channel, bool enable,
+                                OnHoldModes mode = kHoldSendAndPlay) = 0;
+
+    // Gets the current playout and transmission status.
+    virtual int GetOnHoldStatus(int channel, bool& enabled,
+                                OnHoldModes& mode) = 0;
+
+    // Sets the NetEQ playout mode for a specified |channel| number.
+    virtual int SetNetEQPlayoutMode(int channel, NetEqModes mode) = 0;
+
+    // Gets the NetEQ playout mode for a specified |channel| number.
+    virtual int GetNetEQPlayoutMode(int channel, NetEqModes& mode) = 0;
+
+    // Sets the NetEQ background noise mode for a specified |channel| number.
+    virtual int SetNetEQBGNMode(int channel, NetEqBgnModes mode) = 0;
+
+    // Gets the NetEQ background noise mode for a specified |channel| number.
+    virtual int GetNetEQBGNMode(int channel, NetEqBgnModes& mode) = 0;
+
+protected:
+    VoEBase() {}
+    virtual ~VoEBase() {}
+};
+
+} // namespace webrtc
+
+#endif  //  WEBRTC_VOICE_ENGINE_VOE_BASE_H
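A minimal sketch of a full-duplex session driven only by the VoEBase methods declared above; the port number and loopback address are illustrative, and error checking is omitted:

#include "voe_base.h"

void RunLoopbackCall(webrtc::VoiceEngine* voe) {
  webrtc::VoEBase* base = webrtc::VoEBase::GetInterface(voe);
  base->Init();

  int ch = base->CreateChannel();
  base->SetLocalReceiver(ch, 12345);                 // receive RTP on 12345
  base->SetSendDestination(ch, 12345, "127.0.0.1");  // send to ourselves
  base->StartReceive(ch);
  base->StartPlayout(ch);
  base->StartSend(ch);

  // ... the call is active here ...

  base->StopSend(ch);
  base->StopPlayout(ch);
  base->StopReceive(ch);
  base->DeleteChannel(ch);
  base->Terminate();
  base->Release();
}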
diff --git a/voice_engine/main/interface/voe_call_report.h b/voice_engine/main/interface/voe_call_report.h
new file mode 100644
index 0000000..52842cc
--- /dev/null
+++ b/voice_engine/main/interface/voe_call_report.h
@@ -0,0 +1,90 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+//  - Long-term speech and noise level metrics.
+//  - Long-term echo metric statistics.
+//  - Round Trip Time (RTT) statistics.
+//  - Dead-or-Alive connection summary.
+//  - Generation of call reports to text files.
+//
+// Usage example, omitting error checking:
+//
+//  using namespace webrtc;
+//  VoiceEngine* voe = VoiceEngine::Create();
+//  VoEBase* base = VoEBase::GetInterface(voe);
+//  VoECallReport* report = VoECallReport::GetInterface(voe);
+//  base->Init();
+//  LevelStatistics stats;
+//  report->GetSpeechAndNoiseSummary(stats);
+//  ...
+//  base->Terminate();
+//  base->Release();
+//  report->Release();
+//  VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_H
+#define WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+// VoECallReport
+class WEBRTC_DLLEXPORT VoECallReport
+{
+public:
+    // Factory for the VoECallReport sub-API. Increases an internal
+    // reference counter if successful. Returns NULL if the API is not
+    // supported or if construction fails.
+    static VoECallReport* GetInterface(VoiceEngine* voiceEngine);
+
+    // Releases the VoECallReport sub-API and decreases an internal
+    // reference counter. Returns the new reference count. This value should
+    // be zero for all sub-APIs before the VoiceEngine object can be safely
+    // deleted.
+    virtual int Release() = 0;
+
+    // Performs a combined reset of all components involved in generating
+    // the call report for a specified |channel|.
+    virtual int ResetCallReportStatistics(int channel) = 0;
+
+    // Gets minimum, maximum and average levels for long-term speech and
+    // noise metrics.
+    virtual int GetSpeechAndNoiseSummary(LevelStatistics& stats) = 0;
+
+    // Gets minimum, maximum and average levels for long-term echo metrics.
+    virtual int GetEchoMetricSummary(EchoStatistics& stats) = 0;
+
+    // Gets minimum, maximum and average levels for Round Trip Time (RTT)
+    // measurements.
+    virtual int GetRoundTripTimeSummary(int channel,
+                                        StatVal& delaysMs) = 0;
+
+    // Gets the total amount of dead and alive connection detections
+    // during a VoIP session.
+    virtual int GetDeadOrAliveSummary(int channel, int& numOfDeadDetections,
+                                      int& numOfAliveDetections) = 0;
+
+    // Creates a text file in ASCII format, which contains a summary
+    // of all the statistics that can be obtained by the call report sub-API.
+    virtual int WriteReportToFile(const char* fileNameUTF8) = 0;
+
+protected:
+    VoECallReport() { }
+    virtual ~VoECallReport() { }
+};
+
+}  // namespace webrtc
+
+#endif  //  WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_H
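A minimal sketch of collecting a post-call summary with the methods declared above; the output file name is illustrative and error checking is omitted:

#include "voe_call_report.h"

void WriteCallSummary(webrtc::VoiceEngine* voe, int channel) {
  webrtc::VoECallReport* report = webrtc::VoECallReport::GetInterface(voe);

  int numDead = 0;
  int numAlive = 0;
  report->GetDeadOrAliveSummary(channel, numDead, numAlive);

  webrtc::StatVal rtt;
  report->GetRoundTripTimeSummary(channel, rtt);

  // Writes levels, echo metrics, RTT and dead-or-alive counts to an
  // ASCII file.
  report->WriteReportToFile("call_report.txt");

  report->Release();
}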
diff --git a/voice_engine/main/interface/voe_codec.h b/voice_engine/main/interface/voe_codec.h
new file mode 100644
index 0000000..ea7bbce
--- /dev/null
+++ b/voice_engine/main/interface/voe_codec.h
@@ -0,0 +1,134 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+//  - Support of non-default codecs (e.g. iLBC, iSAC, etc.).
+//  - Voice Activity Detection (VAD) on a per channel basis.
+//  - Possibility to specify how to map received payload types to codecs.
+//
+// Usage example, omitting error checking:
+//
+//  using namespace webrtc;
+//  VoiceEngine* voe = VoiceEngine::Create();
+//  VoEBase* base = VoEBase::GetInterface(voe);
+//  VoECodec* codec = VoECodec::GetInterface(voe);
+//  base->Init();
+//  int num_of_codecs = codec->NumOfCodecs();
+//  ...
+//  base->Terminate();
+//  base->Release();
+//  codec->Release();
+//  VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_CODEC_H
+#define WEBRTC_VOICE_ENGINE_VOE_CODEC_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+class WEBRTC_DLLEXPORT VoECodec
+{
+public:
+    // Factory for the VoECodec sub-API. Increases an internal
+    // reference counter if successful. Returns NULL if the API is not
+    // supported or if construction fails.
+    static VoECodec* GetInterface(VoiceEngine* voiceEngine);
+
+    // Releases the VoECodec sub-API and decreases an internal
+    // reference counter. Returns the new reference count. This value should
+    // be zero for all sub-APIs before the VoiceEngine object can be safely
+    // deleted.
+    virtual int Release() = 0;
+
+    // Gets the number of supported codecs.
+    virtual int NumOfCodecs() = 0;
+
+    // Gets the |codec| information for a specified list |index|.
+    virtual int GetCodec(int index, CodecInst& codec) = 0;
+
+    // Sets the |codec| for the |channel| to be used for sending.
+    virtual int SetSendCodec(int channel, const CodecInst& codec) = 0;
+
+    // Gets the |codec| parameters for the sending codec on a specified
+    // |channel|.
+    virtual int GetSendCodec(int channel, CodecInst& codec) = 0;
+
+    // Gets the currently received |codec| for a specific |channel|.
+    virtual int GetRecCodec(int channel, CodecInst& codec) = 0;
+
+    // Sets the initial values of target rate and frame size for iSAC
+    // for a specified |channel|. This API is only valid if iSAC is set up
+    // to run in channel-adaptive mode.
+    virtual int SetISACInitTargetRate(int channel, int rateBps,
+                                      bool useFixedFrameSize = false) = 0;
+
+    // Sets the maximum allowed iSAC rate which the codec may not exceed
+    // for a single packet for the specified |channel|. The maximum rate is
+    // defined as payload size per frame size in bits per second.
+    virtual int SetISACMaxRate(int channel, int rateBps) = 0;
+
+    // Sets the maximum allowed iSAC payload size for a specified |channel|.
+    // The maximum value is set independently of the frame size, i.e.
+    // 30 ms and 60 ms packets have the same limit.
+    virtual int SetISACMaxPayloadSize(int channel, int sizeBytes) = 0;
+
+    // Sets the dynamic payload type number for a particular |codec| or
+    // disables (ignores) a codec for receiving. For instance, when receiving
+    // an invite from a SIP-based client, this function can be used to change
+    // the dynamic payload type number to match that in the INVITE SDP
+    // message. The parameters used in the |codec| structure are:
+    // plname, plfreq, pltype and channels.
+    virtual int SetRecPayloadType(int channel, const CodecInst& codec) = 0;
+
+    // Gets the actual payload type that is set for receiving a |codec| on a
+    // |channel|. The value it retrieves will either be the default payload
+    // type, or a value earlier set with SetRecPayloadType().
+    virtual int GetRecPayloadType(int channel, CodecInst& codec) = 0;
+
+    // Sets the payload |type| for the sending of SID-frames with background
+    // noise estimation during silence periods detected by the VAD.
+    virtual int SetSendCNPayloadType(
+        int channel, int type, PayloadFrequencies frequency = kFreq16000Hz) = 0;
+
+
+    // Sets the VAD/DTX (silence suppression) status and |mode| for a
+    // specified |channel|.
+    virtual int SetVADStatus(int channel, bool enable,
+                             VadModes mode = kVadConventional,
+                             bool disableDTX = false) = 0;
+
+    // Gets the VAD/DTX status and |mode| for a specified |channel|.
+    virtual int GetVADStatus(int channel, bool& enabled, VadModes& mode,
+                             bool& disabledDTX) = 0;
+
+    // Not supported
+    virtual int SetAMREncFormat(int channel, AmrMode mode) = 0;
+
+    // Not supported
+    virtual int SetAMRDecFormat(int channel, AmrMode mode) = 0;
+
+    // Not supported
+    virtual int SetAMRWbEncFormat(int channel, AmrMode mode) = 0;
+
+    // Not supported
+    virtual int SetAMRWbDecFormat(int channel, AmrMode mode) = 0;
+
+protected:
+    VoECodec() {}
+    virtual ~VoECodec() {}
+};
+
+} // namespace webrtc
+
+#endif  //  WEBRTC_VOICE_ENGINE_VOE_CODEC_H
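A minimal sketch of selecting a send codec by payload name with the methods declared above; the payload name is supplied by the caller and error checking is omitted:

#include <cstring>
#include "voe_codec.h"

void SelectSendCodec(webrtc::VoiceEngine* voe, int channel,
                     const char* payloadName) {
  webrtc::VoECodec* codec = webrtc::VoECodec::GetInterface(voe);

  webrtc::CodecInst inst;
  for (int i = 0; i < codec->NumOfCodecs(); ++i) {
    codec->GetCodec(i, inst);
    if (std::strcmp(inst.plname, payloadName) == 0) {
      codec->SetSendCodec(channel, inst);  // use this codec for sending
      break;
    }
  }
  codec->Release();
}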
diff --git a/voice_engine/main/interface/voe_dtmf.h b/voice_engine/main/interface/voe_dtmf.h
new file mode 100644
index 0000000..1290151
--- /dev/null
+++ b/voice_engine/main/interface/voe_dtmf.h
@@ -0,0 +1,148 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+//  - Telephone event transmission.
+//  - DTMF tone generation.
+//
+// Usage example, omitting error checking:
+//
+//  using namespace webrtc;
+//  VoiceEngine* voe = VoiceEngine::Create();
+//  VoEBase* base = VoEBase::GetInterface(voe);
+//  VoEDtmf* dtmf  = VoEDtmf::GetInterface(voe);
+//  base->Init();
+//  int ch = base->CreateChannel();
+//  ...
+//  dtmf->SendTelephoneEvent(ch, 7);
+//  ...
+//  base->DeleteChannel(ch);
+//  base->Terminate();
+//  base->Release();
+//  dtmf->Release();
+//  VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_DTMF_H
+#define WEBRTC_VOICE_ENGINE_VOE_DTMF_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+// VoETelephoneEventObserver
+class WEBRTC_DLLEXPORT VoETelephoneEventObserver
+{
+public:
+    // This method will be called after the detection of an inband
+    // telephone event. The event code is given as output in the
+    // |eventCode| parameter.
+    virtual void OnReceivedTelephoneEventInband(const int channel,
+                                                const unsigned char eventCode,
+                                                const bool endOfEvent) = 0;
+
+    // This method will be called after the detection of an out-of-band
+    // telephone event. The event code is given as output in the
+    // |eventCode| parameter.
+    virtual void OnReceivedTelephoneEventOutOfBand(
+        const int channel,
+        const unsigned char eventCode,
+        const bool endOfEvent) = 0;
+
+protected:
+    virtual ~VoETelephoneEventObserver() {}
+};
+
+// VoEDtmf
+class WEBRTC_DLLEXPORT VoEDtmf
+{
+public:
+
+    // Factory for the VoEDtmf sub-API. Increases an internal
+    // reference counter if successful. Returns NULL if the API is not
+    // supported or if construction fails.
+    static VoEDtmf* GetInterface(VoiceEngine* voiceEngine);
+
+    // Releases the VoEDtmf sub-API and decreases an internal
+    // reference counter. Returns the new reference count. This value should
+    // be zero for all sub-APIs before the VoiceEngine object can be safely
+    // deleted.
+    virtual int Release() = 0;
+
+    // Sends telephone events either in-band or out-of-band.
+    virtual int SendTelephoneEvent(int channel, unsigned char eventCode,
+                                   bool outOfBand = true, int lengthMs = 160,
+                                   int attenuationDb = 10) = 0;
+
+
+    // Sets the dynamic payload |type| that should be used for telephone
+    // events.
+    virtual int SetSendTelephoneEventPayloadType(int channel,
+                                                 unsigned char type) = 0;
+
+
+    // Gets the currently set dynamic payload |type| for telephone events.
+    virtual int GetSendTelephoneEventPayloadType(int channel,
+                                                 unsigned char& type) = 0;
+
+    // Enables or disables local tone playout for DTMF events received
+    // out-of-band.
+    virtual int SetDtmfPlayoutStatus(int channel, bool enable) = 0;
+
+    // Gets the DTMF playout status.
+    virtual int GetDtmfPlayoutStatus(int channel, bool& enabled) = 0;
+
+    // Toggles the DTMF feedback state: when a DTMF tone is sent, the same tone
+    // is played out on the speaker.
+    virtual int SetDtmfFeedbackStatus(bool enable,
+                                      bool directFeedback = false) = 0;
+
+    // Gets the DTMF feedback status.
+    virtual int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback) = 0;
+
+    // Plays a DTMF feedback tone (only locally).
+    virtual int PlayDtmfTone(unsigned char eventCode, int lengthMs = 200,
+                             int attenuationDb = 10) = 0;
+
+    // Starts playing out a DTMF feedback tone locally.
+    // The tone will be played out until the corresponding stop function
+    // is called.
+    virtual int StartPlayingDtmfTone(unsigned char eventCode,
+                                     int attenuationDb = 10) = 0;
+
+    // Stops playing out a DTMF feedback tone locally.
+    virtual int StopPlayingDtmfTone() = 0;
+
+    // Installs an instance of a VoETelephoneEventObserver derived class and
+    // activates detection of telephone events for the specified |channel|.
+    virtual int RegisterTelephoneEventDetection(
+        int channel, TelephoneEventDetectionMethods detectionMethod,
+        VoETelephoneEventObserver& observer) = 0;
+
+    // Removes an instance of a VoETelephoneEventObserver derived class and
+    // disables detection of telephone events for the specified |channel|.
+    virtual int DeRegisterTelephoneEventDetection(int channel) = 0;
+
+    // Gets the current telephone-event detection status for a specified
+    // |channel|.
+    virtual int GetTelephoneEventDetectionStatus(
+        int channel, bool& enabled,
+        TelephoneEventDetectionMethods& detectionMethod) = 0;
+
+protected:
+    VoEDtmf() {}
+    virtual ~VoEDtmf() {}
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_VOE_DTMF_H
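A minimal sketch of a telephone-event observer built on the interfaces declared above; the detection method is passed in by the caller and error checking is omitted:

#include <cstdio>
#include "voe_dtmf.h"

class EventPrinter : public webrtc::VoETelephoneEventObserver {
 public:
  virtual void OnReceivedTelephoneEventInband(const int channel,
                                              const unsigned char eventCode,
                                              const bool endOfEvent) {
    std::printf("in-band event %d on channel %d\n",
                static_cast<int>(eventCode), channel);
  }
  virtual void OnReceivedTelephoneEventOutOfBand(const int channel,
                                                 const unsigned char eventCode,
                                                 const bool endOfEvent) {
    std::printf("out-of-band event %d on channel %d\n",
                static_cast<int>(eventCode), channel);
  }
};

void SetupDtmf(webrtc::VoiceEngine* voe, int channel,
               webrtc::TelephoneEventDetectionMethods method,
               webrtc::VoETelephoneEventObserver& observer) {
  webrtc::VoEDtmf* dtmf = webrtc::VoEDtmf::GetInterface(voe);
  dtmf->RegisterTelephoneEventDetection(channel, method, observer);
  dtmf->SendTelephoneEvent(channel, 7);  // digit "7", out-of-band by default

  // ... detected events are reported to |observer| while the call runs ...

  dtmf->DeRegisterTelephoneEventDetection(channel);
  dtmf->Release();
}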
diff --git a/voice_engine/main/interface/voe_encryption.h b/voice_engine/main/interface/voe_encryption.h
new file mode 100644
index 0000000..ae3f373
--- /dev/null
+++ b/voice_engine/main/interface/voe_encryption.h
@@ -0,0 +1,81 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+//  - External encryption and decryption.
+//
+// Usage example, omitting error checking:
+//
+//  using namespace webrtc;
+//  VoiceEngine* voe = VoiceEngine::Create();
+//  VoEEncryption* encrypt  = VoEEncryption::GetInterface(voe);
+//  ...
+//  encrypt->Release();
+//  VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_H
+#define WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+class WEBRTC_DLLEXPORT VoEEncryption
+{
+public:
+    // Factory for the VoEEncryption sub-API. Increases an internal
+    // reference counter if successful. Returns NULL if the API is not
+    // supported or if construction fails.
+    static VoEEncryption* GetInterface(VoiceEngine* voiceEngine);
+
+    // Releases the VoEEncryption sub-API and decreases an internal
+    // reference counter. Returns the new reference count. This value should
+    // be zero for all sub-APIs before the VoiceEngine object can be safely
+    // deleted.
+    virtual int Release() = 0;
+
+    // Installs an Encryption instance and enables external encryption
+    // for the selected |channel|.
+    virtual int RegisterExternalEncryption(
+        int channel, Encryption& encryption) = 0;
+
+    // Removes an Encryption instance and disables external encryption
+    // for the selected |channel|.
+    virtual int DeRegisterExternalEncryption(int channel) = 0;
+
+    // Not supported
+    virtual int EnableSRTPSend(int channel, CipherTypes cipherType,
+        int cipherKeyLength, AuthenticationTypes authType, int authKeyLength,
+        int authTagLength, SecurityLevels level, const unsigned char key[30],
+        bool useForRTCP = false) = 0;
+
+    // Not supported
+    virtual int DisableSRTPSend(int channel) = 0;
+
+    // Not supported
+    virtual int EnableSRTPReceive(int channel, CipherTypes cipherType,
+        int cipherKeyLength, AuthenticationTypes authType, int authKeyLength,
+        int authTagLength, SecurityLevels level, const unsigned char key[30],
+        bool useForRTCP = false) = 0;
+
+    // Not supported
+    virtual int DisableSRTPReceive(int channel) = 0;
+
+protected:
+    VoEEncryption() {}
+    virtual ~VoEEncryption() {}
+};
+
+}  // namespace webrtc
+
+#endif  //  WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_H
diff --git a/voice_engine/main/interface/voe_errors.h b/voice_engine/main/interface/voe_errors.h
new file mode 100644
index 0000000..cc05970
--- /dev/null
+++ b/voice_engine/main/interface/voe_errors.h
@@ -0,0 +1,162 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_ERRORS_H
+#define WEBRTC_VOICE_ENGINE_VOE_ERRORS_H
+
+// Warnings
+#define VE_PORT_NOT_DEFINED 8001
+#define VE_CHANNEL_NOT_VALID 8002
+#define VE_FUNC_NOT_SUPPORTED 8003
+#define VE_INVALID_LISTNR 8004
+#define VE_INVALID_ARGUMENT 8005
+#define VE_INVALID_PORT_NMBR 8006
+#define VE_INVALID_PLNAME 8007
+#define VE_INVALID_PLFREQ 8008
+#define VE_INVALID_PLTYPE 8009
+#define VE_INVALID_PACSIZE 8010
+#define VE_NOT_SUPPORTED 8011
+#define VE_ALREADY_LISTENING 8012
+#define VE_CHANNEL_NOT_CREATED 8013
+#define VE_MAX_ACTIVE_CHANNELS_REACHED 8014
+#define VE_REC_CANNOT_PREPARE_HEADER 8015
+#define VE_REC_CANNOT_ADD_BUFFER 8016
+#define VE_PLAY_CANNOT_PREPARE_HEADER 8017
+#define VE_ALREADY_SENDING 8018
+#define VE_INVALID_IP_ADDRESS 8019
+#define VE_ALREADY_PLAYING 8020
+#define VE_NOT_ALL_VERSION_INFO 8021
+#define VE_DTMF_OUTOF_RANGE 8022
+#define VE_INVALID_CHANNELS 8023
+#define VE_SET_PLTYPE_FAILED 8024
+#define VE_ENCRYPT_NOT_INITED 8025
+#define VE_NOT_INITED 8026
+#define VE_NOT_SENDING 8027
+#define VE_EXT_TRANSPORT_NOT_SUPPORTED 8028
+#define VE_EXTERNAL_TRANSPORT_ENABLED 8029
+#define VE_STOP_RECORDING_FAILED 8030
+#define VE_INVALID_RATE 8031
+#define VE_INVALID_PACKET 8032
+#define VE_NO_GQOS 8033
+#define VE_INVALID_TIMESTAMP 8034
+#define VE_RECEIVE_PACKET_TIMEOUT 8035
+#define VE_STILL_PLAYING_PREV_DTMF 8036
+#define VE_INIT_FAILED_WRONG_EXPIRY 8037
+#define VE_SENDING 8038
+#define VE_ENABLE_IPV6_FAILED 8039
+#define VE_FUNC_NO_STEREO 8040
+// Range 8041-8060 is not used (8061-8080 is reserved for other platforms,
+// see the end of this file)
+#define VE_FW_TRAVERSAL_ALREADY_INITIALIZED 8081
+#define VE_PACKET_RECEIPT_RESTARTED 8082
+#define VE_NOT_ALL_INFO 8083
+#define VE_CANNOT_SET_SEND_CODEC 8084
+#define VE_CODEC_ERROR 8085
+#define VE_NETEQ_ERROR 8086
+#define VE_RTCP_ERROR 8087
+#define VE_INVALID_OPERATION 8088
+#define VE_CPU_INFO_ERROR 8089
+#define VE_SOUNDCARD_ERROR 8090
+#define VE_SPEECH_LEVEL_ERROR 8091
+#define VE_SEND_ERROR 8092
+#define VE_CANNOT_REMOVE_CONF_CHANNEL 8093
+#define VE_PLTYPE_ERROR 8094
+#define VE_SET_FEC_FAILED 8095
+#define VE_CANNOT_GET_PLAY_DATA 8096
+#define VE_APM_ERROR 8097
+#define VE_RUNTIME_PLAY_WARNING 8098
+#define VE_RUNTIME_REC_WARNING 8099
+#define VE_NOT_PLAYING 8100
+#define VE_SOCKETS_NOT_INITED 8101
+#define VE_CANNOT_GET_SOCKET_INFO 8102
+#define VE_INVALID_MULTICAST_ADDRESS 8103
+#define VE_DESTINATION_NOT_INITED 8104
+#define VE_RECEIVE_SOCKETS_CONFLICT 8105
+#define VE_SEND_SOCKETS_CONFLICT 8106
+#define VE_TYPING_NOISE_WARNING 8107
+#define VE_SATURATION_WARNING 8108
+#define VE_NOISE_WARNING 8109
+#define VE_CANNOT_GET_SEND_CODEC 8110
+#define VE_CANNOT_GET_REC_CODEC 8111
+#define VE_ALREADY_INITED 8112
+
+// Errors causing limited functionality
+#define VE_RTCP_SOCKET_ERROR 9001
+#define VE_MIC_VOL_ERROR 9002
+#define VE_SPEAKER_VOL_ERROR 9003
+#define VE_CANNOT_ACCESS_MIC_VOL 9004
+#define VE_CANNOT_ACCESS_SPEAKER_VOL 9005
+#define VE_GET_MIC_VOL_ERROR 9006
+#define VE_GET_SPEAKER_VOL_ERROR 9007
+#define VE_THREAD_RTCP_ERROR 9008
+#define VE_CANNOT_INIT_APM 9009
+#define VE_SEND_SOCKET_TOS_ERROR 9010
+#define VE_CANNOT_RETRIEVE_DEVICE_NAME 9013
+#define VE_SRTP_ERROR 9014
+// 9015 is not used
+#define VE_INTERFACE_NOT_FOUND 9016
+#define VE_TOS_GQOS_CONFLICT 9017
+#define VE_CANNOT_ADD_CONF_CHANNEL 9018
+#define VE_BUFFER_TOO_SMALL 9019
+#define VE_CANNOT_EXECUTE_SETTING 9020
+#define VE_CANNOT_RETRIEVE_SETTING 9021
+// 9022 is not used
+#define VE_RTP_KEEPALIVE_FAILED 9023
+#define VE_SEND_DTMF_FAILED 9024
+#define VE_CANNOT_RETRIEVE_CNAME 9025
+#define VE_DECRYPTION_FAILED 9026
+#define VE_ENCRYPTION_FAILED 9027
+#define VE_CANNOT_RETRIEVE_RTP_STAT 9028
+#define VE_GQOS_ERROR 9029
+#define VE_BINDING_SOCKET_TO_LOCAL_ADDRESS_FAILED 9030
+#define VE_TOS_INVALID 9031
+#define VE_TOS_ERROR 9032
+#define VE_CANNOT_RETRIEVE_VALUE 9033
+
+// Critical errors that stop voice functionality
+#define VE_PLAY_UNDEFINED_SC_ERR 10001
+#define VE_REC_CANNOT_OPEN_SC 10002
+#define VE_SOCKET_ERROR 10003
+#define VE_MMSYSERR_INVALHANDLE 10004
+#define VE_MMSYSERR_NODRIVER 10005
+#define VE_MMSYSERR_NOMEM 10006
+#define VE_WAVERR_UNPREPARED 10007
+#define VE_WAVERR_STILLPLAYING 10008
+#define VE_UNDEFINED_SC_ERR 10009
+#define VE_UNDEFINED_SC_REC_ERR 10010
+#define VE_THREAD_ERROR 10011
+#define VE_CANNOT_START_RECORDING 10012
+#define VE_PLAY_CANNOT_OPEN_SC 10013
+#define VE_NO_WINSOCK_2 10014
+#define VE_SEND_SOCKET_ERROR 10015
+#define VE_BAD_FILE 10016
+#define VE_EXPIRED_COPY 10017
+#define VE_NOT_AUTHORISED 10018
+#define VE_RUNTIME_PLAY_ERROR 10019
+#define VE_RUNTIME_REC_ERROR 10020
+#define VE_BAD_ARGUMENT 10021
+#define VE_LINUX_API_ONLY 10022
+#define VE_REC_DEVICE_REMOVED 10023
+#define VE_NO_MEMORY 10024
+#define VE_BAD_HANDLE 10025
+#define VE_RTP_RTCP_MODULE_ERROR 10026
+#define VE_AUDIO_CODING_MODULE_ERROR 10027
+#define VE_AUDIO_DEVICE_MODULE_ERROR 10028
+#define VE_CANNOT_START_PLAYOUT 10029
+#define VE_CANNOT_STOP_RECORDING 10030
+#define VE_CANNOT_STOP_PLAYOUT 10031
+#define VE_CANNOT_INIT_CHANNEL 10032
+#define VE_RECV_SOCKET_ERROR 10033
+#define VE_SOCKET_TRANSPORT_MODULE_ERROR 10034
+#define VE_AUDIO_CONF_MIX_MODULE_ERROR 10035
+
+// Warnings for other platforms (reserved range 8061-8080)
+#define VE_IGNORED_FUNCTION 8061
+
+#endif  //  WEBRTC_VOICE_ENGINE_VOE_ERRORS_H
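A minimal sketch of mapping VoEBase::LastError() onto the three severity ranges defined above (warnings 8xxx, limited-functionality errors 9xxx, critical errors 10xxx); the printed messages are illustrative:

#include <cstdio>
#include "voe_base.h"
#include "voe_errors.h"

void ReportLastError(webrtc::VoEBase* base) {
  const int err = base->LastError();
  if (err == VE_TYPING_NOISE_WARNING) {
    std::printf("warning %d: keyboard typing detected\n", err);
  } else if (err >= 10001) {
    std::printf("critical error %d: voice functionality stopped\n", err);
  } else if (err >= 9001) {
    std::printf("error %d: limited functionality\n", err);
  } else if (err >= 8001) {
    std::printf("warning %d\n", err);
  }
}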
diff --git a/voice_engine/main/interface/voe_external_media.h b/voice_engine/main/interface/voe_external_media.h
new file mode 100644
index 0000000..50d2d38
--- /dev/null
+++ b/voice_engine/main/interface/voe_external_media.h
@@ -0,0 +1,114 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// In some cases it is desirable to use an audio source or sink which may
+// not be available to the VoiceEngine, such as a DV camera. This sub-API
+// contains functions that allow for the use of such external recording
+// sources and playout sinks. It also describes how recorded data, or data
+// to be played out, can be modified outside the VoiceEngine.
+//
+// Usage example, omitting error checking:
+//
+//  using namespace webrtc;
+//  VoiceEngine* voe = VoiceEngine::Create();
+//  VoEBase* base = VoEBase::GetInterface(voe);
+//  VoEExternalMedia* media = VoEExternalMedia::GetInterface(voe);
+//  base->Init();
+//  ...
+//  media->SetExternalRecordingStatus(true);
+//  ...
+//  base->Terminate();
+//  base->Release();
+//  media->Release();
+//  VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H
+#define WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+class WEBRTC_DLLEXPORT VoEMediaProcess
+{
+public:
+    // The VoiceEngine user should override the Process() method in a
+    // derived class. Process() will be called when audio is ready to
+    // be processed. The audio can be accessed in several different modes
+    // given by the |type| parameter. The function should modify the
+    // original data and ensure that it is copied back to the |audio10ms|
+    // array. The number of samples in the frame cannot be changed.
+    // The sampling frequency will depend upon the codec used. 
+    // If |isStereo| is true, audio10ms will contain 16-bit PCM data
+    // samples in interleaved stereo format (L0,R0,L1,R1,...).
+    virtual void Process(const int channel, const ProcessingTypes type,
+                         WebRtc_Word16 audio10ms[], const int length,
+                         const int samplingFreq, const bool isStereo) = 0;
+
+protected:
+    virtual ~VoEMediaProcess() {}
+};
+
+class WEBRTC_DLLEXPORT VoEExternalMedia
+{
+public:
+    // Factory for the VoEExternalMedia sub-API. Increases an internal
+    // reference counter if successful. Returns NULL if the API is not
+    // supported or if construction fails.
+    static VoEExternalMedia* GetInterface(VoiceEngine* voiceEngine);
+
+    // Releases the VoEExternalMedia sub-API and decreases an internal
+    // reference counter. Returns the new reference count. This value should
+    // be zero for all sub-APIs before the VoiceEngine object can be safely
+    // deleted.
+    virtual int Release() = 0;
+
+    // Installs a VoEMediaProcess derived instance and activates external
+    // media for the specified |channel| and |type|.
+    virtual int RegisterExternalMediaProcessing(
+        int channel, ProcessingTypes type, VoEMediaProcess& processObject) = 0;
+
+    // Removes the VoEMediaProcess derived instance and deactivates external
+    // media for the specified |channel| and |type|.
+    virtual int DeRegisterExternalMediaProcessing(
+        int channel, ProcessingTypes type) = 0;
+
+    // Toggles the state of external recording.
+    virtual int SetExternalRecordingStatus(bool enable) = 0;
+
+    // Toggles the state of external playout.
+    virtual int SetExternalPlayoutStatus(bool enable) = 0;
+
+    // This function accepts externally recorded audio. During transmission,
+    // this method should be called at as regular an interval as possible
+    // with frames of corresponding size.
+    virtual int ExternalRecordingInsertData(
+        const WebRtc_Word16 speechData10ms[], int lengthSamples,
+        int samplingFreqHz, int current_delay_ms) = 0;
+
+    // This function gets audio for an external playout sink.
+    // During transmission, this function should be called every ~10 ms
+    // to obtain a new 10 ms frame of audio. The length of the block will
+    // be 160, 320, 440 or 480 samples (for 16, 32, 44 or 48 kHz sampling
+    // rates respectively).
+    virtual int ExternalPlayoutGetData(
+        WebRtc_Word16 speechData10ms[], int samplingFreqHz,
+        int current_delay_ms, int& lengthSamples) = 0;
+
+protected:
+    VoEExternalMedia() {}
+    virtual ~VoEExternalMedia() {}
+};
+
+}  // namespace webrtc
+
+#endif  //  WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H
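A minimal sketch of a VoEMediaProcess implementation and its registration, using only the interfaces declared above; the attenuation is just an illustrative in-place modification and error checking is omitted:

#include "voe_external_media.h"

class Attenuator : public webrtc::VoEMediaProcess {
 public:
  virtual void Process(const int channel, const webrtc::ProcessingTypes type,
                       WebRtc_Word16 audio10ms[], const int length,
                       const int samplingFreq, const bool isStereo) {
    // Halve the level in place; the number of samples must stay unchanged.
    for (int i = 0; i < length; ++i) {
      audio10ms[i] /= 2;
    }
  }
};

void RunWithAttenuation(webrtc::VoiceEngine* voe, int channel,
                        webrtc::ProcessingTypes type) {
  Attenuator attenuator;
  webrtc::VoEExternalMedia* media =
      webrtc::VoEExternalMedia::GetInterface(voe);
  media->RegisterExternalMediaProcessing(channel, type, attenuator);

  // ... Process() is called on |attenuator| while media is flowing ...

  media->DeRegisterExternalMediaProcessing(channel, type);
  media->Release();
}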
diff --git a/voice_engine/main/interface/voe_file.h b/voice_engine/main/interface/voe_file.h
new file mode 100644
index 0000000..d968dcf
--- /dev/null
+++ b/voice_engine/main/interface/voe_file.h
@@ -0,0 +1,184 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+//  - File playback.
+//  - File recording.
+//  - File conversion.
+//
+// Usage example, omitting error checking:
+//
+//  using namespace webrtc;
+//  VoiceEngine* voe = VoiceEngine::Create();
+//  VoEBase* base = VoEBase::GetInterface(voe);
+//  VoEFile* file  = VoEFile::GetInterface(voe);
+//  base->Init();
+//  int ch = base->CreateChannel();
+//  ...
+//  base->StartPlayout(ch);
+//  file->StartPlayingFileAsMicrophone(ch, "data_file_16kHz.pcm", true);
+//  ...
+//  file->StopPlayingFileAsMicrophone(ch);
+//  base->StopPlayout(ch);
+//  ...
+//  base->DeleteChannel(ch);
+//  base->Terminate();
+//  base->Release();
+//  file->Release();
+//  VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_FILE_H
+#define WEBRTC_VOICE_ENGINE_VOE_FILE_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+class WEBRTC_DLLEXPORT VoEFile
+{
+public:
+    // Factory for the VoEFile sub-API. Increases an internal
+    // reference counter if successful. Returns NULL if the API is not
+    // supported or if construction fails.
+    static VoEFile* GetInterface(VoiceEngine* voiceEngine);
+
+    // Releases the VoEFile sub-API and decreases an internal
+    // reference counter. Returns the new reference count. This value should
+    // be zero for all sub-APIs before the VoiceEngine object can be safely
+    // deleted.
+    virtual int Release() = 0;
+
+    // Starts playing and mixing files with the local speaker signal for
+    // playout.
+    virtual int StartPlayingFileLocally(
+        int channel,
+        const char fileNameUTF8[1024],
+        bool loop = false,
+        FileFormats format = kFileFormatPcm16kHzFile,
+        float volumeScaling = 1.0,
+        int startPointMs = 0,
+        int stopPointMs = 0) = 0;
+
+    // Starts playing and mixing streams with the local speaker signal for
+    // playout.
+    virtual int StartPlayingFileLocally(
+        int channel,
+        InStream* stream,
+        FileFormats format = kFileFormatPcm16kHzFile,
+        float volumeScaling = 1.0,
+        int startPointMs = 0, int stopPointMs = 0) = 0;
+
+    // Stops playback of a file on a specific |channel|.
+    virtual int StopPlayingFileLocally(int channel) = 0;
+
+    // Returns the current file playing state for a specific |channel|.
+    virtual int IsPlayingFileLocally(int channel) = 0;
+
+    // Sets the volume scaling for a speaker file that is already playing.
+    virtual int ScaleLocalFilePlayout(int channel, float scale) = 0;
+
+    // Starts reading data from a file and transmits the data either
+    // mixed with or instead of the microphone signal.
+    virtual int StartPlayingFileAsMicrophone(
+        int channel,
+        const char fileNameUTF8[1024],
+        bool loop = false,
+        bool mixWithMicrophone = false,
+        FileFormats format = kFileFormatPcm16kHzFile,
+        float volumeScaling = 1.0) = 0;
+
+    // Starts reading data from a stream and transmits the data either
+    // mixed with or instead of the microphone signal.
+    virtual int StartPlayingFileAsMicrophone(
+        int channel,
+        InStream* stream,
+        bool mixWithMicrophone = false,
+        FileFormats format = kFileFormatPcm16kHzFile,
+        float volumeScaling = 1.0) = 0;
+
+    // Stops playing a file as the microphone signal for a specific |channel|.
+    virtual int StopPlayingFileAsMicrophone(int channel) = 0;
+
+    // Returns whether the |channel| is currently playing a file as microphone.
+    virtual int IsPlayingFileAsMicrophone(int channel) = 0;
+
+    // Sets the volume scaling for a microphone file that is already playing.
+    virtual int ScaleFileAsMicrophonePlayout(int channel, float scale) = 0;
+
+    // Starts recording the mixed playout audio.
+    virtual int StartRecordingPlayout(int channel,
+                                      const char* fileNameUTF8,
+                                      CodecInst* compression = NULL,
+                                      int maxSizeBytes = -1) = 0;
+
+    // Stops recording the mixed playout audio.
+    virtual int StopRecordingPlayout(int channel) = 0;
+
+    virtual int StartRecordingPlayout(int channel,
+                                      OutStream* stream,
+                                      CodecInst* compression = NULL) = 0;
+
+    // Starts recording the microphone signal to a file.
+    virtual int StartRecordingMicrophone(const char* fileNameUTF8,
+                                         CodecInst* compression = NULL,
+                                         int maxSizeBytes = -1) = 0;
+
+    // Starts recording the microphone signal to a stream.
+    virtual int StartRecordingMicrophone(OutStream* stream,
+                                         CodecInst* compression = NULL) = 0;
+
+    // Stops recording the microphone signal.
+    virtual int StopRecordingMicrophone() = 0;
+
+
+    // Gets the duration of a file.
+    virtual int GetFileDuration(const char* fileNameUTF8, int& durationMs,
+        FileFormats format = kFileFormatPcm16kHzFile) = 0;
+
+    // Gets the current played position of a file on a specific |channel|.
+    virtual int GetPlaybackPosition(int channel, int& positionMs) = 0;
+
+    virtual int ConvertPCMToWAV(const char* fileNameInUTF8,
+                                const char* fileNameOutUTF8) = 0;
+
+    virtual int ConvertPCMToWAV(InStream* streamIn,
+                                OutStream* streamOut) = 0;
+
+    virtual int ConvertWAVToPCM(const char* fileNameInUTF8,
+                                const char* fileNameOutUTF8) = 0;
+
+    virtual int ConvertWAVToPCM(InStream* streamIn,
+                                OutStream* streamOut) = 0;
+
+    virtual int ConvertPCMToCompressed(const char* fileNameInUTF8,
+                                       const char* fileNameOutUTF8,
+                                       CodecInst* compression) = 0;
+
+    virtual int ConvertPCMToCompressed(InStream* streamIn,
+                                       OutStream* streamOut,
+                                       CodecInst* compression) = 0;
+
+    virtual int ConvertCompressedToPCM(const char* fileNameInUTF8,
+                                       const char* fileNameOutUTF8) = 0;
+
+    virtual int ConvertCompressedToPCM(InStream* streamIn,
+                                       OutStream* streamOut) = 0;
+
+protected:
+    VoEFile() {}
+    virtual ~VoEFile() {}
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_VOE_FILE_H
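A minimal sketch of combining file playback into the send path with recording of the mixed playout, using only the methods declared above; the file names are illustrative and error checking is omitted:

#include "voe_file.h"

void FileDemo(webrtc::VoiceEngine* voe, int channel) {
  webrtc::VoEFile* file = webrtc::VoEFile::GetInterface(voe);

  // Replace the microphone signal with the content of a 16 kHz PCM file,
  // looping until explicitly stopped.
  file->StartPlayingFileAsMicrophone(channel, "audio_16kHz.pcm", true);

  // Record everything played out on this channel to another file.
  file->StartRecordingPlayout(channel, "playout_recording.pcm");

  // ... the session runs here ...

  file->StopRecordingPlayout(channel);
  file->StopPlayingFileAsMicrophone(channel);
  file->Release();
}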
diff --git a/voice_engine/main/interface/voe_hardware.h b/voice_engine/main/interface/voe_hardware.h
new file mode 100644
index 0000000..b4d02d0
--- /dev/null
+++ b/voice_engine/main/interface/voe_hardware.h
@@ -0,0 +1,120 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+//  - Audio device handling.
+//  - Device information.
+//  - CPU load monitoring.
+//
+// Usage example, omitting error checking:
+//
+//  using namespace webrtc;
+//  VoiceEngine* voe = VoiceEngine::Create();
+//  VoEBase* base = VoEBase::GetInterface(voe);
+//  VoEHardware* hardware  = VoEHardware::GetInterface(voe);
+//  base->Init();
+//  ...
+//  int n_devices = 0;
+//  hardware->GetNumOfPlayoutDevices(n_devices);
+//  ...
+//  base->Terminate();
+//  base->Release();
+//  hardware->Release();
+//  VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_HARDWARE_H
+#define WEBRTC_VOICE_ENGINE_VOE_HARDWARE_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+class WEBRTC_DLLEXPORT VoEHardware
+{
+public:
+    // Factory for the VoEHardware sub-API. Increases an internal
+    // reference counter if successful. Returns NULL if the API is not
+    // supported or if construction fails.
+    static VoEHardware* GetInterface(VoiceEngine* voiceEngine);
+
+    // Releases the VoEHardware sub-API and decreases an internal
+    // reference counter. Returns the new reference count. This value should
+    // be zero for all sub-APIs before the VoiceEngine object can be safely
+    // deleted.
+    virtual int Release() = 0;
+
+    // Gets the number of audio devices available for recording.
+    virtual int GetNumOfRecordingDevices(int& devices) = 0;
+
+    // Gets the number of audio devices available for playout.
+    virtual int GetNumOfPlayoutDevices(int& devices) = 0;
+
+    // Gets the name of a specific recording device given by an |index|.
+    // On Windows Vista/7, it also retrieves an additional unique ID
+    // (GUID) for the recording device.
+    virtual int GetRecordingDeviceName(int index, char strNameUTF8[128],
+                                       char strGuidUTF8[128]) = 0;
+
+    // Gets the name of a specific playout device given by an |index|.
+    // On Windows Vista/7, it also retrieves an additional unique ID
+    // (GUID) for the playout device.
+    virtual int GetPlayoutDeviceName(int index, char strNameUTF8[128],
+                                     char strGuidUTF8[128]) = 0;
+
+    // Checks if the sound card is available to be opened for recording.
+    virtual int GetRecordingDeviceStatus(bool& isAvailable) = 0;
+
+    // Checks if the sound card is available to be opened for playout.
+    virtual int GetPlayoutDeviceStatus(bool& isAvailable) = 0;
+
+    // Sets the audio device used for recording.
+    virtual int SetRecordingDevice(
+        int index, StereoChannel recordingChannel = kStereoBoth) = 0;
+
+    // Sets the audio device used for playout.
+    virtual int SetPlayoutDevice(int index) = 0;
+
+    // Sets the type of audio device layer to use.
+    virtual int SetAudioDeviceLayer(AudioLayers audioLayer) = 0;
+
+    // Gets the currently used (active) audio device layer.
+    virtual int GetAudioDeviceLayer(AudioLayers& audioLayer) = 0;
+
+    // Gets the VoiceEngine's current CPU consumption in terms of the percent
+    // of total CPU availability. [Windows only]
+    virtual int GetCPULoad(int& loadPercent) = 0;
+
+    // Gets the computer's current CPU consumption in terms of the percent
+    // of the total CPU availability.
+    virtual int GetSystemCPULoad(int& loadPercent) = 0;
+
+    // Not supported
+    virtual int ResetAudioDevice() = 0;
+
+    // Not supported
+    virtual int AudioDeviceControl(
+        unsigned int par1, unsigned int par2, unsigned int par3) = 0;
+
+    // Not supported
+    virtual int SetLoudspeakerStatus(bool enable) = 0;
+
+    // Not supported
+    virtual int GetLoudspeakerStatus(bool& enabled) = 0;
+
+protected:
+    VoEHardware() {}
+    virtual ~VoEHardware() {}
+};
+
+} // namespace webrtc
+
+#endif  //  WEBRTC_VOICE_ENGINE_VOE_HARDWARE_H
diff --git a/voice_engine/main/interface/voe_neteq_stats.h b/voice_engine/main/interface/voe_neteq_stats.h
new file mode 100644
index 0000000..197285c
--- /dev/null
+++ b/voice_engine/main/interface/voe_neteq_stats.h
@@ -0,0 +1,58 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
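+// Usage example (a minimal sketch following the same pattern as the other
+// sub-API headers, omitting error checking):
+//
+//  using namespace webrtc;
+//  VoiceEngine* voe = VoiceEngine::Create();
+//  VoEBase* base = VoEBase::GetInterface(voe);
+//  VoENetEqStats* neteq = VoENetEqStats::GetInterface(voe);
+//  base->Init();
+//  int ch = base->CreateChannel();
+//  ...
+//  NetworkStatistics stats;
+//  neteq->GetNetworkStatistics(ch, stats);
+//  ...
+//  base->DeleteChannel(ch);
+//  base->Terminate();
+//  base->Release();
+//  neteq->Release();
+//  VoiceEngine::Delete(voe);
+//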
+#ifndef WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_H
+#define WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+class WEBRTC_DLLEXPORT VoENetEqStats
+{
+public:
+    // Factory for the VoENetEqStats sub-API. Increases an internal
+    // reference counter if successful. Returns NULL if the API is not
+    // supported or if construction fails.
+    static VoENetEqStats* GetInterface(VoiceEngine* voiceEngine);
+
+    // Releases the VoENetEqStats sub-API and decreases an internal
+    // reference counter. Returns the new reference count. This value should
+    // be zero for all sub-API:s before the VoiceEngine object can be safely
+    // deleted.
+    virtual int Release() = 0;
+
+    // Get the "in-call" statistics from NetEQ.
+    // The statistics are reset after the query.
+    virtual int GetNetworkStatistics(int channel, NetworkStatistics& stats) = 0;
+
+    // Get the "post-call" jitter statistics from NetEQ.
+    // The statistics are not reset by the query. Use the function
+    // ResetJitterStatistics() to reset.
+    virtual int GetJitterStatistics(int channel, JitterStatistics& stats) = 0;
+
+    // Get the optimal buffer size calculated for the current network
+    // conditions.
+    virtual int GetPreferredBufferSize(
+        int channel, unsigned short& preferredBufferSize) = 0;
+
+    // Reset "post-call" jitter statistics.
+    virtual int ResetJitterStatistics(int channel) = 0;
+
+protected:
+    VoENetEqStats() {}
+    virtual ~VoENetEqStats() {}
+};
+
+}   // namespace webrtc
+
+#endif    // #ifndef WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_H
diff --git a/voice_engine/main/interface/voe_network.h b/voice_engine/main/interface/voe_network.h
new file mode 100644
index 0000000..10acf1c
--- /dev/null
+++ b/voice_engine/main/interface/voe_network.h
@@ -0,0 +1,177 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+//  - External protocol support.
+//  - Extended port and address APIs.
+//  - Port and address filters.
+//  - Windows GQoS functions.
+//  - Packet timeout notification.
+//  - Dead-or-Alive connection observations.
+//  - Transmission of raw RTP/RTCP packets into existing channels.
+//
+// Usage example, omitting error checking:
+//
+//  using namespace webrtc;
+//  VoiceEngine* voe = VoiceEngine::Create();
+//  VoEBase* base = VoEBase::GetInterface(voe);
+//  VoENetwork* netw  = VoENetwork::GetInterface(voe);
+//  base->Init();
+//  int ch = base->CreateChannel();
+//  ...
+//  netw->SetPeriodicDeadOrAliveStatus(ch, true);
+//  ...
+//  base->DeleteChannel(ch);
+//  base->Terminate();
+//  base->Release();
+//  netw->Release();
+//  VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_NETWORK_H
+#define WEBRTC_VOICE_ENGINE_VOE_NETWORK_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+// VoEConnectionObserver
+class WEBRTC_DLLEXPORT VoEConnectionObserver
+{
+public:
+    // This method is called periodically and delivers dead-or-alive
+    // notifications for a specified |channel| when the observer interface
+    // has been installed and activated.
+    virtual void OnPeriodicDeadOrAlive(const int channel, const bool alive) = 0;
+
+protected:
+    virtual ~VoEConnectionObserver() {}
+};
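+
+// Example observer implementation (a minimal sketch; the class name and body
+// are illustrative only, using |netw| and |ch| from the usage example above):
+//
+//  class MyConnectionObserver : public VoEConnectionObserver {
+//   public:
+//    virtual void OnPeriodicDeadOrAlive(const int channel, const bool alive) {
+//      printf("channel %d is %s\n", channel, alive ? "alive" : "dead");
+//    }
+//  };
+//
+//  MyConnectionObserver my_observer;
+//  netw->RegisterDeadOrAliveObserver(ch, my_observer);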
+
+// VoENetwork
+class WEBRTC_DLLEXPORT VoENetwork
+{
+public:
+    // Factory for the VoENetwork sub-API. Increases an internal
+    // reference counter if successful. Returns NULL if the API is not
+    // supported or if construction fails.
+    static VoENetwork* GetInterface(VoiceEngine* voiceEngine);
+
+    // Releases the VoENetwork sub-API and decreases an internal
+    // reference counter. Returns the new reference count. This value should
+    // be zero for all sub-API:s before the VoiceEngine object can be safely
+    // deleted.
+    virtual int Release() = 0;
+
+    // Installs and enables a user-defined external transport protocol for a
+    // specified |channel|.
+    virtual int RegisterExternalTransport(
+        int channel, Transport& transport) = 0;
+
+    // Removes and disables a user-defined external transport protocol for a
+    // specified |channel|.
+    virtual int DeRegisterExternalTransport(int channel) = 0;
+
+    // The packets received from the network should be passed to this
+    // function when external transport is enabled. Note that the data
+    // including the RTP-header must also be given to the VoiceEngine.
+    virtual int ReceivedRTPPacket(
+        int channel, const void* data, unsigned int length) = 0;
+
+    // The packets received from the network should be passed to this
+    // function when external transport is enabled. Note that the data
+    // including the RTCP-header must also be given to the VoiceEngine.
+    virtual int ReceivedRTCPPacket(
+        int channel, const void* data, unsigned int length) = 0;
+
+    // Gets the source ports and IP address of incoming packets on a
+    // specific |channel|.
+    virtual int GetSourceInfo(
+        int channel, int& rtpPort, int& rtcpPort, char ipAddr[64]) = 0;
+
+    // Gets the local (host) IP address.
+    virtual int GetLocalIP(char ipAddr[64], bool ipv6 = false) = 0;
+
+    // Enables IPv6 for a specified |channel|.
+    virtual int EnableIPv6(int channel) = 0;
+
+    // Gets the current IPv6 status for a specified |channel|.
+    virtual bool IPv6IsEnabled(int channel) = 0;
+
+    // Enables a port and IP address filter for incoming packets on a
+    // specific |channel|.
+    virtual int SetSourceFilter(int channel,
+        int rtpPort, int rtcpPort = 0, const char ipAddr[64] = 0) = 0;
+
+    // Gets the current port and IP-address filter for a specified |channel|.
+    virtual int GetSourceFilter(
+        int channel, int& rtpPort, int& rtcpPort, char ipAddr[64]) = 0;
+
+    // Sets the six-bit Differentiated Services Code Point (DSCP) in the
+    // IP header of the outgoing stream for a specific |channel|.
+    virtual int SetSendTOS(int channel,
+        int DSCP, int priority = -1, bool useSetSockopt = false) = 0;
+
+    // Gets the six-bit DSCP in the IP header of the outgoing stream for
+    // a specific channel.
+    virtual int GetSendTOS(
+        int channel, int& DSCP, int& priority, bool& useSetSockopt) = 0;
+
+    // Sets the Generic Quality of Service (GQoS) service level.
+    // The Windows operating system then maps to a Differentiated Services
+    // Code Point (DSCP) and to an 802.1p setting. [Windows only]
+    virtual int SetSendGQoS(
+        int channel, bool enable, int serviceType, int overrideDSCP = 0) = 0;
+
+    // Gets the Generic Quality of Service (GQoS) service level.
+    virtual int GetSendGQoS(
+        int channel, bool& enabled, int& serviceType, int& overrideDSCP) = 0;
+
+    // Enables or disables warnings that report if packets have not been
+    // received in |timeoutSeconds| seconds for a specific |channel|.
+    virtual int SetPacketTimeoutNotification(
+        int channel, bool enable, int timeoutSeconds = 2) = 0;
+
+    // Gets the current time-out notification status.
+    virtual int GetPacketTimeoutNotification(
+        int channel, bool& enabled, int& timeoutSeconds) = 0;
+
+    // Installs the observer class implementation for a specified |channel|.
+    virtual int RegisterDeadOrAliveObserver(
+        int channel, VoEConnectionObserver& observer) = 0;
+
+    // Removes the observer class implementation for a specified |channel|.
+    virtual int DeRegisterDeadOrAliveObserver(int channel) = 0;
+
+    // Enables or disables the periodic dead-or-alive callback functionality
+    // for a specified |channel|.
+    virtual int SetPeriodicDeadOrAliveStatus(
+        int channel, bool enable, int sampleTimeSeconds = 2) = 0;
+
+    // Gets the current dead-or-alive notification status.
+    virtual int GetPeriodicDeadOrAliveStatus(
+        int channel, bool& enabled, int& sampleTimeSeconds) = 0;
+
+    // Handles sending a raw UDP data packet over an existing RTP or RTCP
+    // socket.
+    virtual int SendUDPPacket(
+        int channel, const void* data, unsigned int length,
+        int& transmittedBytes, bool useRtcpSocket = false) = 0;
+
+protected:
+    VoENetwork() {}
+    virtual ~VoENetwork() {}
+};
+
+} // namespace webrtc
+
+#endif  //  WEBRTC_VOICE_ENGINE_VOE_NETWORK_H
diff --git a/voice_engine/main/interface/voe_rtp_rtcp.h b/voice_engine/main/interface/voe_rtp_rtcp.h
new file mode 100644
index 0000000..e26d85f
--- /dev/null
+++ b/voice_engine/main/interface/voe_rtp_rtcp.h
@@ -0,0 +1,234 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+//  - Callbacks for RTP and RTCP events such as modified SSRC or CSRC.
+//  - SSRC handling.
+//  - Transmission of RTCP sender reports.
+//  - Obtaining RTCP data from incoming RTCP sender reports.
+//  - RTP and RTCP statistics (jitter, packet loss, RTT etc.).
+//  - Forward Error Correction (FEC).
+//  - RTP Keepalive for maintaining the NAT mappings associated to RTP flows.
+//  - Writing RTP and RTCP packets to binary files for off-line analysis of
+//    the call quality.
+//  - Inserting extra RTP packets into active audio stream.
+//
+// Usage example, omitting error checking:
+//
+//  using namespace webrtc;
+//  VoiceEngine* voe = VoiceEngine::Create();
+//  VoEBase* base = VoEBase::GetInterface(voe);
+//  VoERTP_RTCP* rtp_rtcp  = VoERTP_RTCP::GetInterface(voe);
+//  base->Init();
+//  int ch = base->CreateChannel();
+//  ...
+//  rtp_rtcp->SetLocalSSRC(ch, 12345);
+//  ...
+//  base->DeleteChannel(ch);
+//  base->Terminate();
+//  base->Release();
+//  rtp_rtcp->Release();
+//  VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_H
+#define WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+// VoERTPObserver
+class WEBRTC_DLLEXPORT VoERTPObserver
+{
+public:
+    virtual void OnIncomingCSRCChanged(
+        const int channel, const unsigned int CSRC, const bool added) = 0;
+
+    virtual void OnIncomingSSRCChanged(
+        const int channel, const unsigned int SSRC) = 0;
+
+protected:
+    virtual ~VoERTPObserver() {}
+};
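+
+// Example observer implementation (a minimal sketch; the class name is
+// illustrative only):
+//
+//  class MySSRCObserver : public VoERTPObserver {
+//   public:
+//    virtual void OnIncomingCSRCChanged(
+//        const int channel, const unsigned int CSRC, const bool added) {}
+//    virtual void OnIncomingSSRCChanged(
+//        const int channel, const unsigned int SSRC) {
+//      // React to a new incoming SSRC on |channel| here.
+//    }
+//  };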
+
+// VoERTCPObserver
+class WEBRTC_DLLEXPORT VoERTCPObserver
+{
+public:
+    virtual void OnApplicationDataReceived(
+        const int channel, const unsigned char subType,
+        const unsigned int name, const unsigned char* data,
+        const unsigned short dataLengthInBytes) = 0;
+
+protected:
+    virtual ~VoERTCPObserver() {}
+};
+
+// CallStatistics
+struct CallStatistics
+{
+    unsigned short fractionLost;
+    unsigned int cumulativeLost;
+    unsigned int extendedMax;
+    unsigned int jitterSamples;
+    int rttMs;
+    int bytesSent;
+    int packetsSent;
+    int bytesReceived;
+    int packetsReceived;
+};
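+
+// Example of reading the statistics (a sketch using |rtp_rtcp| and |ch| from
+// the usage example above, omitting error checking):
+//
+//  CallStatistics stats;
+//  rtp_rtcp->GetRTCPStatistics(ch, stats);
+//  ... inspect stats.rttMs, stats.fractionLost, stats.packetsSent, etc.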
+
+// VoERTP_RTCP
+class WEBRTC_DLLEXPORT VoERTP_RTCP
+{
+public:
+
+    // Factory for the VoERTP_RTCP sub-API. Increases an internal
+    // reference counter if successful. Returns NULL if the API is not
+    // supported or if construction fails.
+    static VoERTP_RTCP* GetInterface(VoiceEngine* voiceEngine);
+
+    // Releases the VoERTP_RTCP sub-API and decreases an internal
+    // reference counter. Returns the new reference count. This value should
+    // be zero for all sub-API:s before the VoiceEngine object can be safely
+    // deleted.
+    virtual int Release() = 0;
+
+    // Registers an instance of a VoERTPObserver derived class for a specified
+    // |channel|. It will allow the user to observe callbacks related to the
+    // RTP protocol such as changes in the incoming SSRC.
+    virtual int RegisterRTPObserver(int channel, VoERTPObserver& observer) = 0;
+
+    // Deregisters an instance of a VoERTPObserver derived class for a
+    // specified |channel|.
+    virtual int DeRegisterRTPObserver(int channel) = 0;
+
+    // Registers an instance of a VoERTCPObserver derived class for a specified
+    // |channel|.
+    virtual int RegisterRTCPObserver(
+        int channel, VoERTCPObserver& observer) = 0;
+
+    // Deregisters an instance of a VoERTCPObserver derived class for a
+    // specified |channel|.
+    virtual int DeRegisterRTCPObserver(int channel) = 0;
+
+    // Sets the local RTP synchronization source identifier (SSRC) explicitly.
+    virtual int SetLocalSSRC(int channel, unsigned int ssrc) = 0;
+
+    // Gets the local RTP SSRC of a specified |channel|.
+    virtual int GetLocalSSRC(int channel, unsigned int& ssrc) = 0;
+
+    // Gets the SSRC of the incoming RTP packets.
+    virtual int GetRemoteSSRC(int channel, unsigned int& ssrc) = 0;
+
+    // Sets the status of rtp-audio-level-indication on a specific |channel|.
+    virtual int SetRTPAudioLevelIndicationStatus(
+        int channel, bool enable, unsigned char ID = 1) = 0;
+
+    // Gets the status of rtp-audio-level-indication on a specific |channel|.
+    virtual int GetRTPAudioLevelIndicationStatus(
+        int channel, bool& enabled, unsigned char& ID) = 0;
+
+    // Gets the CSRCs of the incoming RTP packets.
+    virtual int GetRemoteCSRCs(int channel, unsigned int arrCSRC[15]) = 0;
+
+    // Sets the RTCP status on a specific |channel|.
+    virtual int SetRTCPStatus(int channel, bool enable) = 0;
+
+    // Gets the RTCP status on a specific |channel|.
+    virtual int GetRTCPStatus(int channel, bool& enabled) = 0;
+
+    // Sets the canonical name (CNAME) parameter for RTCP reports on a
+    // specific |channel|.
+    virtual int SetRTCP_CNAME(int channel, const char cName[256]) = 0;
+
+    // Gets the canonical name (CNAME) parameter for RTCP reports on a
+    // specific |channel|.
+    virtual int GetRTCP_CNAME(int channel, char cName[256]) = 0;
+
+    // Gets the canonical name (CNAME) parameter for incoming RTCP reports
+    // on a specific channel.
+    virtual int GetRemoteRTCP_CNAME(int channel, char cName[256]) = 0;
+
+    // Gets RTCP data from incoming RTCP Sender Reports.
+    virtual int GetRemoteRTCPData(
+        int channel, unsigned int& NTPHigh, unsigned int& NTPLow,
+        unsigned int& timestamp, unsigned int& playoutTimestamp,
+        unsigned int* jitter = NULL, unsigned short* fractionLost = NULL) = 0;
+
+    // Gets RTP statistics for a specific |channel|.
+    virtual int GetRTPStatistics(
+        int channel, unsigned int& averageJitterMs, unsigned int& maxJitterMs,
+        unsigned int& discardedPackets) = 0;
+
+    // Gets RTCP statistics for a specific |channel|.
+    virtual int GetRTCPStatistics(int channel, CallStatistics& stats) = 0;
+
+    // Sends an RTCP APP packet on a specific |channel|.
+    virtual int SendApplicationDefinedRTCPPacket(
+        int channel, const unsigned char subType, unsigned int name,
+        const char* data, unsigned short dataLengthInBytes) = 0;
+
+    // Sets the Forward Error Correction (FEC) status on a specific |channel|.
+    virtual int SetFECStatus(
+        int channel, bool enable, int redPayloadtype = -1) = 0;
+
+    // Gets the FEC status on a specific |channel|.
+    virtual int GetFECStatus(
+        int channel, bool& enabled, int& redPayloadtype) = 0;
+
+    // Sets the RTP keepalive mechanism status.
+    // This functionality can maintain an existing Network Address Translator
+    // (NAT) mapping while regular RTP is no longer transmitted.
+    virtual int SetRTPKeepaliveStatus(
+        int channel, bool enable, unsigned char unknownPayloadType,
+        int deltaTransmitTimeSeconds = 15) = 0;
+
+    // Gets the RTP keepalive mechanism status.
+    virtual int GetRTPKeepaliveStatus(
+        int channel, bool& enabled, unsigned char& unknownPayloadType,
+        int& deltaTransmitTimeSeconds) = 0;
+
+    // Enables capturing of RTP packets to a binary file on a specific
+    // |channel| and for a given |direction|. The file can later be replayed
+    // using e.g. RTP Tools’ rtpplay since the binary file format is
+    // compatible with the rtpdump format.
+    virtual int StartRTPDump(
+        int channel, const char fileNameUTF8[1024],
+        RTPDirections direction = kRtpIncoming) = 0;
+
+    // Disables capturing of RTP packets to a binary file on a specific
+    // |channel| and for a given |direction|.
+    virtual int StopRTPDump(
+        int channel, RTPDirections direction = kRtpIncoming) = 0;
+
+    // Gets the current RTP capturing state for the specified
+    // |channel| and |direction|.
+    virtual int RTPDumpIsActive(
+        int channel, RTPDirections direction = kRtpIncoming) = 0;
+
+    // Sends an extra RTP packet using an existing/active RTP session.
+    // It is possible to set the payload type, marker bit and payload data
+    // of the extra RTP packet.
+    virtual int InsertExtraRTPPacket(
+        int channel, unsigned char payloadType, bool markerBit,
+        const char* payloadData, unsigned short payloadSize) = 0;
+
+protected:
+    VoERTP_RTCP() {}
+    virtual ~VoERTP_RTCP() {}
+};
+
+}  // namespace webrtc
+
+#endif  // #ifndef WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_H
diff --git a/voice_engine/main/interface/voe_video_sync.h b/voice_engine/main/interface/voe_video_sync.h
new file mode 100644
index 0000000..ac3b84a
--- /dev/null
+++ b/voice_engine/main/interface/voe_video_sync.h
@@ -0,0 +1,85 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+//  - RTP header modification (time stamp and sequence number fields).
+//  - Playout delay tuning to synchronize the voice with video.
+//  - Playout delay monitoring.
+//
+// Usage example, omitting error checking:
+//
+//  using namespace webrtc;
+//  VoiceEngine* voe = VoiceEngine::Create();
+//  VoEBase* base = VoEBase::GetInterface(voe);
+//  VoEVideoSync* vsync  = VoEVideoSync::GetInterface(voe);
+//  base->Init();
+//  ...
+//  int buffer_ms(0);
+//  vsync->GetPlayoutBufferSize(buffer_ms);
+//  ...
+//  base->Terminate();
+//  base->Release();
+//  vsync->Release();
+//  VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_H
+#define WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class RtpRtcp;
+class VoiceEngine;
+
+class WEBRTC_DLLEXPORT VoEVideoSync
+{
+public:
+    // Factory for the VoEVideoSync sub-API. Increases an internal
+    // reference counter if successful. Returns NULL if the API is not
+    // supported or if construction fails.
+    static VoEVideoSync* GetInterface(VoiceEngine* voiceEngine);
+
+    // Releases the VoEVideoSync sub-API and decreases an internal
+    // reference counter. Returns the new reference count. This value should
+    // be zero for all sub-API:s before the VoiceEngine object can be safely
+    // deleted.
+    virtual int Release() = 0;
+
+    // Gets the current sound card buffer size (playout delay).
+    virtual int GetPlayoutBufferSize(int& bufferMs) = 0;
+
+    // Sets an additional delay for the playout jitter buffer.
+    virtual int SetMinimumPlayoutDelay(int channel, int delayMs) = 0;
+
+    // Gets the sum of the algorithmic delay, jitter buffer delay, and the
+    // playout buffer delay for a specified |channel|.
+    virtual int GetDelayEstimate(int channel, int& delayMs) = 0;
+
+    // Manual initialization of the RTP timestamp.
+    virtual int SetInitTimestamp(int channel, unsigned int timestamp) = 0;
+
+    // Manual initialization of the RTP sequence number.
+    virtual int SetInitSequenceNumber(int channel, short sequenceNumber) = 0;
+
+    // Get the received RTP timestamp
+    virtual int GetPlayoutTimestamp(int channel, unsigned int& timestamp) = 0;
+
+    // Gets a handle to the RTP/RTCP module used by a specified |channel|.
+    virtual int GetRtpRtcp(int channel, RtpRtcp*& rtpRtcpModule) = 0;
+
+protected:
+    VoEVideoSync() { }
+    virtual ~VoEVideoSync() { }
+};
+
+}   // namespace webrtc
+
+#endif  // #ifndef WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_H
diff --git a/voice_engine/main/interface/voe_volume_control.h b/voice_engine/main/interface/voe_volume_control.h
new file mode 100644
index 0000000..6d64e96
--- /dev/null
+++ b/voice_engine/main/interface/voe_volume_control.h
@@ -0,0 +1,127 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+//  - Speaker volume controls.
+//  - Microphone volume control.
+//  - Non-linear speech level control.
+//  - Mute functions.
+//  - Additional stereo scaling methods.
+//
+// Usage example, omitting error checking:
+//
+//  using namespace webrtc;
+//  VoiceEngine* voe = VoiceEngine::Create();
+//  VoEBase* base = VoEBase::GetInterface(voe);
+//  VoEVolumeControl* volume  = VoEVolumeControl::GetInterface(voe);
+//  base->Init();
+//  int ch = base->CreateChannel();
+//  ...
+//  volume->SetInputMute(ch, true);
+//  ...
+//  base->DeleteChannel(ch);
+//  base->Terminate();
+//  base->Release();
+//  volume->Release();
+//  VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_H
+#define WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+class WEBRTC_DLLEXPORT VoEVolumeControl
+{
+public:
+    // Factory for the VoEVolumeControl sub-API. Increases an internal
+    // reference counter if successful. Returns NULL if the API is not
+    // supported or if construction fails.
+    static VoEVolumeControl* GetInterface(VoiceEngine* voiceEngine);
+
+    // Releases the VoEVolumeControl sub-API and decreases an internal
+    // reference counter. Returns the new reference count. This value should
+    // be zero for all sub-API:s before the VoiceEngine object can be safely
+    // deleted.
+    virtual int Release() = 0;
+
+    // Sets the speaker |volume| level. Valid range is [0,255].
+    virtual int SetSpeakerVolume(unsigned int volume) = 0;
+
+    // Gets the speaker |volume| level.
+    virtual int GetSpeakerVolume(unsigned int& volume) = 0;
+
+    // Mutes the speaker device completely in the operating system.
+    virtual int SetSystemOutputMute(bool enable) = 0;
+
+    // Gets the output device mute state in the operating system.
+    virtual int GetSystemOutputMute(bool &enabled) = 0;
+
+    // Sets the microphone volume level. Valid range is [0,255].
+    virtual int SetMicVolume(unsigned int volume) = 0;
+
+    // Gets the microphone volume level.
+    virtual int GetMicVolume(unsigned int& volume) = 0;
+
+    // Mutes the microphone input signal completely without affecting
+    // the audio device volume.
+    virtual int SetInputMute(int channel, bool enable) = 0;
+
+    // Gets the current microphone input mute state.
+    virtual int GetInputMute(int channel, bool& enabled) = 0;
+
+    // Mutes the microphone device completely in the operating system.
+    virtual int SetSystemInputMute(bool enable) = 0;
+
+    // Gets the mute state of the input device in the operating system.
+    virtual int GetSystemInputMute(bool& enabled) = 0;
+
+    // Gets the microphone speech |level|, mapped non-linearly to the range
+    // [0,9].
+    virtual int GetSpeechInputLevel(unsigned int& level) = 0;
+
+    // Gets the speaker speech |level|, mapped non-linearly to the range
+    // [0,9].
+    virtual int GetSpeechOutputLevel(int channel, unsigned int& level) = 0;
+
+    // Gets the microphone speech |level|, mapped linearly to the range
+    // [0,32768].
+    virtual int GetSpeechInputLevelFullRange(unsigned int& level) = 0;
+
+    // Gets the speaker speech |level|, mapped linearly to the range [0,32768].
+    virtual int GetSpeechOutputLevelFullRange(
+        int channel, unsigned int& level) = 0;
+
+    // Sets a volume |scaling| applied to the outgoing signal of a specific
+    // channel. Valid scale range is [0.0, 10.0].
+    virtual int SetChannelOutputVolumeScaling(int channel, float scaling) = 0;
+
+    // Gets the current volume scaling for a specified |channel|.
+    virtual int GetChannelOutputVolumeScaling(int channel, float& scaling) = 0;
+
+    // Scales volume of the |left| and |right| channels independently.
+    // Valid scale range is [0.0, 1.0].
+    virtual int SetOutputVolumePan(int channel, float left, float right) = 0;
+
+    // Gets the current left and right scaling factors.
+    virtual int GetOutputVolumePan(int channel, float& left, float& right) = 0;
+
+protected:
+    VoEVolumeControl() {}
+    virtual ~VoEVolumeControl() {}
+};
+
+}  // namespace webrtc
+
+#endif  // #ifndef WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_H
diff --git a/voice_engine/main/source/audio_frame_operations.cc b/voice_engine/main/source/audio_frame_operations.cc
new file mode 100644
index 0000000..e08d0a2
--- /dev/null
+++ b/voice_engine/main/source/audio_frame_operations.cc
@@ -0,0 +1,129 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_frame_operations.h"
+#include "module_common_types.h"
+
+namespace webrtc {
+
+namespace voe {
+
+WebRtc_Word32 
+AudioFrameOperations::MonoToStereo(AudioFrame& audioFrame)
+{
+    if (audioFrame._audioChannel != 1)
+    {
+        return -1;
+    }
+    if ((audioFrame._payloadDataLengthInSamples << 1) >=
+        AudioFrame::kMaxAudioFrameSizeSamples)
+    {
+        // not enough memory to expand from mono to stereo
+        return -1;
+    }
+
+    WebRtc_Word16* payloadCopy =
+        new WebRtc_Word16[audioFrame._payloadDataLengthInSamples];
+    memcpy(payloadCopy, audioFrame._payloadData,
+           sizeof(WebRtc_Word16)*audioFrame._payloadDataLengthInSamples);
+
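+    // Duplicate each mono sample into the left and right slots of the
+    // interleaved stereo buffer.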
+    for (int i = 0; i < audioFrame._payloadDataLengthInSamples; i++)
+    {
+        audioFrame._payloadData[2*i]   = payloadCopy[i];
+        audioFrame._payloadData[2*i+1] = payloadCopy[i];
+    }
+
+    audioFrame._audioChannel = 2;
+
+    delete [] payloadCopy;
+    return 0;
+}
+
+WebRtc_Word32 
+AudioFrameOperations::StereoToMono(AudioFrame& audioFrame)
+{
+    if (audioFrame._audioChannel != 2)
+    {
+        return -1;
+    }
+
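+    // Downmix by summing the half-amplitude left and right samples; halving
+    // each channel first keeps the sum within the 16-bit range.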
+    for (int i = 0; i < audioFrame._payloadDataLengthInSamples; i++)
+    {
+        audioFrame._payloadData[i] = (audioFrame._payloadData[2*i] >> 1) +
+            (audioFrame._payloadData[2*i+1] >> 1);
+    }
+
+    audioFrame._audioChannel = 1;
+
+    return 0;
+}
+
+WebRtc_Word32 
+AudioFrameOperations::Mute(AudioFrame& audioFrame)
+{
+    const int sizeInBytes = sizeof(WebRtc_Word16) *
+        audioFrame._payloadDataLengthInSamples * audioFrame._audioChannel;
+    memset(audioFrame._payloadData, 0, sizeInBytes);
+    audioFrame._energy = 0;
+    return 0;
+}
+
+WebRtc_Word32 
+AudioFrameOperations::Scale(const float left,
+                            const float right,
+                            AudioFrame& audioFrame)
+{
+    if (audioFrame._audioChannel == 1)
+    {
+        assert(false);
+        return -1;
+    }
+
+    for (int i = 0; i < audioFrame._payloadDataLengthInSamples; i++)
+    {
+        audioFrame._payloadData[2*i] =
+            (WebRtc_Word16)(left*audioFrame._payloadData[2*i]);
+        audioFrame._payloadData[2*i+1] =
+            (WebRtc_Word16)(right*audioFrame._payloadData[2*i+1]);
+    }
+    return 0;
+}
+
+WebRtc_Word32 
+AudioFrameOperations::ScaleWithSat(const float scale, AudioFrame& audioFrame)
+{
+    WebRtc_Word32 tmp(0);
+
+    // Ensure that the output result is saturated to [-32768, +32767].
+    for (int i = 0;
+        i < audioFrame._payloadDataLengthInSamples * audioFrame._audioChannel;
+        i++)
+    {
+        tmp = static_cast<WebRtc_Word32> (scale * audioFrame._payloadData[i]);
+        if (tmp < -32768)
+        {
+            audioFrame._payloadData[i] = -32768;
+        }
+        else if (tmp > 32767)
+        {
+            audioFrame._payloadData[i] = 32767;
+        }
+        else
+        {
+            audioFrame._payloadData[i] = static_cast<WebRtc_Word16> (tmp);
+        }
+    }
+    return 0;
+}
+
+}  //  namespace voe
+
+}  //  namespace webrtc
+
diff --git a/voice_engine/main/source/audio_frame_operations.h b/voice_engine/main/source/audio_frame_operations.h
new file mode 100644
index 0000000..368850b
--- /dev/null
+++ b/voice_engine/main/source/audio_frame_operations.h
@@ -0,0 +1,43 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H
+#define WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H
+
+#include "typedefs.h"
+
+namespace webrtc {
+
+class AudioFrame;
+
+namespace voe {
+
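+// Helper operations applied in-place on an AudioFrame.
+//
+// Usage sketch (assuming an AudioFrame |frame| that has been filled in
+// elsewhere):
+//
+//  AudioFrameOperations::MonoToStereo(frame);
+//  AudioFrameOperations::Scale(0.5f, 0.5f, frame);
+//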
+class AudioFrameOperations
+{
+public:
+    static WebRtc_Word32 MonoToStereo(AudioFrame& audioFrame);
+
+    static WebRtc_Word32 StereoToMono(AudioFrame& audioFrame);
+
+    static WebRtc_Word32 Mute(AudioFrame& audioFrame);
+
+    static WebRtc_Word32 Scale(const float left,
+                               const float right,
+                               AudioFrame& audioFrame);
+
+    static WebRtc_Word32 ScaleWithSat(const float scale,
+                                      AudioFrame& audioFrame);
+};
+
+}  //  namespace voe
+
+}  //  namespace webrtc
+
+#endif  // #ifndef WEBRTC_VOICE_ENGINE_AUDIO_FRAME_OPERATIONS_H
diff --git a/voice_engine/main/source/channel.cc b/voice_engine/main/source/channel.cc
new file mode 100644
index 0000000..608f744
--- /dev/null
+++ b/voice_engine/main/source/channel.cc
@@ -0,0 +1,6656 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "channel.h"
+
+#include "audio_device.h"
+#include "audio_frame_operations.h"
+#include "audio_processing.h"
+#include "critical_section_wrapper.h"
+#include "output_mixer.h"
+#include "process_thread.h"
+#include "rtp_dump.h"
+#include "statistics.h"
+#include "trace.h"
+#include "transmit_mixer.h"
+#include "utility.h"
+#include "voe_base.h"
+#include "voe_external_media.h"
+#include "voe_rtp_rtcp.h" 
+
+#if defined(_WIN32)
+#include <Qos.h>
+#endif
+
+namespace webrtc
+{
+
+namespace voe
+{
+
+WebRtc_Word32
+Channel::SendData(FrameType frameType,
+                  WebRtc_UWord8   payloadType,
+                  WebRtc_UWord32  timeStamp,
+                  const WebRtc_UWord8*  payloadData,
+                  WebRtc_UWord16  payloadSize,
+                  const RTPFragmentationHeader* fragmentation)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u,"
+                 " payloadSize=%u, fragmentation=0x%x)",
+                 frameType, payloadType, timeStamp, payloadSize, fragmentation);
+
+    if (_includeAudioLevelIndication)
+    {
+        // Store current audio level in the RTP/RTCP module.
+        // The level will be used in combination with voice-activity state
+        // (frameType) to add an RTP header extension
+        _rtpRtcpModule.SetAudioLevel(_audioLevel_dBov);
+    }
+
+    // Push data from ACM to RTP/RTCP-module to deliver audio frame for
+    // packetization.
+    // This call will trigger Transport::SendPacket() from the RTP/RTCP module.
+    if (_rtpRtcpModule.SendOutgoingData((FrameType&)frameType,
+                                        payloadType,
+                                        timeStamp,
+                                        payloadData,
+                                        payloadSize,
+                                        fragmentation) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
+            "Channel::SendData() failed to send data to RTP/RTCP module");
+        return -1;
+    }
+
+    _lastLocalTimeStamp = timeStamp;
+    _lastPayloadType = payloadType;
+
+    return 0;
+}
+
+WebRtc_Word32
+Channel::InFrameType(WebRtc_Word16 frameType)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::InFrameType(frameType=%d)", frameType);
+
+    CriticalSectionScoped cs(_callbackCritSect);
+    // 1 indicates speech
+    _sendFrameType = (frameType == 1) ? 1 : 0;
+    return 0;
+}
+
+#ifdef WEBRTC_DTMF_DETECTION
+int
+Channel::IncomingDtmf(const WebRtc_UWord8 digitDtmf, const bool end)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+               "Channel::IncomingDtmf(digitDtmf=%u, end=%d)",
+               digitDtmf, end);
+
+    if (digitDtmf != 999)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+        if (_telephoneEventDetectionPtr)
+        {
+            _telephoneEventDetectionPtr->OnReceivedTelephoneEventInband(
+                _channelId, digitDtmf, end);
+        }
+    }
+
+    return 0;
+}
+#endif
+
+WebRtc_Word32
+Channel::OnRxVadDetected(const int vadDecision)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::OnRxVadDetected(vadDecision=%d)", vadDecision);
+
+    CriticalSectionScoped cs(_callbackCritSect);
+    if (_rxVadObserverPtr)
+    {
+        _rxVadObserverPtr->OnRxVad(_channelId, vadDecision);
+    }
+
+    return 0;
+}
+
+int
+Channel::SendPacket(int channel, const void *data, int len)
+{
+    channel = VoEChannelId(channel);
+    assert(channel == _channelId);
+
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SendPacket(channel=%d, len=%d)", channel, len);
+
+    if (_transportPtr == NULL)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "Channel::SendPacket() failed to send RTP packet due to"
+                     " invalid transport object");
+        return -1;
+    }
+
+    // Insert an extra RTP packet if the user has called the
+    // InsertExtraRTPPacket API.
+    if (_insertExtraRTPPacket)
+    {
+        WebRtc_UWord8* rtpHdr = (WebRtc_UWord8*)data;
+        WebRtc_UWord8 M_PT(0);
+        if (_extraMarkerBit)
+        {
+            M_PT = 0x80;            // set the M-bit
+        }
+        M_PT += _extraPayloadType;  // set the payload type
+        *(++rtpHdr) = M_PT;     // modify the M|PT-byte within the RTP header
+        _insertExtraRTPPacket = false;  // insert one packet only
+    }
+
+    WebRtc_UWord8* bufferToSendPtr = (WebRtc_UWord8*)data;
+    WebRtc_Word32 bufferLength = len;
+
+    // Dump the RTP packet to a file (if RTP dump is enabled).
+    if (_rtpDumpOut.DumpPacket((const WebRtc_UWord8*)data, len) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::SendPacket() RTP dump to output file failed");
+    }
+
+    // SRTP or External encryption
+    if (_encrypting)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+
+        if (_encryptionPtr)
+        {
+            if (!_encryptionRTPBufferPtr)
+            {
+                // Allocate memory for encryption buffer one time only
+                _encryptionRTPBufferPtr =
+                    new WebRtc_UWord8[kVoiceEngineMaxIpPacketSizeBytes];
+            }
+
+            // Perform encryption (SRTP or external)
+            WebRtc_Word32 encryptedBufferLength = 0;
+            _encryptionPtr->encrypt(_channelId,
+                                    bufferToSendPtr,
+                                    _encryptionRTPBufferPtr,
+                                    bufferLength,
+                                    (int*)&encryptedBufferLength);
+            if (encryptedBufferLength <= 0)
+            {
+                _engineStatisticsPtr->SetLastError(
+                    VE_ENCRYPTION_FAILED,
+                    kTraceError, "Channel::SendPacket() encryption failed");
+                return -1;
+            }
+
+            // Replace default data buffer with encrypted buffer
+            bufferToSendPtr = _encryptionRTPBufferPtr;
+            bufferLength = encryptedBufferLength;
+        }
+    }
+
+    // Packet transmission using WebRtc socket transport
+    if (!_externalTransport)
+    {
+        int n = _transportPtr->SendPacket(channel, bufferToSendPtr,
+                                          bufferLength);
+        if (n < 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::SendPacket() RTP transmission using WebRtc"
+                         " sockets failed");
+            return -1;
+        }
+        return n;
+    }
+
+    // Packet transmission using external transport
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+
+        int n = _transportPtr->SendPacket(channel,
+                                          bufferToSendPtr,
+                                          bufferLength);
+        if (n < 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::SendPacket() RTP transmission using external"
+                         " transport failed");
+            return -1;
+        }
+        return n;
+    }
+}
+
+int
+Channel::SendRTCPPacket(int channel, const void *data, int len)
+{
+    channel = VoEChannelId(channel);
+    assert(channel == _channelId);
+
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SendRTCPPacket(channel=%d, len=%d)", channel, len);
+
+    if (_transportPtr == NULL)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::SendRTCPPacket() failed to send RTCP packet"
+                     " due to invalid transport object");
+        return -1;
+    }
+
+    WebRtc_UWord8* bufferToSendPtr = (WebRtc_UWord8*)data;
+    WebRtc_Word32 bufferLength = len;
+
+    // Dump the RTCP packet to a file (if RTP dump is enabled).
+    if (_rtpDumpOut.DumpPacket((const WebRtc_UWord8*)data, len) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::SendPacket() RTCP dump to output file failed");
+    }
+
+    // SRTP or External encryption
+    if (_encrypting)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+
+        if (_encryptionPtr)
+        {
+            if (!_encryptionRTCPBufferPtr)
+            {
+                // Allocate memory for encryption buffer one time only
+                _encryptionRTCPBufferPtr =
+                    new WebRtc_UWord8[kVoiceEngineMaxIpPacketSizeBytes];
+            }
+
+            // Perform encryption (SRTP or external).
+            WebRtc_Word32 encryptedBufferLength = 0;
+            _encryptionPtr->encrypt_rtcp(_channelId,
+                                         bufferToSendPtr,
+                                         _encryptionRTCPBufferPtr,
+                                         bufferLength,
+                                         (int*)&encryptedBufferLength);
+            if (encryptedBufferLength <= 0)
+            {
+                _engineStatisticsPtr->SetLastError(
+                    VE_ENCRYPTION_FAILED, kTraceError,
+                    "Channel::SendRTCPPacket() encryption failed");
+                return -1;
+            }
+
+            // Replace default data buffer with encrypted buffer
+            bufferToSendPtr = _encryptionRTCPBufferPtr;
+            bufferLength = encryptedBufferLength;
+        }
+    }
+
+    // Packet transmission using WebRtc socket transport
+    if (!_externalTransport)
+    {
+        int n = _transportPtr->SendRTCPPacket(channel,
+                                              bufferToSendPtr,
+                                              bufferLength);
+        if (n < 0)
+        {
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::SendRTCPPacket() transmission using WebRtc"
+                         " sockets failed");
+            return -1;
+        }
+        return n;
+    }
+
+    // Packet transmission using external transport
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+
+        int n = _transportPtr->SendRTCPPacket(channel,
+                                              bufferToSendPtr,
+                                              bufferLength);
+        if (n < 0)
+        {
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::SendRTCPPacket() transmission using external"
+                         " transport failed");
+            return -1;
+        }
+        return n;
+    }
+
+    return len;
+}
+
+void
+Channel::IncomingRTPPacket(const WebRtc_Word8* incomingRtpPacket,
+                           const WebRtc_Word32 rtpPacketLength,
+                           const WebRtc_Word8* fromIP,
+                           const WebRtc_UWord16 fromPort)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::IncomingRTPPacket(rtpPacketLength=%d,"
+                 " fromIP=%s, fromPort=%u)",
+                 rtpPacketLength, fromIP, fromPort);
+
+    // Store playout timestamp for the received RTP packet
+    // to be used for upcoming delay estimations
+    WebRtc_UWord32 playoutTimestamp(0);
+    if (GetPlayoutTimeStamp(playoutTimestamp) == 0)
+    {
+        _playoutTimeStampRTP = playoutTimestamp;
+    }
+
+    WebRtc_UWord8* rtpBufferPtr = (WebRtc_UWord8*)incomingRtpPacket;
+    WebRtc_Word32 rtpBufferLength = rtpPacketLength;
+
+    // SRTP or External decryption
+    if (_decrypting)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+
+        if (_encryptionPtr)
+        {
+            if (!_decryptionRTPBufferPtr)
+            {
+                // Allocate memory for decryption buffer one time only
+                _decryptionRTPBufferPtr =
+                    new WebRtc_UWord8[kVoiceEngineMaxIpPacketSizeBytes];
+            }
+
+            // Perform decryption (SRTP or external)
+            WebRtc_Word32 decryptedBufferLength = 0;
+            _encryptionPtr->decrypt(_channelId,
+                                    rtpBufferPtr,
+                                    _decryptionRTPBufferPtr,
+                                    rtpBufferLength,
+                                    (int*)&decryptedBufferLength);
+            if (decryptedBufferLength <= 0)
+            {
+                _engineStatisticsPtr->SetLastError(
+                    VE_DECRYPTION_FAILED, kTraceError,
+                    "Channel::IncomingRTPPacket() decryption failed");
+                return;
+            }
+
+            // Replace default data buffer with decrypted buffer
+            rtpBufferPtr = _decryptionRTPBufferPtr;
+            rtpBufferLength = decryptedBufferLength;
+        }
+    }
+
+    // Dump the RTP packet to a file (if RTP dump is enabled).
+    if (_rtpDumpIn.DumpPacket(rtpBufferPtr,
+                              (WebRtc_UWord16)rtpBufferLength) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::SendPacket() RTP dump to input file failed");
+    }
+
+    // Deliver RTP packet to RTP/RTCP module for parsing
+    // The packet will be pushed back to the channel through the
+    // OnReceivedPayloadData callback, so we don't push it to the ACM here.
+    if (_rtpRtcpModule.IncomingPacket((const WebRtc_UWord8*)rtpBufferPtr,
+                                      (WebRtc_UWord16)rtpBufferLength) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceWarning,
+            "Channel::IncomingRTPPacket() RTP packet is invalid");
+        return;
+    }
+}
+
+void
+Channel::IncomingRTCPPacket(const WebRtc_Word8* incomingRtcpPacket,
+                            const WebRtc_Word32 rtcpPacketLength,
+                            const WebRtc_Word8* fromIP,
+                            const WebRtc_UWord16 fromPort)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::IncomingRTCPPacket(rtcpPacketLength=%d, fromIP=%s,"
+                 " fromPort=%u)",
+                 rtcpPacketLength, fromIP, fromPort);
+
+    // Temporary buffer pointer and size for decryption
+    WebRtc_UWord8* rtcpBufferPtr = (WebRtc_UWord8*)incomingRtcpPacket;
+    WebRtc_Word32 rtcpBufferLength = rtcpPacketLength;
+
+    // Store playout timestamp for the received RTCP packet
+    // which will be read by the GetRemoteRTCPData API
+    WebRtc_UWord32 playoutTimestamp(0);
+    if (GetPlayoutTimeStamp(playoutTimestamp) == 0)
+    {
+        _playoutTimeStampRTCP = playoutTimestamp;
+    }
+
+    // SRTP or External decryption
+    if (_decrypting)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+
+        if (_encryptionPtr)
+        {
+            if (!_decryptionRTCPBufferPtr)
+            {
+                // Allocate memory for decryption buffer one time only
+                _decryptionRTCPBufferPtr =
+                    new WebRtc_UWord8[kVoiceEngineMaxIpPacketSizeBytes];
+            }
+
+            // Perform decryption (SRTP or external).
+            WebRtc_Word32 decryptedBufferLength = 0;
+            _encryptionPtr->decrypt_rtcp(_channelId,
+                                         rtcpBufferPtr,
+                                         _decryptionRTCPBufferPtr,
+                                         rtcpBufferLength,
+                                         (int*)&decryptedBufferLength);
+            if (decryptedBufferLength <= 0)
+            {
+                _engineStatisticsPtr->SetLastError(
+                    VE_DECRYPTION_FAILED, kTraceError,
+                    "Channel::IncomingRTCPPacket() decryption failed");
+                return;
+            }
+
+            // Replace default data buffer with decrypted buffer
+            rtcpBufferPtr = _decryptionRTCPBufferPtr;
+            rtcpBufferLength = decryptedBufferLength;
+        }
+    }
+
+    // Dump the RTCP packet to a file (if RTP dump is enabled).
+    if (_rtpDumpIn.DumpPacket(rtcpBufferPtr,
+                              (WebRtc_UWord16)rtcpBufferLength) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::SendPacket() RTCP dump to input file failed");
+    }
+
+    // Deliver RTCP packet to RTP/RTCP module for parsing
+    if (_rtpRtcpModule.IncomingPacket((const WebRtc_UWord8*)rtcpBufferPtr,
+                                      (WebRtc_UWord16)rtcpBufferLength) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceWarning,
+            "Channel::IncomingRTPPacket() RTCP packet is invalid");
+        return;
+    }
+}
+
+void
+Channel::OnReceivedTelephoneEvent(const WebRtc_Word32 id,
+                                  const WebRtc_UWord8 event,
+                                  const bool endOfEvent)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnReceivedTelephoneEvent(id=%d, event=%u,"
+                 "endOfEvent=%d)", id, event, endOfEvent);
+
+#ifdef WEBRTC_DTMF_DETECTION
+    if (_outOfBandTelephoneEventDetecion)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+
+        if (_telephoneEventDetectionPtr)
+        {
+            _telephoneEventDetectionPtr->OnReceivedTelephoneEventOutOfBand(
+                _channelId, event, endOfEvent);
+        }
+    }
+#endif
+}
+
+void
+Channel::OnPlayTelephoneEvent(const WebRtc_Word32 id,
+                              const WebRtc_UWord8 event,
+                              const WebRtc_UWord16 lengthMs,
+                              const WebRtc_UWord8 volume)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnPlayTelephoneEvent(id=%d, event=%u, lengthMs=%u,"
+                 "volume=%u)", id, event, lengthMs, volume);
+
+    if (!_playOutbandDtmfEvent || (event > 15))
+    {
+        // Ignore callback since feedback is disabled or event is not a
+        // Dtmf tone event.
+        return;
+    }
+
+    assert(_outputMixerPtr != NULL);
+
+    // Start playing out the Dtmf tone (if playout is enabled).
+    // Reduce the tone length by 80 ms to reduce the risk of echo.
+    _outputMixerPtr->PlayDtmfTone(event, lengthMs - 80, volume);
+}
+
+void
+Channel::OnIncomingSSRCChanged(const WebRtc_Word32 id,
+                               const WebRtc_UWord32 SSRC)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnIncomingSSRCChanged(id=%d, SSRC=%d)",
+                 id, SSRC);
+
+    WebRtc_Word32 channel = VoEChannelId(id);
+    assert(channel == _channelId);
+
+    // Reset RTP-module counters since a new incoming RTP stream is detected
+    _rtpRtcpModule.ResetReceiveDataCountersRTP();
+    _rtpRtcpModule.ResetStatisticsRTP();
+
+    if (_rtpObserver)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+
+        if (_rtpObserverPtr)
+        {
+            // Send new SSRC to registered observer using callback
+            _rtpObserverPtr->OnIncomingSSRCChanged(channel, SSRC);
+        }
+    }
+}
+
+void Channel::OnIncomingCSRCChanged(const WebRtc_Word32 id,
+                                    const WebRtc_UWord32 CSRC,
+                                    const bool added)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnIncomingCSRCChanged(id=%d, CSRC=%d, added=%d)",
+                 id, CSRC, added);
+
+    WebRtc_Word32 channel = VoEChannelId(id);
+    assert(channel == _channelId);
+
+    if (_rtpObserver)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+
+        if (_rtpObserverPtr)
+        {
+            _rtpObserverPtr->OnIncomingCSRCChanged(channel, CSRC, added);
+        }
+    }
+}
+
+void
+Channel::OnApplicationDataReceived(const WebRtc_Word32 id,
+                                   const WebRtc_UWord8 subType,
+                                   const WebRtc_UWord32 name,
+                                   const WebRtc_UWord16 length,
+                                   const WebRtc_UWord8* data)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnApplicationDataReceived(id=%d, subType=%u,"
+                 " name=%u, length=%u)",
+                 id, subType, name, length);
+
+    WebRtc_Word32 channel = VoEChannelId(id);
+    assert(channel == _channelId);
+
+    if (_rtcpObserver)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+
+        if (_rtcpObserverPtr)
+        {
+            _rtcpObserverPtr->OnApplicationDataReceived(channel,
+                                                        subType,
+                                                        name,
+                                                        data,
+                                                        length);
+        }
+    }
+}
+
+WebRtc_Word32
+Channel::OnInitializeDecoder(
+    const WebRtc_Word32 id,
+    const WebRtc_Word8 payloadType,
+    const WebRtc_Word8 payloadName[RTP_PAYLOAD_NAME_SIZE],
+    const WebRtc_UWord32 frequency,
+    const WebRtc_UWord8 channels,
+    const WebRtc_UWord32 rate)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnInitializeDecoder(id=%d, payloadType=%d, "
+                 "payloadName=%s, frequency=%u, channels=%u, rate=%u)",
+                 id, payloadType, payloadName, frequency, channels, rate);
+
+    WebRtc_Word32 channel = VoEChannelId(id);
+    assert(channel == _channelId);
+
+    CodecInst receiveCodec;
+    CodecInst dummyCodec;
+
+    receiveCodec.pltype = payloadType;
+    strcpy(receiveCodec.plname, payloadName);
+    receiveCodec.plfreq = frequency;
+    receiveCodec.channels = channels;
+    receiveCodec.rate = rate;
+
+    _audioCodingModule.Codec(payloadName, dummyCodec, frequency);
+    receiveCodec.pacsize = dummyCodec.pacsize;
+
+    // Register the new codec to the ACM
+    if (_audioCodingModule.RegisterReceiveCodec(receiveCodec) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::OnInitializeDecoder() invalid codec ("
+                     "pt=%d, name=%s) received - 1", payloadType, payloadName);
+        _engineStatisticsPtr->SetLastError(VE_AUDIO_CODING_MODULE_ERROR);
+        return -1;
+    }
+
+    return 0;
+}
+
+void
+Channel::OnPacketTimeout(const WebRtc_Word32 id)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnPacketTimeout(id=%d)", id);
+
+    CriticalSectionScoped cs(*_callbackCritSectPtr);
+    if (_voiceEngineObserverPtr)
+    {
+        if (_receiving || _externalTransport)
+        {
+            WebRtc_Word32 channel = VoEChannelId(id);
+            assert(channel == _channelId);
+            // Ensure that next OnReceivedPacket() callback will trigger
+            // a VE_PACKET_RECEIPT_RESTARTED callback.
+            _rtpPacketTimedOut = true;
+            // Deliver callback to the observer
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::OnPacketTimeout() => "
+                         "CallbackOnError(VE_RECEIVE_PACKET_TIMEOUT)");
+            _voiceEngineObserverPtr->CallbackOnError(channel,
+                                                     VE_RECEIVE_PACKET_TIMEOUT);
+        }
+    }
+}
+
+void
+Channel::OnReceivedPacket(const WebRtc_Word32 id,
+                          const RtpRtcpPacketType packetType)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnReceivedPacket(id=%d, packetType=%d)",
+                 id, packetType);
+
+    WebRtc_Word32 channel = VoEChannelId(id);
+    assert(channel == _channelId);
+
+    // Notify only for the case when we have restarted an RTP session.
+    if (_rtpPacketTimedOut && (kPacketRtp == packetType))
+    {
+        CriticalSectionScoped cs(*_callbackCritSectPtr);
+        if (_voiceEngineObserverPtr)
+        {
+            WebRtc_Word32 channel = VoEChannelId(id);
+            assert(channel == _channelId);
+            // Reset timeout mechanism
+            _rtpPacketTimedOut = false;
+            // Deliver callback to the observer
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::OnReceivedPacket() =>"
+                         " CallbackOnError(VE_PACKET_RECEIPT_RESTARTED)");
+            _voiceEngineObserverPtr->CallbackOnError(
+                channel,
+                VE_PACKET_RECEIPT_RESTARTED);
+        }
+    }
+}
+
+void
+Channel::OnPeriodicDeadOrAlive(const WebRtc_Word32 id,
+                               const RTPAliveType alive)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnPeriodicDeadOrAlive(id=%d, alive=%d)", id, alive);
+
+    if (!_connectionObserver)
+        return;
+
+    WebRtc_Word32 channel = VoEChannelId(id);
+    assert(channel == _channelId);
+
+    // Use Alive as default to limit risk of false Dead detections
+    bool isAlive(true);
+
+    // Always mark the connection as Dead when the module reports kRtpDead
+    if (kRtpDead == alive)
+    {
+        isAlive = false;
+    }
+
+    // It is possible that the connection is alive even if no RTP packet has
+    // been received for a long time since the other side might use VAD/DTX
+    // and a low SID-packet update rate.
+    if ((kRtpNoRtp == alive) && _playing)
+    {
+        // Detect Alive for all NetEQ states except for the case when we are
+        // in PLC_CNG state.
+        // PLC_CNG <=> background noise only due to long expand or error.
+        // Note that the case where the other side stops sending during CNG
+        // state will be detected as Alive. Dead is not set until after
+        // missing RTCP packets for at least twelve seconds (handled
+        // internally by the RTP/RTCP module).
+        isAlive = (_outputSpeechType != AudioFrame::kPLCCNG);
+    }
+
+    UpdateDeadOrAliveCounters(isAlive);
+
+    // Send callback to the registered observer
+    if (_connectionObserver)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+        if (_connectionObserverPtr)
+        {
+            _connectionObserverPtr->OnPeriodicDeadOrAlive(channel, isAlive);
+        }
+    }
+}
+
+WebRtc_Word32
+Channel::OnReceivedPayloadData(const WebRtc_UWord8* payloadData,
+                               const WebRtc_UWord16 payloadSize,
+                               const WebRtcRTPHeader* rtpHeader)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnReceivedPayloadData(payloadSize=%d,"
+                 " payloadType=%u, audioChannel=%u)",
+                 payloadSize,
+                 rtpHeader->header.payloadType,
+                 rtpHeader->type.Audio.channel);
+
+    if (!_playing)
+    {
+        // Avoid inserting into NetEQ when we are not playing. Count the
+        // packet as discarded.
+        WEBRTC_TRACE(kTraceStream, kTraceVoice,
+                     VoEId(_instanceId, _channelId),
+                     "received packet is discarded since playing is not"
+                     " activated");
+        _numberOfDiscardedPackets++;
+        return 0;
+    }
+
+    // Push the incoming payload (parsed and ready for decoding) into the ACM
+    if (_audioCodingModule.IncomingPacket((const WebRtc_Word8*) payloadData,
+                                          payloadSize,
+                                          *rtpHeader) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceWarning,
+            "Channel::OnReceivedPayloadData() unable to push data to the ACM");
+        return -1;
+    }
+
+    // Update the packet delay
+    UpdatePacketDelay(rtpHeader->header.timestamp,
+                      rtpHeader->header.sequenceNumber);
+
+    return 0;
+}
+
+WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id,
+                                     AudioFrame& audioFrame)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetAudioFrame(id=%d)", id);
+
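+    // The playout processing below is applied in the following order:
+    // ACM decoding, optional RX VAD detection, far-end APM processing,
+    // output gain scaling, panning, file mixing, on-hold muting, external
+    // media processing, playout recording and output level measurement.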
+    // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
+    if (_audioCodingModule.PlayoutData10Ms(
+        audioFrame._frequencyInHz, audioFrame) == -1)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::GetAudioFrame() PlayoutData10Ms() failed!");
+    }
+
+    if (_RxVadDetection)
+    {
+        UpdateRxVadDetection(audioFrame);
+    }
+
+    // Convert module ID to internal VoE channel ID
+    audioFrame._id = VoEChannelId(audioFrame._id);
+    // Store speech type for dead-or-alive detection
+    _outputSpeechType = audioFrame._speechType;
+
+    // Perform far-end AudioProcessing module processing on the received signal
+    if (_rxApmIsEnabled)
+    {
+        ApmProcessRx(audioFrame);
+    }
+
+    // Output volume scaling
+    if (_outputGain < 0.99f || _outputGain > 1.01f)
+    {
+        AudioFrameOperations::ScaleWithSat(_outputGain, audioFrame);
+    }
+
+    // Scale left and/or right channel(s) if stereo and master balance is
+    // active
+
+    if (_panLeft != 1.0f || _panRight != 1.0f)
+    {
+        if (audioFrame._audioChannel == 1)
+        {
+            // Emulate stereo mode since panning is active.
+            // The mono signal is copied to both left and right channels here.
+            AudioFrameOperations::MonoToStereo(audioFrame);
+        }
+        // For true stereo mode (when we are receiving a stereo signal), no
+        // action is needed.
+
+        // Do the panning operation (the audio frame contains stereo at this
+        // stage)
+        AudioFrameOperations::Scale(_panLeft, _panRight, audioFrame);
+    }
+
+    // Mix decoded PCM output with file if file mixing is enabled
+    if (_outputFilePlaying)
+    {
+        assert(audioFrame._audioChannel == 1);
+        MixAudioWithFile(audioFrame, audioFrame._frequencyInHz);
+    }
+
+    // Place channel in on-hold state (~muted) if on-hold is activated
+    if (_outputIsOnHold)
+    {
+        AudioFrameOperations::Mute(audioFrame);
+    }
+
+    // External media
+    if (_outputExternalMedia)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+        const bool isStereo = (audioFrame._audioChannel == 2);
+        if (_outputExternalMediaCallbackPtr)
+        {
+            _outputExternalMediaCallbackPtr->Process(
+                _channelId,
+                kPlaybackPerChannel,
+                (WebRtc_Word16*)audioFrame._payloadData,
+                audioFrame._payloadDataLengthInSamples,
+                audioFrame._frequencyInHz,
+                isStereo);
+        }
+    }
+
+    // Record playout if enabled
+    {
+        CriticalSectionScoped cs(_fileCritSect);
+
+        if (_outputFileRecording && _outputFileRecorderPtr)
+        {
+            if (audioFrame._audioChannel == 2)
+            {
+                AudioFrame temp = audioFrame;
+                AudioFrameOperations::StereoToMono(temp);
+                _outputFileRecorderPtr->RecordAudioToFile(temp);
+            }
+            else if (audioFrame._audioChannel == 1)
+            {
+                _outputFileRecorderPtr->RecordAudioToFile(audioFrame);
+            }
+            else
+            {
+                assert(false);
+            }
+        }
+    }
+
+    // Measure audio level (0-9)
+    _outputAudioLevel.ComputeLevel(audioFrame);
+
+    return 0;
+}
+
+WebRtc_Word32
+Channel::NeededFrequency(const WebRtc_Word32 id)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::NeededFrequency(id=%d)", id);
+
+    int highestNeeded = 0;
+
+    // Determine highest needed receive frequency
+    WebRtc_Word32 receiveFrequency = _audioCodingModule.ReceiveFrequency();
+
+    // Return the larger of the ACM's playout and receive frequencies.
+    if (_audioCodingModule.PlayoutFrequency() > receiveFrequency)
+    {
+        highestNeeded = _audioCodingModule.PlayoutFrequency();
+    }
+    else
+    {
+        highestNeeded = receiveFrequency;
+    }
+
+    // Special case: if we're playing a file on the playout side, we take
+    // that frequency into consideration as well. This is not needed on the
+    // sending side, since the codec will limit the spectrum anyway.
+    if (_outputFilePlaying)
+    {
+        CriticalSectionScoped cs(_fileCritSect);
+        if (_outputFilePlayerPtr && _outputFilePlaying)
+        {
+            if (_outputFilePlayerPtr->Frequency() > highestNeeded)
+            {
+                highestNeeded = _outputFilePlayerPtr->Frequency();
+            }
+        }
+    }
+
+    return highestNeeded;
+}
+
+WebRtc_UWord8 Channel::numSocketThreads = KNumSocketThreads;
+
+WebRtc_Word32
+Channel::CreateChannel(Channel*& channel,
+                       const WebRtc_Word32 channelId,
+                       const WebRtc_UWord32 instanceId)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId,channelId),
+                 "Channel::CreateChannel(channelId=%d, instanceId=%d)",
+                 channelId, instanceId);
+
+    channel = new Channel(channelId, instanceId);
+    if (channel == NULL)
+    {
+        WEBRTC_TRACE(kTraceMemory, kTraceVoice,
+                     VoEId(instanceId,channelId),
+                     "Channel::CreateChannel() unable to allocate memory for"
+                     " channel");
+        return -1;
+    }
+    return 0;
+}
+
+void
+Channel::PlayNotification(const WebRtc_Word32 id,
+                          const WebRtc_UWord32 durationMs)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::PlayNotification(id=%d, durationMs=%d)",
+                 id, durationMs);
+
+    // Not implemented yet.
+}
+
+void
+Channel::RecordNotification(const WebRtc_Word32 id,
+                            const WebRtc_UWord32 durationMs)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::RecordNotification(id=%d, durationMs=%d)",
+                 id, durationMs);
+
+    // Not implemented yet.
+}
+
+void
+Channel::PlayFileEnded(const WebRtc_Word32 id)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::PlayFileEnded(id=%d)", id);
+
+    if (id == _inputFilePlayerId)
+    {
+        CriticalSectionScoped cs(_fileCritSect);
+
+        _inputFilePlaying = false;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::PlayFileEnded() => input file player module is"
+                     " shutdown");
+    }
+    else if (id == _outputFilePlayerId)
+    {
+        CriticalSectionScoped cs(_fileCritSect);
+
+        _outputFilePlaying = false;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::PlayFileEnded() => output file player module is"
+                     " shutdown");
+    }
+}
+
+void
+Channel::RecordFileEnded(const WebRtc_Word32 id)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::RecordFileEnded(id=%d)", id);
+
+    assert(id == _outputFileRecorderId);
+
+    CriticalSectionScoped cs(_fileCritSect);
+
+    _outputFileRecording = false;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId,_channelId),
+                 "Channel::RecordFileEnded() => output file recorder module is"
+                 " shutdown");
+}
+
+Channel::Channel(const WebRtc_Word32 channelId,
+                 const WebRtc_UWord32 instanceId) :
+    _fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _transmitCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _channelId(channelId),
+    _instanceId(instanceId),
+    _playing(false),
+    _sending(false),
+    _receiving(false),
+    _mixFileWithMicrophone(false),
+    _timeStamp(0), // This is just an offset; the RTP module will add its own random offset
+    _rtpRtcpModule(*RtpRtcp::CreateRtpRtcp(VoEModuleId(
+            instanceId, channelId), true)),
+    _audioCodingModule(*AudioCodingModule::Create(
+            VoEModuleId(instanceId, channelId))),
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    _socketTransportModule(
+        *UdpTransport::Create(
+            VoEModuleId(instanceId, channelId),
+            numSocketThreads)),
+#endif
+#ifdef WEBRTC_SRTP
+    _srtpModule(*SrtpModule::CreateSrtpModule(VoEModuleId(instanceId,
+                                                          channelId))),
+#endif
+    _rtpDumpIn(*RtpDump::CreateRtpDump()),
+    _rtpDumpOut(*RtpDump::CreateRtpDump()),
+    _transportPtr(NULL),
+    _encryptionPtr(NULL),
+    _rxAudioProcessingModulePtr(NULL),
+#ifdef WEBRTC_DTMF_DETECTION
+    _telephoneEventDetectionPtr(NULL),
+#endif
+    _rxVadObserverPtr(NULL),
+    _oldVadDecision(-1),
+    _sendFrameType(0),
+    _outputAudioLevel(),
+    _inbandDtmfQueue(VoEModuleId(instanceId, channelId)),
+    _inbandDtmfGenerator(VoEModuleId(instanceId, channelId)),
+    _encrypting(false),
+    _decrypting(false),
+    _encryptionRTPBufferPtr(NULL),
+    _decryptionRTPBufferPtr(NULL),
+    _encryptionRTCPBufferPtr(NULL),
+    _decryptionRTCPBufferPtr(NULL),
+    _externalTransport(false),
+    _engineStatisticsPtr(NULL),
+    _moduleProcessThreadPtr(NULL),
+    _audioDeviceModulePtr(NULL),
+    _voiceEngineObserverPtr(NULL),
+    _callbackCritSectPtr(NULL),
+    _inputFilePlayerPtr(NULL),
+    _outputFilePlayerPtr(NULL),
+    _outputFileRecorderPtr(NULL),
+    // Avoid ID conflicts with other channels by adding offsets 1024 - 1026;
+    // we will never use as many as 1024 channels.
+    _inputFilePlayerId(VoEModuleId(instanceId, channelId) + 1024),
+    _outputFilePlayerId(VoEModuleId(instanceId, channelId) + 1025),
+    _outputFileRecorderId(VoEModuleId(instanceId, channelId) + 1026),
+    _inputFilePlaying(false),
+    _outputFilePlaying(false),
+    _outputFileRecording(false),
+    _outputExternalMedia(false),
+    _inputExternalMedia(false),
+    _inputExternalMediaCallbackPtr(NULL),
+    _outputExternalMediaCallbackPtr(NULL),
+    _rtpObserverPtr(NULL),
+    _rtcpObserverPtr(NULL),
+    _mute(false),
+    _panLeft(1.0f),
+    _panRight(1.0f),
+    _outputGain(1.0f),
+    _playOutbandDtmfEvent(false),
+    _playInbandDtmfEvent(false),
+    _sendTelephoneEventPayloadType(106),
+    _inbandTelephoneEventDetection(false),
+    _outOfBandTelephoneEventDetecion(false),
+    _rtpObserver(false),
+    _rtcpObserver(false),
+    _playoutTimeStampRTP(0),
+    _playoutTimeStampRTCP(0),
+    _numberOfDiscardedPackets(0),
+    _extraPayloadType(0),
+    _insertExtraRTPPacket(false),
+    _extraMarkerBit(false),
+    _lastLocalTimeStamp(0),
+    _lastPayloadType(0),
+    _outputIsOnHold(false),
+    _externalPlayout(false),
+    _inputIsOnHold(false),
+    _rtpPacketTimedOut(false),
+    _rtpPacketTimeOutIsEnabled(false),
+    _rtpTimeOutSeconds(0),
+    _connectionObserver(false),
+    _connectionObserverPtr(NULL),
+    _countAliveDetections(0),
+    _countDeadDetections(0),
+    _outputSpeechType(AudioFrame::kNormalSpeech),
+    _averageDelayMs(0),
+    _previousSequenceNumber(0),
+    _previousTimestamp(0),
+    _recPacketDelayMs(20),
+    _RxVadDetection(false),
+    _rxApmIsEnabled(false),
+    _rxAgcIsEnabled(false),
+    _rxNsIsEnabled(false),
+    _audioLevel_dBov(100),
+    _includeAudioLevelIndication(false)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::Channel() - ctor");
+    _inbandDtmfQueue.ResetDtmf();
+    _inbandDtmfGenerator.Init();
+    _outputAudioLevel.Clear();
+
+    // Create far end AudioProcessing Module
+    _rxAudioProcessingModulePtr = AudioProcessing::Create(
+        VoEModuleId(instanceId, channelId));
+}
+
+Channel::~Channel()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::~Channel() - dtor");
+
+    if (_outputExternalMedia)
+    {
+        DeRegisterExternalMediaProcessing(kPlaybackPerChannel);
+    }
+    if (_inputExternalMedia)
+    {
+        DeRegisterExternalMediaProcessing(kRecordingPerChannel);
+    }
+    StopSend();
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    StopReceiving();
+    // De-register packet callback to ensure that we are not in a callback
+    // when deleting channel state; this avoids race conditions and deadlock.
+    if (_socketTransportModule.InitializeReceiveSockets(NULL, 0, NULL, NULL, 0)
+            != 0)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId, _channelId),
+                     "~Channel() failed to de-register receive callback");
+    }
+#endif
+    StopPlayout();
+
+    {
+        CriticalSectionScoped cs(_fileCritSect);
+        if (_inputFilePlayerPtr)
+        {
+            _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
+            _inputFilePlayerPtr->StopPlayingFile();
+            FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
+            _inputFilePlayerPtr = NULL;
+        }
+        if (_outputFilePlayerPtr)
+        {
+            _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
+            _outputFilePlayerPtr->StopPlayingFile();
+            FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
+            _outputFilePlayerPtr = NULL;
+        }
+        if (_outputFileRecorderPtr)
+        {
+            _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+            _outputFileRecorderPtr->StopRecording();
+            FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+            _outputFileRecorderPtr = NULL;
+        }
+    }
+
+    // The order in which to safely shut down modules in a channel is:
+    // 1. De-register callbacks in modules
+    // 2. De-register modules in process thread
+    // 3. Destroy modules
+
+    // De-register all RTP module callbacks to ensure that no callbacks are
+    // received (the receive socket callback was de-registered above)
+    if (_rtpRtcpModule.RegisterIncomingDataCallback(NULL) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to de-register incoming data callback"
+                     " (RTP module)");
+    }
+    if (_rtpRtcpModule.RegisterSendTransport(NULL) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to de-register send transport "
+                     "(RTP module)");
+    }
+    if (_rtpRtcpModule.RegisterIncomingRTPCallback(NULL) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to de-register incoming RTP"
+                     " callback (RTP module)");
+    }
+    if (_rtpRtcpModule.RegisterIncomingRTCPCallback(NULL) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to de-register incoming RTCP "
+                     "callback (RTP module)");
+    }
+    if (_rtpRtcpModule.RegisterAudioCallback(NULL) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to de-register audio callback "
+                     "(RTP module)");
+    }
+    if (_audioCodingModule.RegisterTransportCallback(NULL) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to de-register transport callback"
+                     " (Audio coding module)");
+    }
+    if (_audioCodingModule.RegisterVADCallback(NULL) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to de-register VAD callback"
+                     " (Audio coding module)");
+    }
+#ifdef WEBRTC_DTMF_DETECTION
+    if (_audioCodingModule.RegisterIncomingMessagesCallback(NULL) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to de-register incoming messages "
+                     "callback (Audio coding module)");
+    }
+#endif
+    // De-register modules in process thread
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (_moduleProcessThreadPtr->DeRegisterModule(&_socketTransportModule)
+            == -1)
+    {
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to deregister socket module");
+    }
+#endif
+    if (_moduleProcessThreadPtr->DeRegisterModule(&_rtpRtcpModule) == -1)
+    {
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to deregister RTP/RTCP module");
+    }
+
+    // Destroy modules
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    UdpTransport::Destroy(
+        &_socketTransportModule);
+#endif
+    RtpRtcp::DestroyRtpRtcp(&_rtpRtcpModule);
+    AudioCodingModule::Destroy(&_audioCodingModule);
+#ifdef WEBRTC_SRTP
+    SrtpModule::DestroySrtpModule(&_srtpModule);
+#endif
+    if (_rxAudioProcessingModulePtr != NULL)
+    {
+        AudioProcessing::Destroy(_rxAudioProcessingModulePtr); // far end APM
+        _rxAudioProcessingModulePtr = NULL;
+    }
+
+    // End of modules shutdown
+
+    // Delete other objects
+    RtpDump::DestroyRtpDump(&_rtpDumpIn);
+    RtpDump::DestroyRtpDump(&_rtpDumpOut);
+    delete [] _encryptionRTPBufferPtr;
+    delete [] _decryptionRTPBufferPtr;
+    delete [] _encryptionRTCPBufferPtr;
+    delete [] _decryptionRTCPBufferPtr;
+    delete &_callbackCritSect;
+    delete &_transmitCritSect;
+    delete &_fileCritSect;
+}
+
+WebRtc_Word32
+Channel::Init()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::Init()");
+
+    // --- Initial sanity
+
+    if ((_engineStatisticsPtr == NULL) ||
+        (_moduleProcessThreadPtr == NULL))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::Init() must call SetEngineInformation() first");
+        return -1;
+    }
+
+    // --- Add modules to process thread (for periodic scheduling)
+
+    const bool processThreadFail =
+        ((_moduleProcessThreadPtr->RegisterModule(&_rtpRtcpModule) != 0) ||
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+        (_moduleProcessThreadPtr->RegisterModule(
+                &_socketTransportModule) != 0));
+#else
+        false);
+#endif
+    if (processThreadFail)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CANNOT_INIT_CHANNEL, kTraceError,
+            "Channel::Init() modules not registered");
+        return -1;
+    }
+
+    // --- Log module versions
+
+    Utility::TraceModuleVersion(VoEId(_instanceId,_channelId),
+                                _audioCodingModule);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    Utility::TraceModuleVersion(VoEId(_instanceId,_channelId),
+                                _socketTransportModule);
+#endif
+#ifdef WEBRTC_SRTP
+    Utility::TraceModuleVersion(VoEId(_instanceId,_channelId), _srtpModule);
+#endif
+    Utility::TraceModuleVersion(VoEId(_instanceId,_channelId), _rtpRtcpModule);
+
+    // --- ACM initialization
+
+    if ((_audioCodingModule.InitializeReceiver() == -1) ||
+#ifdef WEBRTC_CODEC_AVT
+        // out-of-band Dtmf tones are played out by default
+        (_audioCodingModule.SetDtmfPlayoutStatus(true) == -1) ||
+#endif
+        // enable RX VAD by default (improves output mixing)
+        (_audioCodingModule.SetReceiveVADStatus(true) == -1) ||
+        (_audioCodingModule.InitializeSender() == -1))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "Channel::Init() unable to initialize the ACM - 1");
+        return -1;
+    }
+
+    // --- RTP/RTCP module initialization
+
+    // Ensure that RTCP is enabled by default for the created channel.
+    // Note that the module will keep generating RTCP until it is explicitly
+    // disabled by the user.
+    // After StopListen (when no sockets exist), RTCP packets will no longer
+    // be transmitted since the Transport object will then be invalid.
+
+    const bool rtpRtcpFail =
+        ((_rtpRtcpModule.InitReceiver() == -1) ||
+        (_rtpRtcpModule.InitSender() == -1) ||
+        (_rtpRtcpModule.SetTelephoneEventStatus(false, true, true) == -1) ||
+        // RTCP is enabled by default
+        (_rtpRtcpModule.SetRTCPStatus(kRtcpCompound) == -1));
+    if (rtpRtcpFail)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "Channel::Init() RTP/RTCP module not initialized");
+        return -1;
+    }
+
+    // --- Register all permanent callbacks
+
+    const bool fail =
+        (_rtpRtcpModule.RegisterIncomingDataCallback(this) == -1) ||
+        (_rtpRtcpModule.RegisterIncomingRTPCallback(this) == -1) ||
+        (_rtpRtcpModule.RegisterIncomingRTCPCallback(this) == -1) ||
+        (_rtpRtcpModule.RegisterSendTransport(this) == -1) ||
+        (_rtpRtcpModule.RegisterAudioCallback(this) == -1) ||
+        (_audioCodingModule.RegisterTransportCallback(this) == -1) ||
+        (_audioCodingModule.RegisterVADCallback(this) == -1);
+
+    if (fail)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CANNOT_INIT_CHANNEL, kTraceError,
+            "Channel::Init() callbacks not registered");
+        return -1;
+    }
+
+    // --- Register all supported codecs to the receiving side of the
+    // RTP/RTCP module
+
+    CodecInst codec;
+    const WebRtc_UWord8 nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
+
+    for (int idx = 0; idx < nSupportedCodecs; idx++)
+    {
+        // Open up the RTP/RTCP receiver for all supported codecs
+        if ((_audioCodingModule.Codec(idx, codec) == -1) ||
+            (_rtpRtcpModule.RegisterReceivePayload(codec.plname,
+                                                   codec.pltype,
+                                                   codec.plfreq,
+                                                   codec.channels,
+                                                   codec.rate) == -1))
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::Init() unable to register %s (%d/%d/%d/%d) "
+                         "to RTP/RTCP receiver",
+                         codec.plname, codec.pltype, codec.plfreq,
+                         codec.channels, codec.rate);
+        }
+        else
+        {
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::Init() %s (%d/%d/%d/%d) has been added to "
+                         "the RTP/RTCP receiver",
+                         codec.plname, codec.pltype, codec.plfreq,
+                         codec.channels, codec.rate);
+        }
+
+        // Ensure that PCMU is used as default codec on the sending side
+        if (!STR_CASE_CMP(codec.plname, "PCMU"))
+        {
+            SetSendCodec(codec);
+        }
+
+        // Register default PT for outband 'telephone-event'
+        if (!STR_CASE_CMP(codec.plname, "telephone-event"))
+        {
+            if ((_rtpRtcpModule.RegisterSendPayload(codec.plname,
+                                                    codec.pltype,
+                                                    codec.plfreq,
+                                                    codec.channels) == -1) ||
+                (_audioCodingModule.RegisterReceiveCodec(codec) == -1))
+            {
+                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                             VoEId(_instanceId,_channelId),
+                             "Channel::Init() failed to register outband "
+                             "'telephone-event' (%d/%d) correctly",
+                             codec.pltype, codec.plfreq);
+            }
+        }
+
+        if (!STR_CASE_CMP(codec.plname, "CN"))
+        {
+            if ((_audioCodingModule.RegisterSendCodec(codec) == -1) ||
+                (_audioCodingModule.RegisterReceiveCodec(codec) == -1) ||
+                (_rtpRtcpModule.RegisterSendPayload(codec.plname,
+                                                    codec.pltype,
+                                                    codec.plfreq,
+                                                    codec.channels) == -1))
+            {
+                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                             VoEId(_instanceId,_channelId),
+                             "Channel::Init() failed to register CN (%d/%d) "
+                             "correctly - 1",
+                             codec.pltype, codec.plfreq);
+            }
+        }
+#ifdef WEBRTC_CODEC_RED
+        // Register RED to the receiving side of the ACM.
+        // We will not receive an OnInitializeDecoder() callback for RED.
+        if (!STR_CASE_CMP(codec.plname, "RED"))
+        {
+            if (_audioCodingModule.RegisterReceiveCodec(codec) == -1)
+            {
+                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                             VoEId(_instanceId,_channelId),
+                             "Channel::Init() failed to register RED (%d/%d) "
+                             "correctly",
+                             codec.pltype, codec.plfreq);
+            }
+        }
+#endif
+    }
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    // Ensure that the WebRtcSocketTransport implementation is used as
+    // Transport on the sending side
+    _transportPtr = &_socketTransportModule;
+#endif
+
+    // Initialize the far-end AP module.
+    // Using 8 kHz as the initial sample rate, the same as in transmission.
+    // It may be changed when the first audio is received.
+    if (_rxAudioProcessingModulePtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_NO_MEMORY, kTraceCritical,
+            "Channel::Init() failed to create the far-end AudioProcessing"
+            " module");
+        return -1;
+    }
+
+    if (_rxAudioProcessingModulePtr->echo_cancellation()->
+            set_device_sample_rate_hz(
+                    kVoiceEngineAudioProcessingDeviceSampleRateHz))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "Channel::Init() failed to set the device sample rate to 48K"
+            "for far-end AP module");
+    }
+
+    if (_rxAudioProcessingModulePtr->set_sample_rate_hz(8000))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "Channel::Init() failed to set the sample rate to 8K for"
+            " far-end AP module");
+    }
+
+    if (_rxAudioProcessingModulePtr->set_num_channels(1, 1) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOUNDCARD_ERROR, kTraceWarning,
+            "Init() failed to set channels for the primary audio"
+            " stream");
+    }
+
+    if (_rxAudioProcessingModulePtr->set_num_reverse_channels(1) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOUNDCARD_ERROR, kTraceWarning,
+            "Init() failed to set channels for the primary audio"
+            "stream");
+    }
+
+    if (_rxAudioProcessingModulePtr->high_pass_filter()->Enable(
+        WEBRTC_VOICE_ENGINE_RX_HP_DEFAULT_STATE) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "Channel::Init() failed to set the high-pass filter for"
+            "far-end AP module");
+    }
+
+    if (_rxAudioProcessingModulePtr->noise_suppression()->set_level(
+        (NoiseSuppression::Level)WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_MODE) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "Init() failed to set noise reduction level for far-end"
+            "AP module");
+    }
+    if (_rxAudioProcessingModulePtr->noise_suppression()->Enable(
+        WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_STATE) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "Init() failed to set noise reduction state for far-end"
+            "AP module");
+    }
+
+    if (_rxAudioProcessingModulePtr->gain_control()->set_mode(
+        (GainControl::Mode)WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_MODE) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "Init() failed to set AGC mode for far-end AP module");
+    }
+    if (_rxAudioProcessingModulePtr->gain_control()->Enable(
+        WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_STATE) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "Init() failed to set AGC state for far-end AP module");
+    }
+
+    return 0;
+}
+
+WebRtc_Word32
+Channel::SetEngineInformation(Statistics& engineStatistics,
+                              OutputMixer& outputMixer,
+                              voe::TransmitMixer& transmitMixer,
+                              ProcessThread& moduleProcessThread,
+                              AudioDeviceModule& audioDeviceModule,
+                              VoiceEngineObserver* voiceEngineObserver,
+                              CriticalSectionWrapper* callbackCritSect)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetEngineInformation()");
+    _engineStatisticsPtr = &engineStatistics;
+    _outputMixerPtr = &outputMixer;
+    _transmitMixerPtr = &transmitMixer,
+    _moduleProcessThreadPtr = &moduleProcessThread;
+    _audioDeviceModulePtr = &audioDeviceModule;
+    _voiceEngineObserverPtr = voiceEngineObserver;
+    _callbackCritSectPtr = callbackCritSect;
+    return 0;
+}
+
+WebRtc_Word32
+Channel::UpdateLocalTimeStamp()
+{
+
+    _timeStamp += _audioFrame._payloadDataLengthInSamples;
+    return 0;
+}
+
+WebRtc_Word32
+Channel::StartPlayout()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StartPlayout()");
+    if (_playing)
+    {
+        return 0;
+    }
+    // Add participant as a candidate for mixing.
+    if (_outputMixerPtr->SetMixabilityStatus(*this, true) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
+            "StartPlayout() failed to add participant to mixer");
+        return -1;
+    }
+
+    _playing = true;
+    return 0;
+}
+
+WebRtc_Word32
+Channel::StopPlayout()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StopPlayout()");
+    if (!_playing)
+    {
+        return 0;
+    }
+    // Remove participant as a candidate for mixing
+    if (_outputMixerPtr->SetMixabilityStatus(*this, false) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
+            "StartPlayout() failed to remove participant from mixer");
+        return -1;
+    }
+
+    _playing = false;
+    _outputAudioLevel.Clear();
+
+    return 0;
+}
+
+WebRtc_Word32
+Channel::StartSend()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StartSend()");
+    if (_sending)
+    {
+        return 0;
+    }
+    if (_rtpRtcpModule.SetSendingStatus(true) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "StartSend() RTP/RTCP failed to start sending");
+        return -1;
+    }
+    _sending = true;
+    return 0;
+}
+
+WebRtc_Word32
+Channel::StopSend()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StopSend()");
+    if (!_sending)
+    {
+        return 0;
+    }
+    // Resetting the sending SSRC and sequence number triggers direct
+    // transmission of an RTCP BYE
+    if (_rtpRtcpModule.SetSendingStatus(false) == -1 ||
+        _rtpRtcpModule.ResetSendDataCountersRTP() == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
+            "StartSend() RTP/RTCP failed to stop sending");
+    }
+
+    _sending = false;
+    return 0;
+}
+
+WebRtc_Word32
+Channel::StartReceiving()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StartReceiving()");
+    if (_receiving)
+    {
+        return 0;
+    }
+    // If external transport is used, we will only initialize/set the variables
+    // after this section, since we are not using the WebRtc transport but
+    // still need to keep track of e.g. if we are receiving.
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_externalTransport)
+    {
+        if (!_socketTransportModule.ReceiveSocketsInitialized())
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_SOCKETS_NOT_INITED, kTraceError,
+                "StartReceive() must set local receiver first");
+            return -1;
+        }
+        if (_socketTransportModule.StartReceiving(KNumberOfSocketBuffers) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+                "StartReceiving() failed to start receiving");
+            return -1;
+        }
+    }
+#endif
+    _receiving = true;
+    _numberOfDiscardedPackets = 0;
+    return 0;
+}
+
+WebRtc_Word32
+Channel::StopReceiving()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StopReceiving()");
+    if (!_receiving)
+    {
+        return 0;
+    }
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_externalTransport &&
+        _socketTransportModule.ReceiveSocketsInitialized())
+    {
+        if (_socketTransportModule.StopReceiving() != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+                "StopReceiving() failed to stop receiving");
+            return -1;
+        }
+    }
+#endif
+
+    bool dtmfDetection = _rtpRtcpModule.TelephoneEvent();
+    _rtpRtcpModule.InitReceiver();
+    // Recover Dtmf detection status
+    _rtpRtcpModule.SetTelephoneEventStatus(dtmfDetection, true, true);
+    RegisterReceiveCodecsToRTPModule();
+    _receiving = false;
+    return 0;
+}
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+WebRtc_Word32
+Channel::SetLocalReceiver(const WebRtc_UWord16 rtpPort,
+                          const WebRtc_UWord16 rtcpPort,
+                          const WebRtc_Word8 ipAddr[64],
+                          const WebRtc_Word8 multicastIpAddr[64])
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetLocalReceiver()");
+
+    if (_externalTransport)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "SetLocalReceiver() conflict with external transport");
+        return -1;
+    }
+
+    if (_sending)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_SENDING, kTraceError,
+            "SetLocalReceiver() already sending");
+        return -1;
+    }
+    if (_receiving)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_LISTENING, kTraceError,
+            "SetLocalReceiver() already receiving");
+        return -1;
+    }
+
+    if (_socketTransportModule.InitializeReceiveSockets(this,
+                                                        rtpPort,
+                                                        ipAddr,
+                                                        multicastIpAddr,
+                                                        rtcpPort) != 0)
+    {
+        UdpTransport::ErrorCode lastSockError(
+            _socketTransportModule.LastError());
+        switch (lastSockError)
+        {
+        case UdpTransport::kIpAddressInvalid:
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_IP_ADDRESS, kTraceError,
+                "SetLocalReceiver() invalid IP address");
+            break;
+        case UdpTransport::kSocketInvalid:
+            _engineStatisticsPtr->SetLastError(
+                VE_SOCKET_ERROR, kTraceError,
+                "SetLocalReceiver() invalid socket");
+            break;
+        case UdpTransport::kPortInvalid:
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_PORT_NMBR, kTraceError,
+                "SetLocalReceiver() invalid port");
+            break;
+        case UdpTransport::kFailedToBindPort:
+            _engineStatisticsPtr->SetLastError(
+                VE_BINDING_SOCKET_TO_LOCAL_ADDRESS_FAILED, kTraceError,
+                "SetLocalReceiver() binding failed");
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(
+                VE_SOCKET_ERROR, kTraceError,
+                "SetLocalReceiver() undefined socket error");
+            break;
+        }
+        return -1;
+    }
+    return 0;
+}
+#endif
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+WebRtc_Word32
+Channel::GetLocalReceiver(int& port, int& RTCPport, char ipAddr[64])
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetLocalReceiver()");
+
+    if (_externalTransport)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "SetLocalReceiver() conflict with external transport");
+        return -1;
+    }
+
+    WebRtc_Word8 ipAddrTmp[UdpTransport::
+                           kIpAddressVersion6Length] = {0};
+    WebRtc_UWord16 rtpPort(0);
+    WebRtc_UWord16 rtcpPort(0);
+    WebRtc_Word8 multicastIpAddr[UdpTransport::
+                                 kIpAddressVersion6Length] = {0};
+
+    // Acquire socket information from the socket module
+    if (_socketTransportModule.ReceiveSocketInformation(ipAddrTmp,
+                                                        rtpPort,
+                                                        rtcpPort,
+                                                        multicastIpAddr) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CANNOT_GET_SOCKET_INFO, kTraceError,
+            "GetLocalReceiver() unable to retrieve socket information");
+        return -1;
+    }
+
+    // Deliver valid results to the user
+    port = static_cast<int> (rtpPort);
+    RTCPport = static_cast<int> (rtcpPort);
+    if (ipAddr != NULL)
+    {
+        strcpy(ipAddr, ipAddrTmp);
+    }
+    return 0;
+}
+#endif
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+WebRtc_Word32
+Channel::SetSendDestination(const WebRtc_UWord16 rtpPort,
+                            const WebRtc_Word8 ipAddr[64],
+                            const int sourcePort,
+                            const WebRtc_UWord16 rtcpPort)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetSendDestination()");
+
+    if (_externalTransport)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "SetSendDestination() conflict with external transport");
+        return -1;
+    }
+
+    // Initialize ports and IP address for the remote (destination) side.
+    // By default, the sockets used for receiving are used for transmission as
+    // well, hence the source ports for outgoing packets are the same as the
+    // receiving ports specified in SetLocalReceiver.
+    // If an extra send socket has been created, it will be utilized until a
+    // new source port is specified or until the channel has been deleted and
+    // recreated. If no socket exists, sockets will be created when the first
+    // RTP and RTCP packets shall be transmitted (see e.g.
+    // UdpTransportImpl::SendPacket()).
+    //
+    // NOTE: this function does not require that sockets exist; all it does
+    // is build send structures to be used with the sockets when they exist.
+    // It is therefore possible to call this method before SetLocalReceiver.
+    // However, sockets must exist if a multicast address is given as input.
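+    //
+    // Typical call order (illustrative sketch only; argument lists are
+    // simplified and defaults assumed):
+    //   SetLocalReceiver(rtpPort, ...)      // bind receive sockets
+    //   SetSendDestination(rtpPort, ipAddr, ...)
+    //   StartSend()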
+
+    // Build send structures and enable QoS (if enabled and supported)
+    if (_socketTransportModule.InitializeSendSockets(
+        ipAddr, rtpPort, rtcpPort) != UdpTransport::kNoSocketError)
+    {
+        UdpTransport::ErrorCode lastSockError(
+            _socketTransportModule.LastError());
+        switch (lastSockError)
+        {
+        case UdpTransport::kIpAddressInvalid:
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_IP_ADDRESS, kTraceError,
+                "SetSendDestination() invalid IP address 1");
+            break;
+        case UdpTransport::kSocketInvalid:
+            _engineStatisticsPtr->SetLastError(
+                VE_SOCKET_ERROR, kTraceError,
+                "SetSendDestination() invalid socket 1");
+            break;
+        case UdpTransport::kQosError:
+            _engineStatisticsPtr->SetLastError(
+                VE_GQOS_ERROR, kTraceError,
+                "SetSendDestination() failed to set QoS");
+            break;
+        case UdpTransport::kMulticastAddressInvalid:
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_MULTICAST_ADDRESS, kTraceError,
+                "SetSendDestination() invalid multicast address");
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(
+                VE_SOCKET_ERROR, kTraceError,
+                "SetSendDestination() undefined socket error 1");
+            break;
+        }
+        return -1;
+    }
+
+    // Check if the user has specified a non-default source port different
+    // from the local receive port.
+    // If so, an extra local socket will be created, unless the specified
+    // source port equals the local receive port.
+    if (sourcePort != kVoEDefault)
+    {
+        WebRtc_UWord16 receiverRtpPort(0);
+        WebRtc_UWord16 rtcpNA(0);
+        if (_socketTransportModule.ReceiveSocketInformation(NULL,
+                                                            receiverRtpPort,
+                                                            rtcpNA,
+                                                            NULL) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_CANNOT_GET_SOCKET_INFO, kTraceError,
+                "SetSendDestination() failed to retrieve socket information");
+            return -1;
+        }
+
+        WebRtc_UWord16 sourcePortUW16 =
+                static_cast<WebRtc_UWord16> (sourcePort);
+
+        // An extra socket will only be created if the specified source port
+        // differs from the local receive port.
+        if (sourcePortUW16 != receiverRtpPort)
+        {
+            // Initialize an extra local socket to get a different source
+            // port than the local receiver port. Always use default source
+            // for RTCP.
+            // Note that this calls UdpTransport::CloseSendSockets().
+            if (_socketTransportModule.InitializeSourcePorts(
+                sourcePortUW16,
+                sourcePortUW16+1) != 0)
+            {
+                UdpTransport::ErrorCode lastSockError(
+                    _socketTransportModule.LastError());
+                switch (lastSockError)
+                {
+                case UdpTransport::kIpAddressInvalid:
+                    _engineStatisticsPtr->SetLastError(
+                        VE_INVALID_IP_ADDRESS, kTraceError,
+                        "SetSendDestination() invalid IP address 2");
+                    break;
+                case UdpTransport::kSocketInvalid:
+                    _engineStatisticsPtr->SetLastError(
+                        VE_SOCKET_ERROR, kTraceError,
+                        "SetSendDestination() invalid socket 2");
+                    break;
+                default:
+                    _engineStatisticsPtr->SetLastError(
+                        VE_SOCKET_ERROR, kTraceError,
+                        "SetSendDestination() undefined socket error 2");
+                    break;
+                }
+                return -1;
+            }
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "SetSendDestination() extra local socket is created"
+                         " to facilitate unique source port");
+        }
+        else
+        {
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "SetSendDestination() sourcePort equals the local"
+                         " receive port => no extra socket is created");
+        }
+    }
+
+    return 0;
+}
+#endif
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+WebRtc_Word32
+Channel::GetSendDestination(int& port,
+                            char ipAddr[64],
+                            int& sourcePort,
+                            int& RTCPport)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetSendDestination()");
+
+    if (_externalTransport)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "GetSendDestination() conflict with external transport");
+        return -1;
+    }
+
+    WebRtc_Word8 ipAddrTmp[UdpTransport::kIpAddressVersion6Length] = {0};
+    WebRtc_UWord16 rtpPort(0);
+    WebRtc_UWord16 rtcpPort(0);
+    WebRtc_UWord16 rtpSourcePort(0);
+    WebRtc_UWord16 rtcpSourcePort(0);
+
+    // Acquire sending socket information from the socket module
+    _socketTransportModule.SendSocketInformation(ipAddrTmp, rtpPort, rtcpPort);
+    _socketTransportModule.SourcePorts(rtpSourcePort, rtcpSourcePort);
+
+    // Deliver valid results to the user
+    port = static_cast<int> (rtpPort);
+    RTCPport = static_cast<int> (rtcpPort);
+    sourcePort = static_cast<int> (rtpSourcePort);
+    if (ipAddr != NULL)
+    {
+        strcpy(ipAddr, ipAddrTmp);
+    }
+
+    return 0;
+}
+#endif
+
+
+WebRtc_Word32
+Channel::SetNetEQPlayoutMode(NetEqModes mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetNetEQPlayoutMode()");
+    AudioPlayoutMode playoutMode(voice);
+    switch (mode)
+    {
+        case kNetEqDefault:
+            playoutMode = voice;
+            break;
+        case kNetEqStreaming:
+            playoutMode = streaming;
+            break;
+        case kNetEqFax:
+            playoutMode = fax;
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetNetEQPlayoutMode() invalid mode");
+            return -1;
+    }
+    if (_audioCodingModule.SetPlayoutMode(playoutMode) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetNetEQPlayoutMode() failed to set playout mode");
+        return -1;
+    }
+    return 0;
+}
+
+WebRtc_Word32
+Channel::GetNetEQPlayoutMode(NetEqModes& mode)
+{
+    const AudioPlayoutMode playoutMode = _audioCodingModule.PlayoutMode();
+    switch (playoutMode)
+    {
+        case voice:
+            mode = kNetEqDefault;
+            break;
+        case streaming:
+            mode = kNetEqStreaming;
+            break;
+        case fax:
+            mode = kNetEqFax;
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "GetNetEQPlayoutMode() invalid mode");
+            return -1;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId,_channelId),
+                 "Channel::GetNetEQPlayoutMode() => mode=%u", mode);
+    return 0;
+}
+
+WebRtc_Word32
+Channel::SetNetEQBGNMode(NetEqBgnModes mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetNetEQBGNMode()");
+    ACMBackgroundNoiseMode noiseMode(On);
+    switch (mode)
+    {
+        case kBgnOn:
+            noiseMode = On;
+            break;
+        case kBgnFade:
+            noiseMode = Fade;
+            break;
+        case kBgnOff:
+            noiseMode = Off;
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetNetEQBGNMode() invalid mode");
+            return -1;
+    }
+    if (_audioCodingModule.SetBackgroundNoiseMode(noiseMode) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetBackgroundNoiseMode() failed to set noise mode");
+        return -1;
+    }
+    return 0;
+}
+
+WebRtc_Word32
+Channel::SetOnHoldStatus(bool enable, OnHoldModes mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetOnHoldStatus()");
+    if (mode == kHoldSendAndPlay)
+    {
+        _outputIsOnHold = enable;
+        _inputIsOnHold = enable;
+    }
+    else if (mode == kHoldPlayOnly)
+    {
+        _outputIsOnHold = enable;
+    }
+    else if (mode == kHoldSendOnly)
+    {
+        _inputIsOnHold = enable;
+    }
+    return 0;
+}
+
+WebRtc_Word32
+Channel::GetOnHoldStatus(bool& enabled, OnHoldModes& mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetOnHoldStatus()");
+    enabled = (_outputIsOnHold || _inputIsOnHold);
+    if (_outputIsOnHold && _inputIsOnHold)
+    {
+        mode = kHoldSendAndPlay;
+    }
+    else if (_outputIsOnHold && !_inputIsOnHold)
+    {
+        mode = kHoldPlayOnly;
+    }
+    else if (!_outputIsOnHold && _inputIsOnHold)
+    {
+        mode = kHoldSendOnly;
+    }
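+    // Note that when neither direction is on hold, 'mode' is left unchanged;
+    // it is only meaningful when 'enabled' is true.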
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetOnHoldStatus() => enabled=%d, mode=%d",
+                 enabled, mode);
+    return 0;
+}
+
+WebRtc_Word32
+Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::RegisterVoiceEngineObserver()");
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (_voiceEngineObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "RegisterVoiceEngineObserver() observer already enabled");
+        return -1;
+    }
+    _voiceEngineObserverPtr = &observer;
+    return 0;
+}
+
+WebRtc_Word32
+Channel::DeRegisterVoiceEngineObserver()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::DeRegisterVoiceEngineObserver()");
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (!_voiceEngineObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "DeRegisterVoiceEngineObserver() observer already disabled");
+        return 0;
+    }
+    _voiceEngineObserverPtr = NULL;
+    return 0;
+}
+
+WebRtc_Word32
+Channel::GetNetEQBGNMode(NetEqBgnModes& mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetNetEQBGNMode()");
+    ACMBackgroundNoiseMode noiseMode(On);
+    _audioCodingModule.BackgroundNoiseMode(noiseMode);
+    switch (noiseMode)
+    {
+        case On:
+            mode = kBgnOn;
+            break;
+        case Fade:
+            mode = kBgnFade;
+            break;
+        case Off:
+            mode = kBgnOff;
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(
+                    VE_INVALID_ARGUMENT,
+                    kTraceError,
+                    "GetNetEQBGNMode() invalid mode");
+            return -1;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetNetEQBGNMode() => mode=%u", mode);
+    return 0;
+}
+
+WebRtc_Word32
+Channel::GetSendCodec(CodecInst& codec)
+{
+    return (_audioCodingModule.SendCodec(codec));
+}
+
+WebRtc_Word32
+Channel::GetRecCodec(CodecInst& codec)
+{
+    return (_audioCodingModule.ReceiveCodec(codec));
+}
+
+WebRtc_Word32
+Channel::SetSendCodec(const CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetSendCodec()");
+
+    if (_audioCodingModule.RegisterSendCodec(codec) != 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "SetSendCodec() failed to register codec to ACM");
+        return -1;
+    }
+
+    if (_rtpRtcpModule.RegisterSendPayload(
+        codec.plname,
+        codec.pltype,
+        codec.plfreq,
+        codec.channels,
+        (codec.rate < 0 ? 0 : codec.rate)) != 0)
+    {
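+        // First attempt to register the send payload failed =>
+        // de-register and try again.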
+        _rtpRtcpModule.DeRegisterSendPayload(codec.pltype);
+        if (_rtpRtcpModule.RegisterSendPayload(
+            codec.plname,
+            codec.pltype,
+            codec.plfreq,
+            codec.channels,
+            (codec.rate < 0 ? 0 : codec.rate)) != 0)
+        {
+            WEBRTC_TRACE(
+                    kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
+                    "SetSendCodec() failed to register codec to"
+                    " RTP/RTCP module");
+            return -1;
+        }
+    }
+
+    if (_rtpRtcpModule.SetAudioPacketSize(codec.pacsize) != 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "SetSendCodec() failed to set audio packet size");
+        return -1;
+    }
+
+    return 0;
+}
+
+WebRtc_Word32
+Channel::SetVADStatus(bool enableVAD, ACMVADMode mode, bool disableDTX)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetVADStatus(mode=%d)", mode);
+    // To disable VAD, DTX must be disabled too
+    disableDTX = ((enableVAD == false) ? true : disableDTX);
+    if (_audioCodingModule.SetVAD(!disableDTX, enableVAD, mode) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetVADStatus() failed to set VAD");
+        return -1;
+    }
+    return 0;
+}
+
+WebRtc_Word32
+Channel::GetVADStatus(bool& enabledVAD, ACMVADMode& mode, bool& disabledDTX)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetVADStatus");
+    if (_audioCodingModule.VAD(disabledDTX, enabledVAD, mode) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "GetVADStatus() failed to get VAD status");
+        return -1;
+    }
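+    // The ACM reports DTX as "enabled"; invert the flag so the caller gets
+    // the "disabled" status that this API exposes.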
+    disabledDTX = !disabledDTX;
+    return 0;
+}
+
+WebRtc_Word32
+Channel::SetRecPayloadType(const CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetRecPayloadType()");
+
+    if (_playing)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_PLAYING, kTraceError,
+            "SetRecPayloadType() unable to set PT while playing");
+        return -1;
+    }
+    if (_receiving)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_LISTENING, kTraceError,
+            "SetRecPayloadType() unable to set PT while listening");
+        return -1;
+    }
+
+    if (codec.pltype == -1)
+    {
+        // De-register the selected codec (RTP/RTCP module and ACM)
+
+        WebRtc_Word8 pltype(-1);
+        CodecInst rxCodec = codec;
+
+        // Get payload type for the given codec
+        _rtpRtcpModule.ReceivePayloadType(
+                rxCodec.plname,
+                rxCodec.plfreq,
+                rxCodec.channels,
+                &pltype,
+                (rxCodec.rate < 0 ? 0 : rxCodec.rate));
+        rxCodec.pltype = pltype;
+
+        if (_rtpRtcpModule.DeRegisterReceivePayload(pltype) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                    VE_RTP_RTCP_MODULE_ERROR,
+                    kTraceError,
+                    "SetRecPayloadType() RTP/RTCP-module deregistration "
+                    "failed");
+            return -1;
+        }
+        if (_audioCodingModule.UnregisterReceiveCodec(rxCodec.pltype) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+                "SetRecPayloadType() ACM deregistration failed - 1");
+            return -1;
+        }
+        return 0;
+    }
+
+    if (_rtpRtcpModule.RegisterReceivePayload(
+        codec.plname,
+        codec.pltype,
+        codec.plfreq,
+        codec.channels,
+        (codec.rate < 0 ? 0 : codec.rate)) != 0)
+    {
+        // First attempt to register failed => de-register and try again
+        _rtpRtcpModule.DeRegisterReceivePayload(codec.pltype);
+        if (_rtpRtcpModule.RegisterReceivePayload(
+            codec.plname,
+            codec.pltype,
+            codec.plfreq,
+            codec.channels,
+            (codec.rate < 0 ? 0 : codec.rate)) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+                "SetRecPayloadType() RTP/RTCP-module registration failed");
+            return -1;
+        }
+    }
+    if (_audioCodingModule.RegisterReceiveCodec(codec) != 0)
+    {
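+        // First attempt to register failed => unregister and try again.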
+        _audioCodingModule.UnregisterReceiveCodec(codec.pltype);
+        if (_audioCodingModule.RegisterReceiveCodec(codec) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+                "SetRecPayloadType() ACM registration failed - 1");
+            return -1;
+        }
+    }
+    return 0;
+}
+
+WebRtc_Word32
+Channel::GetRecPayloadType(CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetRecPayloadType()");
+    WebRtc_Word8 payloadType(-1);
+    if (_rtpRtcpModule.ReceivePayloadType(
+        (const WebRtc_Word8*)codec.plname,
+        codec.plfreq,
+        codec.channels,
+        &payloadType,
+        (codec.rate < 0 ? 0 : codec.rate)) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "GetRecPayloadType() failed to retrieve RX payload type");
+        return -1;
+    }
+    codec.pltype = payloadType;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetRecPayloadType() => pltype=%u", codec.pltype);
+    return 0;
+}
+
+WebRtc_Word32
+Channel::SetAMREncFormat(AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetAMREncFormat()");
+
+    // ACM doesn't support AMR
+    return -1;
+}
+
+WebRtc_Word32
+Channel::SetAMRDecFormat(AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetAMRDecFormat()");
+
+    // ACM doesn't support AMR
+    return -1;
+}
+
+WebRtc_Word32
+Channel::SetAMRWbEncFormat(AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetAMRWbEncFormat()");
+
+    // ACM doesn't support AMR
+    return -1;
+}
+
+WebRtc_Word32
+Channel::SetAMRWbDecFormat(AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetAMRWbDecFormat()");
+
+    // ACM doesn't support AMR
+    return -1;
+}
+
+WebRtc_Word32
+Channel::SetSendCNPayloadType(int type, PayloadFrequencies frequency)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetSendCNPayloadType()");
+
+    CodecInst codec;
+    WebRtc_Word32 samplingFreqHz(-1);
+    if (frequency == kFreq32000Hz)
+        samplingFreqHz = 32000;
+    else if (frequency == kFreq16000Hz)
+        samplingFreqHz = 16000;
+
+    if (_audioCodingModule.Codec("CN", codec, samplingFreqHz) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetSendCNPayloadType() failed to retrieve default CN codec "
+            "settings");
+        return -1;
+    }
+
+    // Modify the payload type (must be set to dynamic range)
+    codec.pltype = type;
+
+    if (_audioCodingModule.RegisterSendCodec(codec) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetSendCNPayloadType() failed to register CN to ACM");
+        return -1;
+    }
+
+    if (_rtpRtcpModule.RegisterSendPayload(codec.plname,
+                                           codec.pltype,
+                                           codec.plfreq,
+                                           codec.channels) != 0)
+    {
+        _rtpRtcpModule.DeRegisterSendPayload(codec.pltype);
+        if (_rtpRtcpModule.RegisterSendPayload(codec.plname,
+                                               codec.pltype,
+                                               codec.plfreq,
+                                               codec.channels) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+                "SetSendCNPayloadType() failed to register CN to RTP/RTCP "
+                "module");
+            return -1;
+        }
+    }
+    return 0;
+}
+
+WebRtc_Word32
+Channel::SetISACInitTargetRate(int rateBps, bool useFixedFrameSize)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetISACInitTargetRate()");
+
+    CodecInst sendCodec;
+    if (_audioCodingModule.SendCodec(sendCodec) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CODEC_ERROR, kTraceError,
+            "SetISACInitTargetRate() failed to retrieve send codec");
+        return -1;
+    }
+    if (STR_CASE_CMP(sendCodec.plname, "ISAC") != 0)
+    {
+        // This API is only valid if iSAC is setup to run in channel-adaptive
+        // mode.
+        // We do not validate the adaptive mode here. It is done later in the
+        // ConfigISACBandwidthEstimator() API.
+        _engineStatisticsPtr->SetLastError(
+            VE_CODEC_ERROR, kTraceError,
+            "SetISACInitTargetRate() send codec is not iSAC");
+        return -1;
+    }
+
+    WebRtc_UWord8 initFrameSizeMsec(0);
+    if (16000 == sendCodec.plfreq)
+    {
+        // Note that 0 is a valid rate and corresponds to "use the default".
+        if ((rateBps != 0 &&
+            rateBps < kVoiceEngineMinIsacInitTargetRateBpsWb) ||
+            (rateBps > kVoiceEngineMaxIsacInitTargetRateBpsWb))
+        {
+             _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetISACInitTargetRate() invalid target rate - 1");
+            return -1;
+        }
+        // 30 or 60ms
+        initFrameSizeMsec = (WebRtc_UWord8)(sendCodec.pacsize / 16);
+    }
+    else if (32000 == sendCodec.plfreq)
+    {
+        if ((rateBps != 0 &&
+            rateBps < kVoiceEngineMinIsacInitTargetRateBpsSwb) ||
+            (rateBps > kVoiceEngineMaxIsacInitTargetRateBpsSwb))
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetISACInitTargetRate() invalid target rate - 2");
+            return -1;
+        }
+        initFrameSizeMsec = (WebRtc_UWord8)(sendCodec.pacsize / 32); // 30ms
+    }
+
+    if (_audioCodingModule.ConfigISACBandwidthEstimator(
+        initFrameSizeMsec, rateBps, useFixedFrameSize) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetISACInitTargetRate() iSAC BWE config failed");
+        return -1;
+    }
+
+    return 0;
+}
+
+WebRtc_Word32
+Channel::SetISACMaxRate(int rateBps)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetISACMaxRate()");
+
+    CodecInst sendCodec;
+    if (_audioCodingModule.SendCodec(sendCodec) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CODEC_ERROR, kTraceError,
+            "SetISACMaxRate() failed to retrieve send codec");
+        return -1;
+    }
+    if (STR_CASE_CMP(sendCodec.plname, "ISAC") != 0)
+    {
+        // This API is only valid if iSAC is selected as sending codec.
+        _engineStatisticsPtr->SetLastError(
+            VE_CODEC_ERROR, kTraceError,
+            "SetISACMaxRate() send codec is not iSAC");
+        return -1;
+    }
+    if (16000 == sendCodec.plfreq)
+    {
+        if ((rateBps < kVoiceEngineMinIsacMaxRateBpsWb) ||
+            (rateBps > kVoiceEngineMaxIsacMaxRateBpsWb))
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetISACMaxRate() invalid max rate - 1");
+            return -1;
+        }
+    }
+    else if (32000 == sendCodec.plfreq)
+    {
+        if ((rateBps < kVoiceEngineMinIsacMaxRateBpsSwb) ||
+            (rateBps > kVoiceEngineMaxIsacMaxRateBpsSwb))
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetISACMaxRate() invalid max rate - 2");
+            return -1;
+        }
+    }
+    if (_sending)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SENDING, kTraceError,
+            "SetISACMaxRate() unable to set max rate while sending");
+        return -1;
+    }
+
+    // Set the maximum instantaneous rate of iSAC (works for both adaptive
+    // and non-adaptive mode)
+    if (_audioCodingModule.SetISACMaxRate(rateBps) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetISACMaxRate() failed to set max rate");
+        return -1;
+    }
+
+    return 0;
+}
+
+WebRtc_Word32
+Channel::SetISACMaxPayloadSize(int sizeBytes)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetISACMaxPayloadSize()");
+    CodecInst sendCodec;
+    if (_audioCodingModule.SendCodec(sendCodec) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CODEC_ERROR, kTraceError,
+            "SetISACMaxPayloadSize() failed to retrieve send codec");
+        return -1;
+    }
+    if (STR_CASE_CMP(sendCodec.plname, "ISAC") != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CODEC_ERROR, kTraceError,
+            "SetISACMaxPayloadSize() send codec is not iSAC");
+        return -1;
+    }
+    if (16000 == sendCodec.plfreq)
+    {
+        if ((sizeBytes < kVoiceEngineMinIsacMaxPayloadSizeBytesWb) ||
+            (sizeBytes > kVoiceEngineMaxIsacMaxPayloadSizeBytesWb))
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetISACMaxPayloadSize() invalid max payload - 1");
+            return -1;
+        }
+    }
+    else if (32000 == sendCodec.plfreq)
+    {
+        if ((sizeBytes < kVoiceEngineMinIsacMaxPayloadSizeBytesSwb) ||
+            (sizeBytes > kVoiceEngineMaxIsacMaxPayloadSizeBytesSwb))
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetISACMaxPayloadSize() invalid max payload - 2");
+            return -1;
+        }
+    }
+    if (_sending)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SENDING, kTraceError,
+            "SetISACMaxPayloadSize() unable to set max rate while sending");
+        return -1;
+    }
+
+    if (_audioCodingModule.SetISACMaxPayloadSize(sizeBytes) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetISACMaxPayloadSize() failed to set max payload size");
+        return -1;
+    }
+    return 0;
+}
+
+WebRtc_Word32 Channel::RegisterExternalTransport(Transport& transport)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+               "Channel::RegisterExternalTransport()");
+
+    CriticalSectionScoped cs(_callbackCritSect);
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    // Sanity checks for the default case (non-external transport) to avoid
+    // conflicts with the WebRtc sockets.
+    if (_socketTransportModule.SendSocketsInitialized())
+    {
+        _engineStatisticsPtr->SetLastError(VE_SEND_SOCKETS_CONFLICT,
+                                           kTraceError,
+                "RegisterExternalTransport() send sockets already initialized");
+        return -1;
+    }
+    if (_socketTransportModule.ReceiveSocketsInitialized())
+    {
+        _engineStatisticsPtr->SetLastError(VE_RECEIVE_SOCKETS_CONFLICT,
+                                           kTraceError,
+             "RegisterExternalTransport() receive sockets already initialized");
+        return -1;
+    }
+#endif
+    if (_externalTransport)
+    {
+        _engineStatisticsPtr->SetLastError(VE_INVALID_OPERATION,
+                                           kTraceError,
+              "RegisterExternalTransport() external transport already enabled");
+        return -1;
+    }
+    _externalTransport = true;
+    _transportPtr = &transport;
+    return 0;
+}
+
+WebRtc_Word32
+Channel::DeRegisterExternalTransport()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::DeRegisterExternalTransport()");
+
+    if (!_transportPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "DeRegisterExternalTransport() external transport already "
+            "disabled");
+        return 0;
+    }
+    _externalTransport = false;
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+    _transportPtr = NULL;
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "DeRegisterExternalTransport() all transport is disabled");
+#else
+    _transportPtr = &_socketTransportModule;
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "DeRegisterExternalTransport() internal Transport is enabled");
+#endif
+    return 0;
+}
+
+WebRtc_Word32
+Channel::ReceivedRTPPacket(const WebRtc_Word8* data, WebRtc_Word32 length)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::ReceivedRTPPacket()");
+    const WebRtc_Word8 dummyIP[] = "127.0.0.1";
+    IncomingRTPPacket(data, length, dummyIP, 0);
+    return 0;
+}
+
+WebRtc_Word32
+Channel::ReceivedRTCPPacket(const WebRtc_Word8* data, WebRtc_Word32 length)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::ReceivedRTCPPacket()");
+    const WebRtc_Word8 dummyIP[] = "127.0.0.1";
+    IncomingRTCPPacket(data, length, dummyIP, 0);
+    return 0;
+}
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+WebRtc_Word32
+Channel::GetSourceInfo(int& rtpPort, int& rtcpPort, char ipAddr[64])
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetSourceInfo()");
+
+    WebRtc_UWord16 rtpPortModule;
+    WebRtc_UWord16 rtcpPortModule;
+    WebRtc_Word8 ipaddr[UdpTransport::kIpAddressVersion6Length] = {0};
+
+    if (_socketTransportModule.RemoteSocketInformation(ipaddr,
+                                                       rtpPortModule,
+                                                       rtcpPortModule) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+            "GetSourceInfo() failed to retrieve remote socket information");
+        return -1;
+    }
+    strcpy(ipAddr, ipaddr);
+    rtpPort = rtpPortModule;
+    rtcpPort = rtcpPortModule;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+        "GetSourceInfo() => rtpPort=%d, rtcpPort=%d, ipAddr=%s",
+        rtpPort, rtcpPort, ipAddr);
+    return 0;
+}
+
+WebRtc_Word32
+Channel::EnableIPv6()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::EnableIPv6()");
+    if (_socketTransportModule.ReceiveSocketsInitialized() ||
+        _socketTransportModule.SendSocketsInitialized())
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "EnableIPv6() socket layer is already initialized");
+        return -1;
+    }
+    if (_socketTransportModule.EnableIpV6() != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_ERROR, kTraceError,
+            "EnableIPv6() failed to enable IPv6");
+        const UdpTransport::ErrorCode lastError =
+            _socketTransportModule.LastError();
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "UdpTransport::LastError() => %d", lastError);
+        return -1;
+    }
+    return 0;
+}
+
+bool
+Channel::IPv6IsEnabled() const
+{
+    bool isEnabled = _socketTransportModule.IpV6Enabled();
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "IPv6IsEnabled() => %d", isEnabled);
+    return isEnabled;
+}
+
+WebRtc_Word32
+Channel::SetSourceFilter(int rtpPort, int rtcpPort, const char ipAddr[64])
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetSourceFilter()");
+    if (_socketTransportModule.SetFilterPorts(
+        static_cast<WebRtc_UWord16>(rtpPort),
+        static_cast<WebRtc_UWord16>(rtcpPort)) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+            "SetSourceFilter() failed to set filter ports");
+        const UdpTransport::ErrorCode lastError =
+            _socketTransportModule.LastError();
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "UdpTransport::LastError() => %d",
+                     lastError);
+        return -1;
+    }
+    const WebRtc_Word8* filterIpAddress =
+        static_cast<const WebRtc_Word8*> (ipAddr);
+    if (_socketTransportModule.SetFilterIP(filterIpAddress) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_IP_ADDRESS, kTraceError,
+            "SetSourceFilter() failed to set filter IP address");
+        const UdpTransport::ErrorCode lastError =
+           _socketTransportModule.LastError();
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "UdpTransport::LastError() => %d", lastError);
+        return -1;
+    }
+    return 0;
+}
+
+WebRtc_Word32
+Channel::GetSourceFilter(int& rtpPort, int& rtcpPort, char ipAddr[64])
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetSourceFilter()");
+    WebRtc_UWord16 rtpFilterPort(0);
+    WebRtc_UWord16 rtcpFilterPort(0);
+    if (_socketTransportModule.FilterPorts(rtpFilterPort, rtcpFilterPort) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceWarning,
+            "GetSourceFilter() failed to retrieve filter ports");
+    }
+    WebRtc_Word8 ipAddrTmp[UdpTransport::kIpAddressVersion6Length] = {0};
+    if (_socketTransportModule.FilterIP(ipAddrTmp) != 0)
+    {
+        // no filter has been configured (not seen as an error)
+        memset(ipAddrTmp,
+               0, UdpTransport::kIpAddressVersion6Length);
+    }
+    rtpPort = static_cast<int> (rtpFilterPort);
+    rtcpPort = static_cast<int> (rtcpFilterPort);
+    strcpy(ipAddr, ipAddrTmp);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+        "GetSourceFilter() => rtpPort=%d, rtcpPort=%d, ipAddr=%s",
+        rtpPort, rtcpPort, ipAddr);
+    return 0;
+}
+
+WebRtc_Word32
+Channel::SetSendTOS(int DSCP, int priority, bool useSetSockopt)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetSendTOS(DSCP=%d, useSetSockopt=%d)",
+                 DSCP, (int)useSetSockopt);
+
+    // Set TOS value and possibly try to force usage of setsockopt()
+    if (_socketTransportModule.SetToS(DSCP, useSetSockopt) != 0)
+    {
+        UdpTransport::ErrorCode lastSockError(
+            _socketTransportModule.LastError());
+        switch (lastSockError)
+        {
+        case UdpTransport::kTosError:
+            _engineStatisticsPtr->SetLastError(VE_TOS_ERROR, kTraceError,
+                                               "SetSendTOS() TOS error");
+            break;
+        case UdpTransport::kQosError:
+            _engineStatisticsPtr->SetLastError(
+                    VE_TOS_GQOS_CONFLICT, kTraceError,
+                    "SetSendTOS() GQOS error");
+            break;
+        case UdpTransport::kTosInvalid:
+            // can't switch SetSockOpt method without disabling TOS first, or
+            // SetSockopt() call failed
+            _engineStatisticsPtr->SetLastError(VE_TOS_INVALID, kTraceError,
+                                               "SetSendTOS() invalid TOS");
+            break;
+        case UdpTransport::kSocketInvalid:
+            _engineStatisticsPtr->SetLastError(VE_SOCKET_ERROR, kTraceError,
+                                               "SetSendTOS() invalid Socket");
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(VE_TOS_ERROR, kTraceError,
+                                               "SetSendTOS() TOS error");
+            break;
+        }
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "UdpTransport =>  lastError = %d",
+                     lastSockError);
+        return -1;
+    }
+
+    // Set priority (PCP) value, -1 means don't change
+    if (-1 != priority)
+    {
+        if (_socketTransportModule.SetPCP(priority) != 0)
+        {
+            UdpTransport::ErrorCode lastSockError(
+                _socketTransportModule.LastError());
+            switch (lastSockError)
+            {
+            case UdpTransport::kPcpError:
+                _engineStatisticsPtr->SetLastError(VE_TOS_ERROR, kTraceError,
+                                                   "SetSendTOS() PCP error");
+                break;
+            case UdpTransport::kQosError:
+                _engineStatisticsPtr->SetLastError(
+                        VE_TOS_GQOS_CONFLICT, kTraceError,
+                        "SetSendTOS() GQOS conflict");
+                break;
+            case UdpTransport::kSocketInvalid:
+                _engineStatisticsPtr->SetLastError(
+                        VE_SOCKET_ERROR, kTraceError,
+                        "SetSendTOS() invalid Socket");
+                break;
+            default:
+                _engineStatisticsPtr->SetLastError(VE_TOS_ERROR, kTraceError,
+                                                   "SetSendTOS() PCP error");
+                break;
+            }
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "UdpTransport =>  lastError = %d",
+                         lastSockError);
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+WebRtc_Word32
+Channel::GetSendTOS(int &DSCP, int& priority, bool &useSetSockopt)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetSendTOS(DSCP=?, useSetSockopt=?)");
+    WebRtc_Word32 dscp(0), prio(0);
+    bool setSockopt(false);
+    if (_socketTransportModule.ToS(dscp, setSockopt) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+            "GetSendTOS() failed to get TOS info");
+        return -1;
+    }
+    if (_socketTransportModule.PCP(prio) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+            "GetSendTOS() failed to get PCP info");
+        return -1;
+    }
+    DSCP = static_cast<int> (dscp);
+    priority = static_cast<int> (prio);
+    useSetSockopt = setSockopt;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetSendTOS() => DSCP=%d, priority=%d, useSetSockopt=%d",
+        DSCP, priority, (int)useSetSockopt);
+    return 0;
+}
+
+#if defined(_WIN32)
+WebRtc_Word32
+Channel::SetSendGQoS(bool enable, int serviceType, int overrideDSCP)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetSendGQoS(enable=%d, serviceType=%d, "
+                 "overrideDSCP=%d)",
+                 (int)enable, serviceType, overrideDSCP);
+    if(!_socketTransportModule.ReceiveSocketsInitialized())
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKETS_NOT_INITED, kTraceError,
+            "SetSendGQoS() GQoS state must be set after sockets are created");
+        return -1;
+    }
+    if(!_socketTransportModule.SendSocketsInitialized())
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_DESTINATION_NOT_INITED, kTraceError,
+            "SetSendGQoS() GQoS state must be set after sending side is "
+            "initialized");
+        return -1;
+    }
+    if (enable &&
+       (serviceType != SERVICETYPE_BESTEFFORT) &&
+       (serviceType != SERVICETYPE_CONTROLLEDLOAD) &&
+       (serviceType != SERVICETYPE_GUARANTEED) &&
+       (serviceType != SERVICETYPE_QUALITATIVE))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetSendGQoS() Invalid service type");
+        return -1;
+    }
+    if (enable && ((overrideDSCP <  0) || (overrideDSCP > 63)))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetSendGQoS() Invalid overrideDSCP value");
+        return -1;
+    }
+
+    // Avoid GQoS/ToS conflict when user wants to override the default DSCP
+    // mapping
+    bool QoS(false);
+    WebRtc_Word32 sType(0);
+    WebRtc_Word32 ovrDSCP(0);
+    if (_socketTransportModule.QoS(QoS, sType, ovrDSCP))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+            "SetSendGQoS() failed to get QOS info");
+        return -1;
+    }
+    if (QoS && ovrDSCP == 0 && overrideDSCP != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_TOS_GQOS_CONFLICT, kTraceError,
+            "SetSendGQoS() QOS is already enabled and overrideDSCP differs,"
+            " not allowed");
+        return -1;
+    }
+    const WebRtc_Word32 maxBitrate(0);
+    if (_socketTransportModule.SetQoS(enable,
+                                      static_cast<WebRtc_Word32>(serviceType),
+                                      maxBitrate,
+                                      static_cast<WebRtc_Word32>(overrideDSCP),
+                                      true))
+    {
+        UdpTransport::ErrorCode lastSockError(
+            _socketTransportModule.LastError());
+        switch (lastSockError)
+        {
+        case UdpTransport::kQosError:
+            _engineStatisticsPtr->SetLastError(VE_GQOS_ERROR, kTraceError,
+                                               "SetSendGQoS() QOS error");
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(VE_SOCKET_ERROR, kTraceError,
+                                               "SetSendGQoS() Socket error");
+            break;
+        }
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "UdpTransport() => lastError = %d",
+                     lastSockError);
+        return -1;
+    }
+    return 0;
+}
+#endif
+
+#if defined(_WIN32)
+WebRtc_Word32
+Channel::GetSendGQoS(bool &enabled, int &serviceType, int &overrideDSCP)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetSendGQoS(enable=?, serviceType=?, "
+                 "overrideDSCP=?)");
+
+    bool QoS(false);
+    WebRtc_Word32 serviceTypeModule(0);
+    WebRtc_Word32 overrideDSCPModule(0);
+    _socketTransportModule.QoS(QoS, serviceTypeModule, overrideDSCPModule);
+
+    enabled = QoS;
+    serviceType = static_cast<int> (serviceTypeModule);
+    overrideDSCP = static_cast<int> (overrideDSCPModule);
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "GetSendGQoS() => enabled=%d, serviceType=%d, overrideDSCP=%d",
+                 (int)enabled, serviceType, overrideDSCP);
+    return 0;
+}
+#endif
+#endif
+
+WebRtc_Word32
+Channel::SetPacketTimeoutNotification(bool enable, int timeoutSeconds)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetPacketTimeoutNotification()");
+    if (enable)
+    {
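+        // The RTCP timeout is left at zero (disabled); only RTP packets are
+        // monitored.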
+        const WebRtc_UWord32 RTPtimeoutMS = 1000*timeoutSeconds;
+        const WebRtc_UWord32 RTCPtimeoutMS = 0;
+        _rtpRtcpModule.SetPacketTimeout(RTPtimeoutMS, RTCPtimeoutMS);
+        _rtpPacketTimeOutIsEnabled = true;
+        _rtpTimeOutSeconds = timeoutSeconds;
+    }
+    else
+    {
+        _rtpRtcpModule.SetPacketTimeout(0, 0);
+        _rtpPacketTimeOutIsEnabled = false;
+        _rtpTimeOutSeconds = 0;
+    }
+    return 0;
+}
+
+WebRtc_Word32
+Channel::GetPacketTimeoutNotification(bool& enabled, int& timeoutSeconds)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetPacketTimeoutNotification()");
+    enabled = _rtpPacketTimeOutIsEnabled;
+    if (enabled)
+    {
+        timeoutSeconds = _rtpTimeOutSeconds;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetPacketTimeoutNotification() => enabled=%d,"
+                 " timeoutSeconds=%d",
+                 enabled, timeoutSeconds);
+    return 0;
+}
+
+WebRtc_Word32
+Channel::RegisterDeadOrAliveObserver(VoEConnectionObserver& observer)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::RegisterDeadOrAliveObserver()");
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (_connectionObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(VE_INVALID_OPERATION, kTraceError,
+            "RegisterDeadOrAliveObserver() observer already enabled");
+        return -1;
+    }
+
+    _connectionObserverPtr = &observer;
+    _connectionObserver = true;
+
+    return 0;
+}
+
+WebRtc_Word32
+Channel::DeRegisterDeadOrAliveObserver()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::DeRegisterDeadOrAliveObserver()");
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (!_connectionObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "DeRegisterDeadOrAliveObserver() observer already disabled");
+        return 0;
+    }
+
+    _connectionObserver = false;
+    _connectionObserverPtr = NULL;
+
+    return 0;
+}
+
+WebRtc_Word32
+Channel::SetPeriodicDeadOrAliveStatus(bool enable, int sampleTimeSeconds)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetPeriodicDeadOrAliveStatus()");
+    if (!_connectionObserverPtr)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "SetPeriodicDeadOrAliveStatus() connection observer has"
+                     " not been registered");
+    }
+    if (enable)
+    {
+        ResetDeadOrAliveCounters();
+    }
+    bool enabled(false);
+    WebRtc_UWord8 currentSampleTimeSec(0);
+    // Store last state (will be used later if dead-or-alive is disabled).
+    _rtpRtcpModule.PeriodicDeadOrAliveStatus(enabled, currentSampleTimeSec);
+    // Update the dead-or-alive state.
+    if (_rtpRtcpModule.SetPeriodicDeadOrAliveStatus(
+        enable, (WebRtc_UWord8)sampleTimeSeconds) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+                VE_RTP_RTCP_MODULE_ERROR,
+                kTraceError,
+                "SetPeriodicDeadOrAliveStatus() failed to set dead-or-alive "
+                "status");
+        return -1;
+    }
+    if (!enable)
+    {
+        // Restore last utilized sample time.
+        // Without this, the sample time would always be reset to default
+        // (2 sec), each time dead-or-alived was disabled without sample-time
+        // parameter.
+        _rtpRtcpModule.SetPeriodicDeadOrAliveStatus(enable,
+                                                    currentSampleTimeSec);
+    }
+    return 0;
+}
+
+WebRtc_Word32
+Channel::GetPeriodicDeadOrAliveStatus(bool& enabled, int& sampleTimeSeconds)
+{
+    _rtpRtcpModule.PeriodicDeadOrAliveStatus(
+        enabled,
+        (WebRtc_UWord8&)sampleTimeSeconds);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetPeriodicDeadOrAliveStatus() => enabled=%d,"
+                 " sampleTimeSeconds=%d",
+                 enabled, sampleTimeSeconds);
+    return 0;
+}
+
+WebRtc_Word32
+Channel::SendUDPPacket(const void* data,
+                       unsigned int length,
+                       int& transmittedBytes,
+                       bool useRtcpSocket)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SendUDPPacket()");
+    if (_externalTransport)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "SendUDPPacket() external transport is enabled");
+        return -1;
+    }
+    if (useRtcpSocket && !_rtpRtcpModule.RTCP())
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTCP_ERROR, kTraceError,
+            "SendUDPPacket() RTCP is disabled");
+        return -1;
+    }
+    if (!_sending)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_NOT_SENDING, kTraceError,
+            "SendUDPPacket() not sending");
+        return -1;
+    }
+
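+    // Copy the payload into a temporary buffer that can be handed to
+    // SendPacketRaw().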
+    char* dataC = new char[length];
+    if (NULL == dataC)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_NO_MEMORY, kTraceError,
+            "SendUDPPacket() memory allocation failed");
+        return -1;
+    }
+    memcpy(dataC, data, length);
+
+    transmittedBytes = SendPacketRaw(dataC, length, useRtcpSocket);
+
+    delete [] dataC;
+    dataC = NULL;
+
+    if (transmittedBytes <= 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+                VE_SEND_ERROR, kTraceError,
+                "SendUDPPacket() transmission failed");
+        transmittedBytes = 0;
+        return -1;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "SendUDPPacket() => transmittedBytes=%d", transmittedBytes);
+    return 0;
+}
+
+
+int Channel::StartPlayingFileLocally(const char* fileName,
+                                     const bool loop,
+                                     const FileFormats format,
+                                     const int startPosition,
+                                     const float volumeScaling,
+                                     const int stopPosition,
+                                     const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StartPlayingFileLocally(fileNameUTF8[]=%s, loop=%d,"
+                 " format=%d, volumeScaling=%5.3f, startPosition=%d, "
+                 "stopPosition=%d)", fileName, loop, format, volumeScaling,
+                 startPosition, stopPosition);
+
+    if (_outputFilePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_PLAYING, kTraceError,
+            "StartPlayingFileLocally() is already playing");
+        return -1;
+    }
+
+    CriticalSectionScoped cs(_fileCritSect);
+
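+    // Destroy the old instance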
+    if (_outputFilePlayerPtr)
+    {
+        _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
+        FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
+        _outputFilePlayerPtr = NULL;
+    }
+
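+    // Create the instance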
+    _outputFilePlayerPtr = FilePlayer::CreateFilePlayer(
+        _outputFilePlayerId, (const FileFormats)format);
+
+    if (_outputFilePlayerPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartPlayingFileLocally() filePlayer format isnot correct");
+        return -1;
+    }
+
+    const WebRtc_UWord32 notificationTime(0);
+
+    if (_outputFilePlayerPtr->StartPlayingFile(
+        fileName,
+        loop,
+        startPosition,
+        volumeScaling,
+        notificationTime,
+        stopPosition,
+        (const CodecInst*)codecInst) != 0)
+    {
+       _engineStatisticsPtr->SetLastError(
+           VE_BAD_FILE, kTraceError,
+           "StartPlayingFile() failed to start file playout");
+        _outputFilePlayerPtr->StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
+        _outputFilePlayerPtr = NULL;
+        return -1;
+    }
+    _outputFilePlayerPtr->RegisterModuleFileCallback(this);
+    _outputFilePlaying = true;
+
+    return 0;
+}
+
+int Channel::StartPlayingFileLocally(InStream* stream,
+                                     const FileFormats format,
+                                     const int startPosition,
+                                     const float volumeScaling,
+                                     const int stopPosition,
+                                     const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StartPlayingFileLocally(format=%d,"
+                 " volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
+                 format, volumeScaling, startPosition, stopPosition);
+
+    if(stream == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartPlayingFileLocally() NULL as input stream");
+        return -1;
+    }
+
+
+    if (_outputFilePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_PLAYING, kTraceError,
+            "StartPlayingFileLocally() is already playing");
+        return -1;
+    }
+
+    CriticalSectionScoped cs(_fileCritSect);
+
+    // Destroy the old instance
+    if (_outputFilePlayerPtr)
+    {
+        _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
+        FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
+        _outputFilePlayerPtr = NULL;
+    }
+
+    // Create the instance
+    _outputFilePlayerPtr = FilePlayer::CreateFilePlayer(
+        _outputFilePlayerId,
+        (const FileFormats)format);
+
+    if (_outputFilePlayerPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartPlayingFileLocally() filePlayer format isnot correct");
+        return -1;
+    }
+
+    const WebRtc_UWord32 notificationTime(0);
+
+    if (_outputFilePlayerPtr->StartPlayingFile(*stream, startPosition,
+                                               volumeScaling, notificationTime,
+                                               stopPosition, codecInst) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
+                                           "StartPlayingFile() failed to "
+                                           "start file playout");
+        _outputFilePlayerPtr->StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
+        _outputFilePlayerPtr = NULL;
+        return -1;
+    }
+
+    _outputFilePlayerPtr->RegisterModuleFileCallback(this);
+    _outputFilePlaying = true;
+
+    return 0;
+}
+
+int Channel::StopPlayingFileLocally()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StopPlayingFileLocally()");
+
+    if (!_outputFilePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "StopPlayingFileLocally() isnot playing");
+        return 0;
+    }
+
+    CriticalSectionScoped cs(_fileCritSect);
+
+    if (_outputFilePlayerPtr->StopPlayingFile() != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_STOP_RECORDING_FAILED, kTraceError,
+            "StopPlayingFile() could not stop playing");
+        return -1;
+    }
+    _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
+    FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
+    _outputFilePlayerPtr = NULL;
+    _outputFilePlaying = false;
+
+    return 0;
+}
+
+int Channel::IsPlayingFileLocally() const
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::IsPlayingFileLocally()");
+
+    return (WebRtc_Word32)_outputFilePlaying;
+}
+
+int Channel::ScaleLocalFilePlayout(const float scale)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::ScaleLocalFilePlayout(scale=%5.3f)", scale);
+
+    CriticalSectionScoped cs(_fileCritSect);
+
+    if (!_outputFilePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "ScaleLocalFilePlayout() isnot playing");
+        return -1;
+    }
+    if ((_outputFilePlayerPtr == NULL) ||
+        (_outputFilePlayerPtr->SetAudioScaling(scale) != 0))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_ARGUMENT, kTraceError,
+            "SetAudioScaling() failed to scale the playout");
+        return -1;
+    }
+
+    return 0;
+}
+
+int Channel::GetLocalPlayoutPosition(int& positionMs)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetLocalPlayoutPosition(position=?)");
+
+    WebRtc_UWord32 position;
+
+    CriticalSectionScoped cs(_fileCritSect);
+
+    if (_outputFilePlayerPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "GetLocalPlayoutPosition() filePlayer instance doesnot exist");
+        return -1;
+    }
+
+    if (_outputFilePlayerPtr->GetPlayoutPosition(position) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "GetLocalPlayoutPosition() failed");
+        return -1;
+    }
+    positionMs = position;
+
+    return 0;
+}
+
+int Channel::StartPlayingFileAsMicrophone(const char* fileName,
+                                          const bool loop,
+                                          const FileFormats format,
+                                          const int startPosition,
+                                          const float volumeScaling,
+                                          const int stopPosition,
+                                          const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StartPlayingFileAsMicrophone(fileNameUTF8[]=%s, "
+                 "loop=%d, format=%d, volumeScaling=%5.3f, startPosition=%d, "
+                 "stopPosition=%d)", fileName, loop, format, volumeScaling,
+                 startPosition, stopPosition);
+
+    if (_inputFilePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_PLAYING, kTraceWarning,
+            "StartPlayingFileAsMicrophone() filePlayer is playing");
+        return 0;
+    }
+
+    CriticalSectionScoped cs(_fileCritSect);
+
+    // Destroy the old instance
+    if (_inputFilePlayerPtr)
+    {
+        _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
+        FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
+        _inputFilePlayerPtr = NULL;
+    }
+
+    // Create the instance
+    _inputFilePlayerPtr = FilePlayer::CreateFilePlayer(
+        _inputFilePlayerId, (const FileFormats)format);
+
+    if (_inputFilePlayerPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartPlayingFileAsMicrophone() filePlayer format isnot correct");
+        return -1;
+    }
+
+    const WebRtc_UWord32 notificationTime(0);
+
+    if (_inputFilePlayerPtr->StartPlayingFile(
+        fileName,
+        loop,
+        startPosition,
+        volumeScaling,
+        notificationTime,
+        stopPosition,
+        (const CodecInst*)codecInst) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartPlayingFile() failed to start file playout");
+        _inputFilePlayerPtr->StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
+        _inputFilePlayerPtr = NULL;
+        return -1;
+    }
+    _inputFilePlayerPtr->RegisterModuleFileCallback(this);
+    _inputFilePlaying = true;
+
+    return 0;
+}
+
+int Channel::StartPlayingFileAsMicrophone(InStream* stream,
+                                          const FileFormats format,
+                                          const int startPosition,
+                                          const float volumeScaling,
+                                          const int stopPosition,
+                                          const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StartPlayingFileAsMicrophone(format=%d, "
+                 "volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
+                 format, volumeScaling, startPosition, stopPosition);
+
+    if(stream == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartPlayingFileAsMicrophone NULL as input stream");
+        return -1;
+    }
+
+    if (_inputFilePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_PLAYING, kTraceWarning,
+            "StartPlayingFileAsMicrophone() is playing");
+        return 0;
+    }
+
+    CriticalSectionScoped cs(_fileCritSect);
+
+    // Destroy the old instance
+    if (_inputFilePlayerPtr)
+    {
+        _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
+        FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
+        _inputFilePlayerPtr = NULL;
+    }
+
+    // Create the instance
+    _inputFilePlayerPtr = FilePlayer::CreateFilePlayer(
+        _inputFilePlayerId, (const FileFormats)format);
+
+    if (_inputFilePlayerPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartPlayingInputFile() filePlayer format isnot correct");
+        return -1;
+    }
+
+    const WebRtc_UWord32 notificationTime(0);
+
+    if (_inputFilePlayerPtr->StartPlayingFile(*stream, startPosition,
+                                              volumeScaling, notificationTime,
+                                              stopPosition, codecInst) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
+                                           "StartPlayingFile() failed to start "
+                                           "file playout");
+        _inputFilePlayerPtr->StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
+        _inputFilePlayerPtr = NULL;
+        return -1;
+    }
+
+    _inputFilePlayerPtr->RegisterModuleFileCallback(this);
+    _inputFilePlaying = true;
+
+    return 0;
+}
+
+int Channel::StopPlayingFileAsMicrophone()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StopPlayingFileAsMicrophone()");
+
+    if (!_inputFilePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "StopPlayingFileAsMicrophone() isnot playing");
+        return 0;
+    }
+
+    CriticalSectionScoped cs(_fileCritSect);
+    if (_inputFilePlayerPtr->StopPlayingFile() != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_STOP_RECORDING_FAILED, kTraceError,
+            "StopPlayingFile() could not stop playing");
+        return -1;
+    }
+    _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
+    FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
+    _inputFilePlayerPtr = NULL;
+    _inputFilePlaying = false;
+
+    return 0;
+}
+
+int Channel::IsPlayingFileAsMicrophone() const
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::IsPlayingFileAsMicrophone()");
+
+    return _inputFilePlaying;
+}
+
+int Channel::ScaleFileAsMicrophonePlayout(const float scale)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::ScaleFileAsMicrophonePlayout(scale=%5.3f)", scale);
+
+    CriticalSectionScoped cs(_fileCritSect);
+
+    if (!_inputFilePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "ScaleFileAsMicrophonePlayout() isnot playing");
+        return -1;
+    }
+
+    if ((_inputFilePlayerPtr == NULL) ||
+        (_inputFilePlayerPtr->SetAudioScaling(scale) != 0))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_ARGUMENT, kTraceError,
+            "SetAudioScaling() failed to scale playout");
+        return -1;
+    }
+
+    return 0;
+}
+
+int Channel::StartRecordingPlayout(const WebRtc_Word8* fileName,
+                                   const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StartRecordingPlayout(fileName=%s)", fileName);
+
+    if (_outputFileRecording)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
+                     "StartRecordingPlayout() is already recording");
+        return 0;
+    }
+
+    FileFormats format;
+    const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
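+    // Fallback codec (16 kHz linear PCM) used when no codec is specified.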
+    CodecInst dummyCodec={100,"L16",16000,320,1,320000};
+
+    if (codecInst != NULL && codecInst->channels != 1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_ARGUMENT, kTraceError,
+            "StartRecordingPlayout() invalid compression");
+        return -1;
+    }
+    if(codecInst == NULL)
+    {
+        format = kFileFormatPcm16kHzFile;
+        codecInst=&dummyCodec;
+    }
+    else if((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
+        (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
+        (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
+    {
+        format = kFileFormatWavFile;
+    }
+    else
+    {
+        format = kFileFormatCompressedFile;
+    }
+
+    CriticalSectionScoped cs(_fileCritSect);
+
+    // Destroy the old instance
+    if (_outputFileRecorderPtr)
+    {
+        _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+        _outputFileRecorderPtr = NULL;
+    }
+
+    _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
+        _outputFileRecorderId, (const FileFormats)format);
+    if (_outputFileRecorderPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartRecordingPlayout() fileRecorder format isnot correct");
+        return -1;
+    }
+
+    if (_outputFileRecorderPtr->StartRecordingAudioFile(
+        fileName, (const CodecInst&)*codecInst, notificationTime) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartRecordingAudioFile() failed to start file recording");
+        _outputFileRecorderPtr->StopRecording();
+        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+        _outputFileRecorderPtr = NULL;
+        return -1;
+    }
+    _outputFileRecorderPtr->RegisterModuleFileCallback(this);
+    _outputFileRecording = true;
+
+    return 0;
+}
+
+int Channel::StartRecordingPlayout(OutStream* stream,
+                                   const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StartRecordingPlayout()");
+
+    if (_outputFileRecording)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
+                     "StartRecordingPlayout() is already recording");
+        return 0;
+    }
+
+    FileFormats format;
+    const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
+    CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};
+
+    if (codecInst != NULL && codecInst->channels != 1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_ARGUMENT, kTraceError,
+            "StartRecordingPlayout() invalid compression");
+        return -1;
+    }
+    if (codecInst == NULL)
+    {
+        format = kFileFormatPcm16kHzFile;
+        codecInst = &dummyCodec;
+    }
+    else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
+             (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
+             (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
+    {
+        format = kFileFormatWavFile;
+    }
+    else
+    {
+        format = kFileFormatCompressedFile;
+    }
+
+    CriticalSectionScoped cs(_fileCritSect);
+
+    // Destroy the old instance
+    if (_outputFileRecorderPtr)
+    {
+        _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+        _outputFileRecorderPtr = NULL;
+    }
+
+    _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
+        _outputFileRecorderId, (const FileFormats)format);
+    if (_outputFileRecorderPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartRecordingPlayout() file recorder format is not correct");
+        return -1;
+    }
+
+    if (_outputFileRecorderPtr->StartRecordingAudioFile(*stream, *codecInst,
+                                                        notificationTime) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
+                                           "StartRecordingPlayout() failed to "
+                                           "start file recording");
+        _outputFileRecorderPtr->StopRecording();
+        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+        _outputFileRecorderPtr = NULL;
+        return -1;
+    }
+
+    _outputFileRecorderPtr->RegisterModuleFileCallback(this);
+    _outputFileRecording = true;
+
+    return 0;
+}
+
+int Channel::StopRecordingPlayout()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "Channel::StopRecordingPlayout()");
+
+    if (!_outputFileRecording)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                     "StopRecordingPlayout() is not recording");
+        return -1;
+    }
+
+    CriticalSectionScoped cs(_fileCritSect);
+
+    if (_outputFileRecorderPtr->StopRecording() != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_STOP_RECORDING_FAILED, kTraceError,
+            "StopRecording() could not stop recording");
+        return -1;
+    }
+    _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+    FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+    _outputFileRecorderPtr = NULL;
+    _outputFileRecording = false;
+
+    return 0;
+}
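+
+// Usage sketch (illustrative only): recording of the playout signal is
+// normally requested through the VoEFile sub-API, which lands in the
+// StartRecordingPlayout()/StopRecordingPlayout() implementations above.
+// A NULL CodecInst records 16 kHz linear PCM; L16/PCMU/PCMA yield a WAV
+// file and any other codec a compressed file. Argument lists are assumptions.
+//
+//  VoEFile* file = VoEFile::GetInterface(voe);
+//  file->StartRecordingPlayout(channel, "playout.wav");
+//  ...
+//  file->StopRecordingPlayout(channel);
+//  file->Release();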
+
+void
+Channel::SetMixWithMicStatus(bool mix)
+{
+    _mixFileWithMicrophone = mix;
+}
+
+int
+Channel::GetSpeechOutputLevel(WebRtc_UWord32& level) const
+{
+    WebRtc_Word8 currentLevel = _outputAudioLevel.Level();
+    level = static_cast<WebRtc_Word32> (currentLevel);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+               VoEId(_instanceId,_channelId),
+               "GetSpeechOutputLevel() => level=%u", level);
+    return 0;
+}
+
+int
+Channel::GetSpeechOutputLevelFullRange(WebRtc_UWord32& level) const
+{
+    WebRtc_Word16 currentLevel = _outputAudioLevel.LevelFullRange();
+    level = static_cast<WebRtc_Word32> (currentLevel);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+               VoEId(_instanceId,_channelId),
+               "GetSpeechOutputLevelFullRange() => level=%u", level);
+    return 0;
+}
+
+int
+Channel::SetMute(bool enable)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+               "Channel::SetMute(enable=%d)", enable);
+    _mute = enable;
+    return 0;
+}
+
+bool
+Channel::Mute() const
+{
+    return _mute;
+}
+
+int
+Channel::SetOutputVolumePan(float left, float right)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+               "Channel::SetOutputVolumePan()");
+    _panLeft = left;
+    _panRight = right;
+    return 0;
+}
+
+int
+Channel::GetOutputVolumePan(float& left, float& right) const
+{
+    left = _panLeft;
+    right = _panRight;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+               VoEId(_instanceId,_channelId),
+               "GetOutputVolumePan() => left=%3.2f, right=%3.2f", left, right);
+    return 0;
+}
+
+int
+Channel::SetChannelOutputVolumeScaling(float scaling)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+               "Channel::SetChannelOutputVolumeScaling()");
+    _outputGain = scaling;
+    return 0;
+}
+
+int
+Channel::GetChannelOutputVolumeScaling(float& scaling) const
+{
+    scaling = _outputGain;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+               VoEId(_instanceId,_channelId),
+               "GetChannelOutputVolumeScaling() => scaling=%3.2f", scaling);
+    return 0;
+}
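+
+// Usage sketch (illustrative only): the mute, pan and scaling setters above
+// are typically reached through the VoEVolumeControl sub-API; the exact
+// method names and argument lists below are assumptions.
+//
+//  VoEVolumeControl* volume = VoEVolumeControl::GetInterface(voe);
+//  volume->SetInputMute(channel, true);                    // mute microphone
+//  volume->SetOutputVolumePan(channel, 1.0f, 0.3f);        // favor left ear
+//  volume->SetChannelOutputVolumeScaling(channel, 0.5f);   // ~-6 dB playout
+//  volume->Release();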
+
+#ifdef WEBRTC_SRTP
+
+int
+Channel::EnableSRTPSend(
+    CipherTypes cipherType,
+    int cipherKeyLength,
+    AuthenticationTypes authType,
+    int authKeyLength,
+    int authTagLength,
+    SecurityLevels level,
+    const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
+    bool useForRTCP)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+               "Channel::EnableSRTPSend()");
+
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (_encrypting)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "EnableSRTPSend() encryption already enabled");
+        return -1;
+    }
+
+    if (key == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceWarning,
+            "EnableSRTPSend() invalid key string");
+        return -1;
+    }
+
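+    // The check below encodes three rules:
+    //  - when encryption is requested, the cipher key length must lie within
+    //    [kVoiceEngineMinSrtpEncryptLength, kVoiceEngineMaxSrtpEncryptLength];
+    //  - when HMAC-SHA1 authentication is requested, neither the auth key nor
+    //    the auth tag may exceed kVoiceEngineMaxSrtpAuthSha1Length;
+    //  - when null authentication is requested, the auth key and tag are
+    //    bounded by the corresponding *AuthNull* constants.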
+    if (((kEncryption == level ||
+            kEncryptionAndAuthentication == level) &&
+            (cipherKeyLength < kVoiceEngineMinSrtpEncryptLength ||
+            cipherKeyLength > kVoiceEngineMaxSrtpEncryptLength)) ||
+        ((kAuthentication == level ||
+            kEncryptionAndAuthentication == level) &&
+            kAuthHmacSha1 == authType &&
+            (authKeyLength > kVoiceEngineMaxSrtpAuthSha1Length ||
+            authTagLength > kVoiceEngineMaxSrtpAuthSha1Length)) ||
+        ((kAuthentication == level ||
+            kEncryptionAndAuthentication == level) &&
+            kAuthNull == authType &&
+            (authKeyLength > kVoiceEngineMaxSrtpKeyAuthNullLength ||
+            authTagLength > kVoiceEngineMaxSrtpTagAuthNullLength)))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "EnableSRTPSend() invalid key length(s)");
+        return -1;
+    }
+
+    if (_srtpModule.EnableSRTPEncrypt(
+        !useForRTCP,
+        (SrtpModule::CipherTypes)cipherType,
+        cipherKeyLength,
+        (SrtpModule::AuthenticationTypes)authType,
+        authKeyLength, authTagLength,
+        (SrtpModule::SecurityLevels)level,
+        key) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SRTP_ERROR, kTraceError,
+            "EnableSRTPSend() failed to enable SRTP encryption");
+        return -1;
+    }
+
+    if (_encryptionPtr == NULL)
+    {
+        _encryptionPtr = &_srtpModule;
+    }
+    _encrypting = true;
+
+    return 0;
+}
+
+int
+Channel::DisableSRTPSend()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+               "Channel::DisableSRTPSend()");
+
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (!_encrypting)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "DisableSRTPSend() SRTP encryption already disabled");
+        return 0;
+    }
+
+    _encrypting = false;
+
+    if (_srtpModule.DisableSRTPEncrypt() == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SRTP_ERROR, kTraceError,
+            "DisableSRTPSend() failed to disable SRTP encryption");
+        return -1;
+    }
+
+    if (!_srtpModule.SRTPDecrypt() && !_srtpModule.SRTPEncrypt())
+    {
+        // Both directions are disabled
+        _encryptionPtr = NULL;
+    }
+
+    return 0;
+}
+
+int
+Channel::EnableSRTPReceive(
+    CipherTypes  cipherType,
+    int cipherKeyLength,
+    AuthenticationTypes authType,
+    int authKeyLength,
+    int authTagLength,
+    SecurityLevels level,
+    const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
+    bool useForRTCP)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+               "Channel::EnableSRTPReceive()");
+
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (_decrypting)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "EnableSRTPReceive() SRTP decryption already enabled");
+        return -1;
+    }
+
+    if (key == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceWarning,
+            "EnableSRTPReceive() invalid key string");
+        return -1;
+    }
+
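+    // Same key and tag length rules as in EnableSRTPSend() above.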
+    if ((((kEncryption == level) ||
+            (kEncryptionAndAuthentication == level)) &&
+            ((cipherKeyLength < kVoiceEngineMinSrtpEncryptLength) ||
+            (cipherKeyLength > kVoiceEngineMaxSrtpEncryptLength))) ||
+        (((kAuthentication == level) ||
+            (kEncryptionAndAuthentication == level)) &&
+            (kAuthHmacSha1 == authType) &&
+            ((authKeyLength > kVoiceEngineMaxSrtpAuthSha1Length) ||
+            (authTagLength > kVoiceEngineMaxSrtpAuthSha1Length))) ||
+        (((kAuthentication == level) ||
+            (kEncryptionAndAuthentication == level)) &&
+            (kAuthNull == authType) &&
+            ((authKeyLength > kVoiceEngineMaxSrtpKeyAuthNullLength) ||
+            (authTagLength > kVoiceEngineMaxSrtpTagAuthNullLength))))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "EnableSRTPReceive() invalid key length(s)");
+        return -1;
+    }
+
+    if (_srtpModule.EnableSRTPDecrypt(
+        !useForRTCP,
+        (SrtpModule::CipherTypes)cipherType,
+        cipherKeyLength,
+        (SrtpModule::AuthenticationTypes)authType,
+        authKeyLength,
+        authTagLength,
+        (SrtpModule::SecurityLevels)level,
+        key) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SRTP_ERROR, kTraceError,
+            "EnableSRTPReceive() failed to enable SRTP decryption");
+        return -1;
+    }
+
+    if (_encryptionPtr == NULL)
+    {
+        _encryptionPtr = &_srtpModule;
+    }
+
+    _decrypting = true;
+
+    return 0;
+}
+
+int
+Channel::DisableSRTPReceive()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+               "Channel::DisableSRTPReceive()");
+
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (!_decrypting)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "DisableSRTPReceive() SRTP decryption already disabled");
+        return 0;
+    }
+
+    _decrypting = false;
+
+    if (_srtpModule.DisableSRTPDecrypt() == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SRTP_ERROR, kTraceError,
+            "DisableSRTPReceive() failed to disable SRTP decryption");
+        return -1;
+    }
+
+    if (!_srtpModule.SRTPDecrypt() && !_srtpModule.SRTPEncrypt())
+    {
+        _encryptionPtr = NULL;
+    }
+
+    return 0;
+}
+
+#endif
+
+int
+Channel::RegisterExternalEncryption(Encryption& encryption)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+               "Channel::RegisterExternalEncryption()");
+
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (_encryptionPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "RegisterExternalEncryption() encryption already enabled");
+        return -1;
+    }
+
+    _encryptionPtr = &encryption;
+
+    _decrypting = true;
+    _encrypting = true;
+
+    return 0;
+}
+
+int
+Channel::DeRegisterExternalEncryption()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+               "Channel::DeRegisterExternalEncryption()");
+
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (!_encryptionPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "DeRegisterExternalEncryption() encryption already disabled");
+        return 0;
+    }
+
+    _decrypting = false;
+    _encrypting = false;
+
+    _encryptionPtr = NULL;
+
+    return 0;
+}
+
+int Channel::SendTelephoneEventOutband(unsigned char eventCode,
+                                          int lengthMs, int attenuationDb,
+                                          bool playDtmfEvent)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+               "Channel::SendTelephoneEventOutband(..., playDtmfEvent=%d)",
+               playDtmfEvent);
+
+    _playOutbandDtmfEvent = playDtmfEvent;
+
+    if (_rtpRtcpModule.SendTelephoneEventOutband(eventCode, lengthMs,
+                                                 attenuationDb) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SEND_DTMF_FAILED,
+            kTraceWarning,
+            "SendTelephoneEventOutband() failed to send event");
+        return -1;
+    }
+    return 0;
+}
+
+int Channel::SendTelephoneEventInband(unsigned char eventCode,
+                                         int lengthMs,
+                                         int attenuationDb,
+                                         bool playDtmfEvent)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+               "Channel::SendTelephoneEventInband(..., playDtmfEvent=%d)",
+               playDtmfEvent);
+
+    _playInbandDtmfEvent = playDtmfEvent;
+    _inbandDtmfQueue.AddDtmf(eventCode, lengthMs, attenuationDb);
+
+    return 0;
+}
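+
+// Usage sketch (illustrative only): both DTMF paths above are normally reached
+// through the VoEDtmf sub-API, where an out-of-band flag selects between the
+// RTP telephone-event path and the in-band tone queue. The exact signature of
+// SendTelephoneEvent() below is an assumption.
+//
+//  VoEDtmf* dtmf = VoEDtmf::GetInterface(voe);
+//  dtmf->SendTelephoneEvent(channel, 5, true);    // digit 5, out-of-band
+//  dtmf->SendTelephoneEvent(channel, 5, false);   // digit 5, in-band tone
+//  dtmf->Release();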
+
+int
+Channel::SetDtmfPlayoutStatus(bool enable)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+               "Channel::SetDtmfPlayoutStatus()");
+    if (_audioCodingModule.SetDtmfPlayoutStatus(enable) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceWarning,
+            "SetDtmfPlayoutStatus() failed to set Dtmf playout");
+        return -1;
+    }
+    return 0;
+}
+
+bool
+Channel::DtmfPlayoutStatus() const
+{
+    return _audioCodingModule.DtmfPlayoutStatus();
+}
+
+int
+Channel::SetSendTelephoneEventPayloadType(unsigned char type)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+               "Channel::SetSendTelephoneEventPayloadType()");
+    if (type < 0 || type > 127)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetSendTelephoneEventPayloadType() invalid type");
+        return -1;
+    }
+    const WebRtc_Word8 payloadName[RTP_PAYLOAD_NAME_SIZE] =
+        "telephone-event";
+    if (_rtpRtcpModule.RegisterSendPayload(payloadName, type, 8000) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "SetSendTelephoneEventPayloadType() failed to register send "
+            "payload type");
+        return -1;
+    }
+    _sendTelephoneEventPayloadType = type;
+    return 0;
+}
+
+int
+Channel::GetSendTelephoneEventPayloadType(unsigned char& type)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetSendTelephoneEventPayloadType()");
+    type = _sendTelephoneEventPayloadType;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+               VoEId(_instanceId,_channelId),
+               "GetSendTelephoneEventPayloadType() => type=%u", type);
+    return 0;
+}
+
+#ifdef WEBRTC_DTMF_DETECTION
+
+WebRtc_Word32
+Channel::RegisterTelephoneEventDetection(
+    TelephoneEventDetectionMethods detectionMethod,
+    VoETelephoneEventObserver& observer)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::RegisterTelephoneEventDetection()");
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (_telephoneEventDetectionPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "RegisterTelephoneEventDetection() detection already enabled");
+        return -1;
+    }
+
+    _telephoneEventDetectionPtr = &observer;
+
+    switch (detectionMethod)
+    {
+        case kInBand:
+            _inbandTelephoneEventDetection = true;
+            _outOfBandTelephoneEventDetecion = false;
+            break;
+        case kOutOfBand:
+            _inbandTelephoneEventDetection = false;
+            _outOfBandTelephoneEventDetecion = true;
+            break;
+        case kInAndOutOfBand:
+            _inbandTelephoneEventDetection = true;
+            _outOfBandTelephoneEventDetecion = true;
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "RegisterTelephoneEventDetection() invalid detection method");
+            return -1;
+    }
+
+    if (_inbandTelephoneEventDetection)
+    {
+        // Enable in-band DTMF detection in the ACM.
+        if (_audioCodingModule.RegisterIncomingMessagesCallback(this) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+                "RegisterTelephoneEventDetection() failed to enable Dtmf "
+                "detection");
+        }
+    }
+
+    // Enable/disable out-of-band detection of received telephone-events.
+    // When enabled, RtpAudioFeedback::OnReceivedTelephoneEvent() will be
+    // called two times by the RTP/RTCP module (start & end).
+    const bool forwardToDecoder =
+        _rtpRtcpModule.TelephoneEventForwardToDecoder();
+    const bool detectEndOfTone = true;
+    _rtpRtcpModule.SetTelephoneEventStatus(_outOfBandTelephoneEventDetecion,
+                                           forwardToDecoder,
+                                           detectEndOfTone);
+
+    return 0;
+}
+
+int
+Channel::DeRegisterTelephoneEventDetection()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::DeRegisterTelephoneEventDetection()");
+
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (!_telephoneEventDetectionPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION,
+            kTraceWarning,
+            "DeRegisterTelephoneEventDetection() detection already disabled");
+        return 0;
+    }
+
+    // Disable out-of-band event detection
+    const bool forwardToDecoder =
+        _rtpRtcpModule.TelephoneEventForwardToDecoder();
+    _rtpRtcpModule.SetTelephoneEventStatus(false, forwardToDecoder);
+
+    // Disable in-band Dtmf detection
+    _audioCodingModule.RegisterIncomingMessagesCallback(NULL);
+
+    _inbandTelephoneEventDetection = false;
+    _outOfBandTelephoneEventDetecion = false;
+    _telephoneEventDetectionPtr = NULL;
+
+    return 0;
+}
+
+int
+Channel::GetTelephoneEventDetectionStatus(
+    bool& enabled,
+    TelephoneEventDetectionMethods& detectionMethod)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::GetTelephoneEventDetectionStatus()");
+
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+        enabled = (_telephoneEventDetectionPtr != NULL);
+    }
+
+    if (enabled)
+    {
+        if (_inbandTelephoneEventDetection && !_outOfBandTelephoneEventDetecion)
+            detectionMethod = kInBand;
+        else if (!_inbandTelephoneEventDetection
+            && _outOfBandTelephoneEventDetecion)
+            detectionMethod = kOutOfBand;
+        else if (_inbandTelephoneEventDetection
+            && _outOfBandTelephoneEventDetecion)
+            detectionMethod = kInAndOutOfBand;
+        else
+        {
+            assert(false);
+            return -1;
+        }
+    }
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+               VoEId(_instanceId, _channelId),
+               "GetTelephoneEventDetectionStatus() => enabled=%d, "
+               "detectionMethod=%d", enabled, detectionMethod);
+    return 0;
+}
+
+#endif  // #ifdef WEBRTC_DTMF_DETECTION
+
+int
+Channel::UpdateRxVadDetection(AudioFrame& audioFrame)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::UpdateRxVadDetection()");
+
+    const int vadDecision =
+        (audioFrame._vadActivity == AudioFrame::kVadActive) ? 1 : 0;
+
+    if ((vadDecision != _oldVadDecision) && _rxVadObserverPtr)
+    {
+        OnRxVadDetected(vadDecision);
+        _oldVadDecision = vadDecision;
+    }
+
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::UpdateRxVadDetection() => vadDecision=%d",
+                 vadDecision);
+    return 0;
+}
+
+int
+Channel::RegisterRxVadObserver(VoERxVadCallback &observer)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::RegisterRxVadObserver()");
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (_rxVadObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "RegisterRxVadObserver() observer already enabled");
+        return -1;
+    }
+    if (!_audioCodingModule.ReceiveVADStatus())
+    {
+        if (_audioCodingModule.SetReceiveVADStatus(true) == -1)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+                "RegisterRxVadObserver() failed to enable RX VAD");
+            return -1;
+        }
+    }
+    _rxVadObserverPtr = &observer;
+    _RxVadDetection = true;
+    return 0;
+}
+
+int
+Channel::DeRegisterRxVadObserver()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::DeRegisterRxVadObserver()");
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (!_rxVadObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "DeRegisterRxVadObserver() observer already disabled");
+        return 0;
+    }
+    _rxVadObserverPtr = NULL;
+    _RxVadDetection = false;
+    return 0;
+}
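+
+// Usage sketch (illustrative only): a receive-side VAD observer implements the
+// VoERxVadCallback interface and is registered per channel, after which
+// UpdateRxVadDetection() above reports every change in the VAD decision.
+// The registration call below is an assumption.
+//
+//  class MyRxVadObserver : public VoERxVadCallback
+//  {
+//  public:
+//      virtual void OnRxVad(int channel, int vadDecision)
+//      {
+//          printf("channel %d RX VAD decision: %d\n", channel, vadDecision);
+//      }
+//  };
+//
+//  MyRxVadObserver observer;
+//  ap->RegisterRxVadObserver(channel, observer);  // ap: VoEAudioProcessing*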
+
+int
+Channel::VoiceActivityIndicator(int &activity)
+{
+    activity = _sendFrameType;
+
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+               "Channel::VoiceActivityIndicator(indicator=%d)", activity);
+    return 0;
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+
+int
+Channel::SetRxAgcStatus(const bool enable, const AgcModes mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetRxAgcStatus(enable=%d, mode=%d)",
+                 (int)enable, (int)mode);
+
+    GainControl::Mode agcMode(GainControl::kFixedDigital);
+    switch (mode)
+    {
+        case kAgcDefault:
+            agcMode = GainControl::kAdaptiveDigital;
+            break;
+        case kAgcUnchanged:
+            agcMode = _rxAudioProcessingModulePtr->gain_control()->mode();
+            break;
+        case kAgcFixedDigital:
+            agcMode = GainControl::kFixedDigital;
+            break;
+        case kAgcAdaptiveDigital:
+            agcMode = GainControl::kAdaptiveDigital;
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetRxAgcStatus() invalid Agc mode");
+            return -1;
+    }
+
+    if (_rxAudioProcessingModulePtr->gain_control()->set_mode(agcMode) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetRxAgcStatus() failed to set Agc mode");
+        return -1;
+    }
+    if (_rxAudioProcessingModulePtr->gain_control()->Enable(enable) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetRxAgcStatus() failed to set Agc state");
+        return -1;
+    }
+
+    _rxAgcIsEnabled = enable;
+
+    _rxApmIsEnabled = (_rxAgcIsEnabled || _rxNsIsEnabled);
+
+    return 0;
+}
+
+int
+Channel::GetRxAgcStatus(bool& enabled, AgcModes& mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "Channel::GetRxAgcStatus(enable=?, mode=?)");
+
+    bool enable = _rxAudioProcessingModulePtr->gain_control()->is_enabled();
+    GainControl::Mode agcMode =
+        _rxAudioProcessingModulePtr->gain_control()->mode();
+
+    enabled = enable;
+
+    switch (agcMode)
+    {
+        case GainControl::kFixedDigital:
+            mode = kAgcFixedDigital;
+            break;
+        case GainControl::kAdaptiveDigital:
+            mode = kAgcAdaptiveDigital;
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(
+                VE_APM_ERROR, kTraceError,
+                "GetRxAgcStatus() invalid Agc mode");
+            return -1;
+    }
+
+    return 0;
+}
+
+int
+Channel::SetRxAgcConfig(const AgcConfig config)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetRxAgcConfig()");
+
+    if (_rxAudioProcessingModulePtr->gain_control()->set_target_level_dbfs(
+        config.targetLeveldBOv) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetRxAgcConfig() failed to set target peak |level| "
+            "(or envelope) of the Agc");
+        return -1;
+    }
+    if (_rxAudioProcessingModulePtr->gain_control()->set_compression_gain_db(
+        config.digitalCompressionGaindB) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetRxAgcConfig() failed to set the range in |gain| the"
+            " digital compression stage may apply");
+        return -1;
+    }
+    if (_rxAudioProcessingModulePtr->gain_control()->enable_limiter(
+        config.limiterEnable) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetRxAgcConfig() failed to set hard limiter to the signal");
+        return -1;
+    }
+
+    return 0;
+}
+
+int
+Channel::GetRxAgcConfig(AgcConfig& config)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetRxAgcConfig(config=?)");
+
+    config.targetLeveldBOv =
+        _rxAudioProcessingModulePtr->gain_control()->target_level_dbfs();
+    config.digitalCompressionGaindB =
+        _rxAudioProcessingModulePtr->gain_control()->compression_gain_db();
+    config.limiterEnable =
+        _rxAudioProcessingModulePtr->gain_control()->is_limiter_enabled();
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+               VoEId(_instanceId,_channelId), "GetRxAgcConfig() => "
+                   "targetLeveldBOv=%u, digitalCompressionGaindB=%u,"
+                   " limiterEnable=%d",
+                   config.targetLeveldBOv,
+                   config.digitalCompressionGaindB,
+                   config.limiterEnable);
+
+    return 0;
+}
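+
+// Usage sketch (illustrative only): receive-side AGC is normally controlled
+// through the VoEAudioProcessing sub-API, which ends up in the
+// SetRxAgcStatus()/SetRxAgcConfig() implementations above. The argument lists
+// below are assumptions.
+//
+//  VoEAudioProcessing* ap = VoEAudioProcessing::GetInterface(voe);
+//  ap->SetRxAgcStatus(channel, true, kAgcAdaptiveDigital);
+//  AgcConfig config;
+//  ap->GetRxAgcConfig(channel, config);
+//  config.digitalCompressionGaindB = 12;
+//  ap->SetRxAgcConfig(channel, config);
+//  ap->Release();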
+
+#endif // #ifdef WEBRTC_VOICE_ENGINE_AGC
+
+#ifdef WEBRTC_VOICE_ENGINE_NR
+
+int
+Channel::SetRxNsStatus(const bool enable, const NsModes mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetRxNsStatus(enable=%d, mode=%d)",
+                 (int)enable, (int)mode);
+
+    NoiseSuppression::Level nsLevel(
+        (NoiseSuppression::Level)WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_MODE);
+    switch (mode)
+    {
+        case kNsDefault:
+            nsLevel = (NoiseSuppression::Level)
+                WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_MODE;
+            break;
+        case kNsUnchanged:
+            nsLevel = _rxAudioProcessingModulePtr->noise_suppression()->level();
+            break;
+        case kNsConference:
+            nsLevel = NoiseSuppression::kHigh;
+            break;
+        case kNsLowSuppression:
+            nsLevel = NoiseSuppression::kLow;
+            break;
+        case kNsModerateSuppression:
+            nsLevel = NoiseSuppression::kModerate;
+            break;
+        case kNsHighSuppression:
+            nsLevel = NoiseSuppression::kHigh;
+            break;
+        case kNsVeryHighSuppression:
+            nsLevel = NoiseSuppression::kVeryHigh;
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetRxNsStatus() invalid Ns mode");
+            return -1;
+    }
+
+    if (_rxAudioProcessingModulePtr->noise_suppression()->set_level(nsLevel)
+        != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetRxNsStatus() failed to set Ns level");
+        return -1;
+    }
+    if (_rxAudioProcessingModulePtr->noise_suppression()->Enable(enable) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetRxNsStatus() failed to set Ns state");
+        return -1;
+    }
+
+    _rxNsIsEnabled = enable;
+    _rxApmIsEnabled = (_rxAgcIsEnabled || _rxNsIsEnabled);
+
+    return 0;
+}
+
+int
+Channel::GetRxNsStatus(bool& enabled, NsModes& mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetRxNsStatus(enable=?, mode=?)");
+
+    bool enable =
+        _rxAudioProcessingModulePtr->noise_suppression()->is_enabled();
+    NoiseSuppression::Level ncLevel =
+        _rxAudioProcessingModulePtr->noise_suppression()->level();
+
+    enabled = enable;
+
+    switch (ncLevel)
+    {
+        case NoiseSuppression::kLow:
+            mode = kNsLowSuppression;
+            break;
+        case NoiseSuppression::kModerate:
+            mode = kNsModerateSuppression;
+            break;
+        case NoiseSuppression::kHigh:
+            mode = kNsHighSuppression;
+            break;
+        case NoiseSuppression::kVeryHigh:
+            mode = kNsVeryHighSuppression;
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(
+                VE_APM_ERROR, kTraceError,
+                "GetRxNsStatus() invalid Ns mode");
+            return -1;
+    }
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+               VoEId(_instanceId,_channelId),
+               "GetRxNsStatus() => enabled=%d, mode=%d", enabled, mode);
+    return 0;
+}
+
+#endif // #ifdef WEBRTC_VOICE_ENGINE_NR
+
+int
+Channel::RegisterRTPObserver(VoERTPObserver& observer)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::RegisterRTPObserver()");
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (_rtpObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "RegisterRTPObserver() observer already enabled");
+        return -1;
+    }
+
+    _rtpObserverPtr = &observer;
+    _rtpObserver = true;
+
+    return 0;
+}
+
+int
+Channel::DeRegisterRTPObserver()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::DeRegisterRTPObserver()");
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (!_rtpObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "DeRegisterRTPObserver() observer already disabled");
+        return 0;
+    }
+
+    _rtpObserver = false;
+    _rtpObserverPtr = NULL;
+
+    return 0;
+}
+
+int
+Channel::RegisterRTCPObserver(VoERTCPObserver& observer)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::RegisterRTCPObserver()");
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (_rtcpObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "RegisterRTCPObserver() observer already enabled");
+        return -1;
+    }
+
+    _rtcpObserverPtr = &observer;
+    _rtcpObserver = true;
+
+    return 0;
+}
+
+int
+Channel::DeRegisterRTCPObserver()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::DeRegisterRTCPObserver()");
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (!_rtcpObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "DeRegisterRTCPObserver() observer already disabled");
+        return 0;
+    }
+
+    _rtcpObserver = false;
+    _rtcpObserverPtr = NULL;
+
+    return 0;
+}
+
+int
+Channel::SetLocalSSRC(unsigned int ssrc)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::SetLocalSSRC()");
+    if (_sending)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_SENDING, kTraceError,
+            "SetLocalSSRC() already sending");
+        return -1;
+    }
+    if (_rtpRtcpModule.SetSSRC(ssrc) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "SetLocalSSRC() failed to set SSRC");
+        return -1;
+    }
+    return 0;
+}
+
+int
+Channel::GetLocalSSRC(unsigned int& ssrc)
+{
+    ssrc = _rtpRtcpModule.SSRC();
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId,_channelId),
+                 "GetLocalSSRC() => ssrc=%lu", ssrc);
+    return 0;
+}
+
+int
+Channel::GetRemoteSSRC(unsigned int& ssrc)
+{
+    ssrc = _rtpRtcpModule.RemoteSSRC();
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId,_channelId),
+                 "GetRemoteSSRC() => ssrc=%lu", ssrc);
+    return 0;
+}
+
+int
+Channel::GetRemoteCSRCs(unsigned int arrCSRC[15])
+{
+    if (arrCSRC == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "GetRemoteCSRCs() invalid array argument");
+        return -1;
+    }
+    WebRtc_UWord32 arrOfCSRC[kRtpCsrcSize];
+    WebRtc_Word32 CSRCs(0);
+    CSRCs = _rtpRtcpModule.CSRCs(arrOfCSRC);
+    if (CSRCs > 0)
+    {
+        memcpy(arrCSRC, arrOfCSRC, CSRCs * sizeof(WebRtc_UWord32));
+        for (int i = 0; i < (int) CSRCs; i++)
+        {
+            WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                       VoEId(_instanceId, _channelId),
+                       "GetRemoteCSRCs() => arrCSRC[%d]=%lu", i, arrCSRC[i]);
+        }
+    } else
+    {
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                   VoEId(_instanceId, _channelId),
+                   "GetRemoteCSRCs() => list is empty!");
+    }
+    return CSRCs;
+}
+
+int
+Channel::SetRTPAudioLevelIndicationStatus(bool enable, unsigned char ID)
+{
+    _includeAudioLevelIndication = enable;
+    return _rtpRtcpModule.SetRTPAudioLevelIndicationStatus(enable, ID);
+}
+
+int
+Channel::GetRTPAudioLevelIndicationStatus(bool& enabled, unsigned char& ID)
+{
+    // Retrieve the status first so that valid values are traced and returned.
+    const int ret =
+        _rtpRtcpModule.GetRTPAudioLevelIndicationStatus(enabled, ID);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId,_channelId),
+                 "GetRTPAudioLevelIndicationStatus() => enabled=%d, ID=%u",
+                 enabled, ID);
+    return ret;
+}
+
+int
+Channel::SetRTCPStatus(bool enable)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetRTCPStatus()");
+    if (_rtpRtcpModule.SetRTCPStatus(enable ?
+        kRtcpCompound : kRtcpOff) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "SetRTCPStatus() failed to set RTCP status");
+        return -1;
+    }
+    return 0;
+}
+
+int
+Channel::GetRTCPStatus(bool& enabled)
+{
+    RTCPMethod method = _rtpRtcpModule.RTCP();
+    enabled = (method != kRtcpOff);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId,_channelId),
+                 "GetRTCPStatus() => enabled=%d", enabled);
+    return 0;
+}
+
+int
+Channel::SetRTCP_CNAME(const char cName[256])
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::SetRTCP_CNAME()");
+    if (_rtpRtcpModule.SetCNAME(cName) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "SetRTCP_CNAME() failed to set RTCP CNAME");
+        return -1;
+    }
+    return 0;
+}
+
+int
+Channel::GetRTCP_CNAME(char cName[256])
+{
+    if (_rtpRtcpModule.CNAME(cName) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "GetRTCP_CNAME() failed to retrieve RTCP CNAME");
+        return -1;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId, _channelId),
+                 "GetRTCP_CNAME() => cName=%s", cName);
+    return 0;
+}
+
+int
+Channel::GetRemoteRTCP_CNAME(char cName[256])
+{
+    if (cName == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "GetRemoteRTCP_CNAME() invalid CNAME input buffer");
+        return -1;
+    }
+    WebRtc_Word8 cname[RTCP_CNAME_SIZE];
+    const WebRtc_UWord32 remoteSSRC = _rtpRtcpModule.RemoteSSRC();
+    if (_rtpRtcpModule.RemoteCNAME(remoteSSRC, cname) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CANNOT_RETRIEVE_CNAME, kTraceError,
+            "GetRemoteRTCP_CNAME() failed to retrieve remote RTCP CNAME");
+        return -1;
+    }
+    strcpy(cName, cname);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId, _channelId),
+                 "GetRemoteRTCP_CNAME() => cName=%s", cName);
+    return 0;
+}
+
+int
+Channel::GetRemoteRTCPData(
+    unsigned int& NTPHigh,
+    unsigned int& NTPLow,
+    unsigned int& timestamp,
+    unsigned int& playoutTimestamp,
+    unsigned int* jitter,
+    unsigned short* fractionLost)
+{
+    // --- Information from sender info in received Sender Reports
+
+    RTCPSenderInfo senderInfo;
+    if (_rtpRtcpModule.RemoteRTCPStat(&senderInfo) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "GetRemoteRTCPData() failed to retrieve sender info for remote "
+            "side");
+        return -1;
+    }
+
+    // We only utilize 12 out of 20 bytes in the sender info (ignores packet
+    // and octet count)
+    NTPHigh = senderInfo.NTPseconds;
+    NTPLow = senderInfo.NTPfraction;
+    timestamp = senderInfo.RTPtimeStamp;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId, _channelId),
+                 "GetRemoteRTCPData() => NTPHigh=%lu, NTPLow=%lu, "
+                 "timestamp=%lu",
+                 NTPHigh, NTPLow, timestamp);
+
+    // --- Locally derived information
+
+    // This value is updated on each incoming RTCP packet (0 when no packet
+    // has been received)
+    playoutTimestamp = _playoutTimeStampRTCP;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId, _channelId),
+                 "GetRemoteRTCPData() => playoutTimestamp=%lu",
+                 _playoutTimeStampRTCP);
+
+    if (NULL != jitter || NULL != fractionLost)
+    {
+        WebRtc_Word32 ret(-1);
+        RTCPReportBlock reportBlock;
+        WebRtc_Word32 remoteSSRC = _rtpRtcpModule.RemoteSSRC();
+        if (remoteSSRC > 0)
+        {
+            // We must feed the module with remote SSRC to get the correct
+            // report block.
+            ret = _rtpRtcpModule.RemoteRTCPStat(remoteSSRC, &reportBlock);
+        }
+        if ((remoteSSRC < 0) || (ret != 0))
+        {
+            reportBlock.jitter = 0;
+            reportBlock.fractionLost = 0;
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                         VoEId(_instanceId, _channelId),
+                         "GetRemoteRTCPData() failed to measure statistics due "
+                         "to lack of received RTP and/or RTCP packets");
+        }
+        if (NULL != jitter)
+        {
+            *jitter = reportBlock.jitter;
+            WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                         VoEId(_instanceId, _channelId),
+                         "GetRemoteRTCPData() => jitter = %lu", *jitter);
+        }
+        if (NULL != fractionLost)
+        {
+            *fractionLost = reportBlock.fractionLost;
+            WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                         VoEId(_instanceId, _channelId),
+                         "GetRemoteRTCPData() => fractionLost = %lu",
+                         *fractionLost);
+        }
+    }
+    return 0;
+}
+
+int
+Channel::SendApplicationDefinedRTCPPacket(const unsigned char subType,
+                                             unsigned int name,
+                                             const char* data,
+                                             unsigned short dataLengthInBytes)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::SendApplicationDefinedRTCPPacket()");
+    if (!_sending)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_NOT_SENDING, kTraceError,
+            "SendApplicationDefinedRTCPPacket() not sending");
+        return -1;
+    }
+    if (NULL == data)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SendApplicationDefinedRTCPPacket() invalid data value");
+        return -1;
+    }
+    if (dataLengthInBytes % 4 != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SendApplicationDefinedRTCPPacket() invalid length value");
+        return -1;
+    }
+    RTCPMethod status = _rtpRtcpModule.RTCP();
+    if (status == kRtcpOff)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTCP_ERROR, kTraceError,
+            "SendApplicationDefinedRTCPPacket() RTCP is disabled");
+        return -1;
+    }
+
+    // Create and schedule the RTCP APP packet for transmission
+    if (_rtpRtcpModule.SetRTCPApplicationSpecificData(
+        subType,
+        name,
+        (const unsigned char*) data,
+        dataLengthInBytes) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SEND_ERROR, kTraceError,
+            "SendApplicationDefinedRTCPPacket() failed to send RTCP packet");
+        return -1;
+    }
+    return 0;
+}
+
+int
+Channel::GetRTPStatistics(
+        unsigned int& averageJitterMs,
+        unsigned int& maxJitterMs,
+        unsigned int& discardedPackets)
+{
+    WebRtc_UWord8 fraction_lost(0);
+    WebRtc_UWord32 cum_lost(0);
+    WebRtc_UWord32 ext_max(0);
+    WebRtc_UWord32 jitter(0);
+    WebRtc_UWord32 max_jitter(0);
+
+    // The jitter statistics is updated for each received RTP packet and is
+    // based on received packets.
+    if (_rtpRtcpModule.StatisticsRTP(&fraction_lost,
+                                     &cum_lost,
+                                     &ext_max,
+                                     &jitter,
+                                     &max_jitter) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CANNOT_RETRIEVE_RTP_STAT, kTraceWarning,
+            "GetRTPStatistics() failed to read RTP statistics from the "
+            "RTP/RTCP module");
+    }
+
+    const WebRtc_Word32 playoutFrequency =
+        _audioCodingModule.PlayoutFrequency();
+    if (playoutFrequency > 0)
+    {
+        // Scale RTP statistics given the current playout frequency
+        maxJitterMs = max_jitter / (playoutFrequency / 1000);
+        averageJitterMs = jitter / (playoutFrequency / 1000);
+    }
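+    // Example: with a 16000 Hz playout clock, a reported jitter of 320
+    // timestamp units corresponds to 320 / (16000 / 1000) = 20 ms.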
+
+    discardedPackets = _numberOfDiscardedPackets;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+               VoEId(_instanceId, _channelId),
+               "GetRTPStatistics() => averageJitterMs = %lu, maxJitterMs = %lu, "
+               "discardedPackets = %lu",
+               averageJitterMs, maxJitterMs, discardedPackets);
+    return 0;
+}
+
+int
+Channel::GetRTPStatistics(CallStatistics& stats)
+{
+    WebRtc_UWord8 fraction_lost(0);
+    WebRtc_UWord32 cum_lost(0);
+    WebRtc_UWord32 ext_max(0);
+    WebRtc_UWord32 jitter(0);
+    WebRtc_UWord32 max_jitter(0);
+
+    // --- Part one of the final structure (four values)
+
+    // The jitter statistics is updated for each received RTP packet and is
+    // based on received packets.
+    if (_rtpRtcpModule.StatisticsRTP(&fraction_lost,
+                                     &cum_lost,
+                                     &ext_max,
+                                     &jitter,
+                                     &max_jitter) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CANNOT_RETRIEVE_RTP_STAT, kTraceWarning,
+            "GetRTPStatistics() failed to read RTP statistics from the "
+            "RTP/RTCP module");
+    }
+
+    stats.fractionLost = fraction_lost;
+    stats.cumulativeLost = cum_lost;
+    stats.extendedMax = ext_max;
+    stats.jitterSamples = jitter;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId, _channelId),
+                 "GetRTPStatistics() => fractionLost=%lu, cumulativeLost=%lu, "
+                 "extendedMax=%lu, jitterSamples=%li",
+                 stats.fractionLost, stats.cumulativeLost, stats.extendedMax,
+                 stats.jitterSamples);
+
+    // --- Part two of the final structure (one value)
+
+    WebRtc_UWord16 RTT(0);
+    RTCPMethod method = _rtpRtcpModule.RTCP();
+    if (method == kRtcpOff)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId, _channelId),
+                     "GetRTPStatistics() RTCP is disabled => valid RTT "
+                     "measurements cannot be retrieved");
+    } else
+    {
+        // The remote SSRC will be zero if no RTP packet has been received.
+        WebRtc_UWord32 remoteSSRC = _rtpRtcpModule.RemoteSSRC();
+        if (remoteSSRC > 0)
+        {
+            WebRtc_UWord16 avgRTT(0);
+            WebRtc_UWord16 maxRTT(0);
+            WebRtc_UWord16 minRTT(0);
+
+            if (_rtpRtcpModule.RTT(remoteSSRC, &RTT, &avgRTT, &minRTT, &maxRTT)
+                != 0)
+            {
+                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                             VoEId(_instanceId, _channelId),
+                             "GetRTPStatistics() failed to retrieve RTT from "
+                             "the RTP/RTCP module");
+            }
+        } else
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                         VoEId(_instanceId, _channelId),
+                         "GetRTPStatistics() failed to measure RTT since no "
+                         "RTP packets have been received yet");
+        }
+    }
+
+    stats.rttMs = static_cast<int> (RTT);
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId, _channelId),
+                 "GetRTPStatistics() => rttMs=%d", stats.rttMs);
+
+    // --- Part three of the final structure (four values)
+
+    WebRtc_UWord32 bytesSent(0);
+    WebRtc_UWord32 packetsSent(0);
+    WebRtc_UWord32 bytesReceived(0);
+    WebRtc_UWord32 packetsReceived(0);
+
+    if (_rtpRtcpModule.DataCountersRTP(&bytesSent,
+                                       &packetsSent,
+                                       &bytesReceived,
+                                       &packetsReceived) != 0)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId, _channelId),
+                     "GetRTPStatistics() failed to retrieve RTP data counters "
+                     "=> output will not be complete");
+    }
+
+    stats.bytesSent = bytesSent;
+    stats.packetsSent = packetsSent;
+    stats.bytesReceived = bytesReceived;
+    stats.packetsReceived = packetsReceived;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId, _channelId),
+                 "GetRTPStatistics() => bytesSent=%d, packetsSent=%d, "
+                 "bytesReceived=%d, packetsReceived=%d",
+                 stats.bytesSent, stats.packetsSent, stats.bytesReceived,
+                 stats.packetsReceived);
+
+    return 0;
+}
+
+int
+Channel::SetFECStatus(bool enable, int redPayloadtype)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::SetFECStatus()");
+
+    CodecInst codec;
+
+    // Get default RED settings from the ACM database
+    bool foundRED(false);
+    const WebRtc_UWord8 nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
+    for (int idx = 0; (!foundRED && idx < nSupportedCodecs); idx++)
+    {
+        _audioCodingModule.Codec(idx, codec);
+        if (!STR_CASE_CMP(codec.plname, "RED"))
+        {
+            foundRED = true;
+        }
+    }
+    if (!foundRED)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CODEC_ERROR, kTraceError,
+            "SetFECStatus() RED is not supported");
+        return -1;
+    }
+
+    if (redPayloadtype != -1)
+    {
+        codec.pltype = redPayloadtype;
+    }
+
+    if (_audioCodingModule.RegisterSendCodec(codec) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetFECStatus() RED registration in ACM module failed");
+        return -1;
+    }
+    if (_rtpRtcpModule.SetSendREDPayloadType(codec.pltype) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "SetFECStatus() RED registration in RTP/RTCP module failed");
+        return -1;
+    }
+    if (_audioCodingModule.SetFECStatus(enable) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetFECStatus() failed to set FEC state in the ACM");
+        return -1;
+    }
+    return 0;
+}
+
+int
+Channel::GetFECStatus(bool& enabled, int& redPayloadtype)
+{
+    enabled = _audioCodingModule.FECStatus();
+    if (enabled)
+    {
+        WebRtc_Word8 payloadType(0);
+        if (_rtpRtcpModule.SendREDPayloadType(payloadType) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+                "GetFECStatus() failed to retrieve RED PT from RTP/RTCP "
+                "module");
+            return -1;
+        }
+        redPayloadtype = payloadType;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                   VoEId(_instanceId, _channelId),
+                   "GetFECStatus() => enabled=%d, redPayloadtype=%d",
+                   enabled, redPayloadtype);
+        return 0;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId, _channelId),
+                 "GetFECStatus() => enabled=%d", enabled);
+    return 0;
+}
+
+int
+Channel::SetRTPKeepaliveStatus(bool enable,
+                               unsigned char unknownPayloadType,
+                               int deltaTransmitTimeSeconds)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::SetRTPKeepaliveStatus()");
+    if (_sending)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_SENDING, kTraceError,
+            "SetRTPKeepaliveStatus() already sending");
+        return -1;
+    }
+    if (_rtpRtcpModule.SetRTPKeepaliveStatus(
+        enable,
+        unknownPayloadType,
+        1000 * deltaTransmitTimeSeconds) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "SetRTPKeepaliveStatus() failed to set RTP keepalive status");
+        return -1;
+    }
+    return 0;
+}
+
+int
+Channel::GetRTPKeepaliveStatus(bool& enabled,
+                               unsigned char& unknownPayloadType,
+                               int& deltaTransmitTimeSeconds)
+{
+    bool onOff(false);
+    WebRtc_Word8 payloadType(0);
+    WebRtc_UWord16 deltaTransmitTimeMS(0);
+    if (_rtpRtcpModule.RTPKeepaliveStatus(&onOff, &payloadType,
+                                          &deltaTransmitTimeMS) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "GetRTPKeepaliveStatus() failed to retrieve RTP keepalive status");
+        return -1;
+    }
+    enabled = onOff;
+    unknownPayloadType = payloadType;
+    deltaTransmitTimeSeconds = static_cast<int> (deltaTransmitTimeMS / 1000);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId, _channelId),
+                 "GetRTPKeepaliveStatus() => enabled=%d, "
+                 "unknownPayloadType=%u, deltaTransmitTimeSeconds=%d",
+                 enabled, unknownPayloadType, deltaTransmitTimeSeconds);
+    return 0;
+}
+
+int
+Channel::StartRTPDump(const char fileNameUTF8[1024],
+                      RTPDirections direction)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::StartRTPDump()");
+    if ((direction != kRtpIncoming) && (direction != kRtpOutgoing))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartRTPDump() invalid RTP direction");
+        return -1;
+    }
+    RtpDump* rtpDumpPtr = (direction == kRtpIncoming) ?
+        &_rtpDumpIn : &_rtpDumpOut;
+    if (rtpDumpPtr == NULL)
+    {
+        assert(false);
+        return -1;
+    }
+    if (rtpDumpPtr->IsActive())
+    {
+        rtpDumpPtr->Stop();
+    }
+    if (rtpDumpPtr->Start(fileNameUTF8) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartRTPDump() failed to create file");
+        return -1;
+    }
+    return 0;
+}
+
+int
+Channel::StopRTPDump(RTPDirections direction)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::StopRTPDump()");
+    if ((direction != kRtpIncoming) && (direction != kRtpOutgoing))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StopRTPDump() invalid RTP direction");
+        return -1;
+    }
+    RtpDump* rtpDumpPtr = (direction == kRtpIncoming) ?
+        &_rtpDumpIn : &_rtpDumpOut;
+    if (rtpDumpPtr == NULL)
+    {
+        assert(false);
+        return -1;
+    }
+    if (!rtpDumpPtr->IsActive())
+    {
+        return 0;
+    }
+    return rtpDumpPtr->Stop();
+}
+
+bool
+Channel::RTPDumpIsActive(RTPDirections direction)
+{
+    if ((direction != kRtpIncoming) &&
+        (direction != kRtpOutgoing))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "RTPDumpIsActive() invalid RTP direction");
+        return false;
+    }
+    RtpDump* rtpDumpPtr = (direction == kRtpIncoming) ?
+        &_rtpDumpIn : &_rtpDumpOut;
+    return rtpDumpPtr->IsActive();
+}
+
+int
+Channel::InsertExtraRTPPacket(unsigned char payloadType,
+                              bool markerBit,
+                              const char* payloadData,
+                              unsigned short payloadSize)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+               "Channel::InsertExtraRTPPacket()");
+    if (payloadType > 127)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_PLTYPE, kTraceError,
+            "InsertExtraRTPPacket() invalid payload type");
+        return -1;
+    }
+    if (payloadData == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "InsertExtraRTPPacket() invalid payload data");
+        return -1;
+    }
+    if (payloadSize > _rtpRtcpModule.MaxDataPayloadLength())
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "InsertExtraRTPPacket() invalid payload size");
+        return -1;
+    }
+    if (!_sending)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_NOT_SENDING, kTraceError,
+            "InsertExtraRTPPacket() not sending");
+        return -1;
+    }
+
+    // Create extra RTP packet by calling RtpRtcp::SendOutgoingData().
+    // Transport::SendPacket() will be called by the module when the RTP packet
+    // is created.
+    // The call to SendOutgoingData() does *not* modify the timestamp or
+    // payload type; this ensures that the RTP module generates a valid RTP
+    // packet (the user might supply a non-registered payload type).
+    // The marker bit and payload type are replaced just before the actual
+    // transmission, i.e., the actual modification is done *after* the RTP
+    // module has delivered its RTP packet back to the VoE.
+    // The values stored below are used when the packet is modified
+    // (see Channel::SendPacket()).
+
+    _extraPayloadType = payloadType;
+    _extraMarkerBit = markerBit;
+    _insertExtraRTPPacket = true;
+
+    if (_rtpRtcpModule.SendOutgoingData(kAudioFrameSpeech,
+                                        _lastPayloadType,
+                                        _lastLocalTimeStamp,
+                                        (const WebRtc_UWord8*) payloadData,
+                                        payloadSize) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "InsertExtraRTPPacket() failed to send extra RTP packet");
+        return -1;
+    }
+
+    return 0;
+}
+
+WebRtc_UWord32
+Channel::Demultiplex(const AudioFrame& audioFrame,
+                     const WebRtc_UWord8 audioLevel_dBov)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::Demultiplex(audioLevel_dBov=%u)", audioLevel_dBov);
+    _audioFrame = audioFrame;
+    _audioFrame._id = _channelId;
+    _audioLevel_dBov = audioLevel_dBov;
+    return 0;
+}
+
+WebRtc_UWord32
+Channel::PrepareEncodeAndSend(WebRtc_UWord32 mixingFrequency)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::PrepareEncodeAndSend()");
+
+    if (_audioFrame._payloadDataLengthInSamples == 0)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "Channel::PrepareEncodeAndSend() invalid audio frame");
+        return -1;
+    }
+
+    if (_inputFilePlaying)
+    {
+        MixOrReplaceAudioWithFile(mixingFrequency);
+    }
+
+    if (_mute)
+    {
+        AudioFrameOperations::Mute(_audioFrame);
+    }
+
+    if (_inputExternalMedia)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+        const bool isStereo = (_audioFrame._audioChannel == 2);
+        if (_inputExternalMediaCallbackPtr)
+        {
+            _inputExternalMediaCallbackPtr->Process(
+                _channelId,
+                kRecordingPerChannel,
+                (WebRtc_Word16*)_audioFrame._payloadData,
+                _audioFrame._payloadDataLengthInSamples,
+                _audioFrame._frequencyInHz,
+                isStereo);
+        }
+    }
+
+    InsertInbandDtmfTone();
+
+    return 0;
+}
+
+WebRtc_UWord32
+Channel::EncodeAndSend()
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::EncodeAndSend()");
+
+    assert(_audioFrame._audioChannel == 1);
+    if (_audioFrame._payloadDataLengthInSamples == 0)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "Channel::EncodeAndSend() invalid audio frame");
+        return -1;
+    }
+
+    _audioFrame._id = _channelId;
+
+    // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
+
+    // The ACM resamples internally.
+    _audioFrame._timeStamp = _timeStamp;
+    if (_audioCodingModule.Add10MsData((AudioFrame&)_audioFrame) != 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "Channel::EncodeAndSend() ACM encoding failed");
+        return -1;
+    }
+
+    _timeStamp += _audioFrame._payloadDataLengthInSamples;
+
+    // --- Encode if complete frame is ready
+
+    // This call will trigger AudioPacketizationCallback::SendData if encoding
+    // is done and payload is ready for packetization and transmission.
+    return _audioCodingModule.Process();
+}
+
+int Channel::RegisterExternalMediaProcessing(
+    ProcessingTypes type,
+    VoEMediaProcess& processObject)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::RegisterExternalMediaProcessing()");
+
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (kPlaybackPerChannel == type)
+    {
+        if (_outputExternalMediaCallbackPtr)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_OPERATION, kTraceError,
+                "Channel::RegisterExternalMediaProcessing() "
+                "output external media already enabled");
+            return -1;
+        }
+        _outputExternalMediaCallbackPtr = &processObject;
+        _outputExternalMedia = true;
+    }
+    else if (kRecordingPerChannel == type)
+    {
+        if (_inputExternalMediaCallbackPtr)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_OPERATION, kTraceError,
+                "Channel::RegisterExternalMediaProcessing() "
+                "output external media already enabled");
+            return -1;
+        }
+        _inputExternalMediaCallbackPtr = &processObject;
+        _inputExternalMedia = true;
+    }
+    return 0;
+}
+
+int Channel::DeRegisterExternalMediaProcessing(ProcessingTypes type)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::DeRegisterExternalMediaProcessing()");
+
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (kPlaybackPerChannel == type)
+    {
+        if (!_outputExternalMediaCallbackPtr)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_OPERATION, kTraceWarning,
+                "Channel::DeRegisterExternalMediaProcessing() "
+                "output external media already disabled");
+            return 0;
+        }
+        _outputExternalMedia = false;
+        _outputExternalMediaCallbackPtr = NULL;
+    }
+    else if (kRecordingPerChannel == type)
+    {
+        if (!_inputExternalMediaCallbackPtr)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_OPERATION, kTraceWarning,
+                "Channel::DeRegisterExternalMediaProcessing() "
+                "input external media already disabled");
+            return 0;
+        }
+        _inputExternalMedia = false;
+        _inputExternalMediaCallbackPtr = NULL;
+    }
+
+    return 0;
+}
+
+int
+Channel::ResetRTCPStatistics()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::ResetRTCPStatistics()");
+    WebRtc_UWord32 remoteSSRC(0);
+    remoteSSRC = _rtpRtcpModule.RemoteSSRC();
+    return _rtpRtcpModule.ResetRTT(remoteSSRC);
+}
+
+int
+Channel::GetRoundTripTimeSummary(StatVal& delaysMs) const
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetRoundTripTimeSummary()");
+    // Override default module outputs for the case when RTCP is disabled.
+    // This is done to ensure that we are backward compatible with the
+    // VoiceEngine where we did not use RTP/RTCP module.
+    if (!_rtpRtcpModule.RTCP())
+    {
+        delaysMs.min = -1;
+        delaysMs.max = -1;
+        delaysMs.average = -1;
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "Channel::GetRoundTripTimeSummary() RTCP is disabled =>"
+                     " valid RTT measurements cannot be retrieved");
+        return 0;
+    }
+
+    WebRtc_UWord32 remoteSSRC;
+    WebRtc_UWord16 RTT;
+    WebRtc_UWord16 avgRTT;
+    WebRtc_UWord16 maxRTT;
+    WebRtc_UWord16 minRTT;
+    // The remote SSRC will be zero if no RTP packet has been received.
+    remoteSSRC = _rtpRtcpModule.RemoteSSRC();
+    if (remoteSSRC == 0)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "Channel::GetRoundTripTimeSummary() unable to measure RTT"
+                     " since no RTP packet has been received yet");
+    }
+
+    // Retrieve RTT statistics from the RTP/RTCP module for the specified
+    // channel and SSRC. The SSRC is required to parse out the correct source
+    // in conference scenarios.
+    if (_rtpRtcpModule.RTT(remoteSSRC, &RTT, &avgRTT, &minRTT, &maxRTT) != 0)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "GetRoundTripTimeSummary unable to retrieve RTT values"
+                     " from the RTCP layer");
+        delaysMs.min = -1; delaysMs.max = -1; delaysMs.average = -1;
+    }
+    else
+    {
+        delaysMs.min = minRTT;
+        delaysMs.max = maxRTT;
+        delaysMs.average = avgRTT;
+    }
+    return 0;
+}
+
+int
+Channel::GetNetworkStatistics(NetworkStatistics& stats)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetNetworkStatistics()");
+    return _audioCodingModule.NetworkStatistics(
+        (ACMNetworkStatistics &)stats);
+}
+
+int
+Channel::GetJitterStatistics(JitterStatistics& stats)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetNetworkStatistics()");
+    return _audioCodingModule.JitterStatistics(
+        (ACMJitterStatistics &)stats);
+}
+
+int
+Channel::GetPreferredBufferSize(unsigned short& preferredBufferSize)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetPreferredBufferSize()");
+    return _audioCodingModule.PreferredBufferSize(
+        (WebRtc_UWord16 &)preferredBufferSize);
+}
+
+int
+Channel::ResetJitterStatistics()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::ResetJitterStatistics()");
+    return _audioCodingModule.ResetJitterStatistics();
+}
+
+int
+Channel::GetDelayEstimate(int& delayMs) const
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetDelayEstimate()");
+    delayMs = (_averageDelayMs + 5) / 10 + _recPacketDelayMs;
+    return 0;
+}
+
+int
+Channel::SetMinimumPlayoutDelay(int delayMs)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetMinimumPlayoutDelay()");
+    if ((delayMs < kVoiceEngineMinMinPlayoutDelayMs) ||
+        (delayMs > kVoiceEngineMaxMinPlayoutDelayMs))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetMinimumPlayoutDelay() invalid min delay");
+        return -1;
+    }
+    if (_audioCodingModule.SetMinimumPlayoutDelay(delayMs) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetMinimumPlayoutDelay() failed to set min playout delay");
+        return -1;
+    }
+    return 0;
+}
+
+int
+Channel::GetPlayoutTimestamp(unsigned int& timestamp)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetPlayoutTimestamp()");
+    WebRtc_UWord32 playoutTimestamp(0);
+    if (GetPlayoutTimeStamp(playoutTimestamp) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CANNOT_RETRIEVE_VALUE, kTraceError,
+            "GetPlayoutTimestamp() failed to retrieve timestamp");
+        return -1;
+    }
+    timestamp = playoutTimestamp;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId,_channelId),
+                 "GetPlayoutTimestamp() => timestamp=%u", timestamp);
+    return 0;
+}
+
+int
+Channel::SetInitTimestamp(unsigned int timestamp)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+               "Channel::SetInitTimestamp()");
+    if (_sending)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SENDING, kTraceError, "SetInitTimestamp() already sending");
+        return -1;
+    }
+    if (_rtpRtcpModule.SetStartTimestamp(timestamp) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "SetInitTimestamp() failed to set timestamp");
+        return -1;
+    }
+    return 0;
+}
+
+int
+Channel::SetInitSequenceNumber(short sequenceNumber)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetInitSequenceNumber()");
+    if (_sending)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SENDING, kTraceError,
+            "SetInitSequenceNumber() already sending");
+        return -1;
+    }
+    if (_rtpRtcpModule.SetSequenceNumber(sequenceNumber) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "SetInitSequenceNumber() failed to set sequence number");
+        return -1;
+    }
+    return 0;
+}
+
+int
+Channel::GetRtpRtcp(RtpRtcp* &rtpRtcpModule) const
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetRtpRtcp()");
+    rtpRtcpModule = &_rtpRtcpModule;
+    return 0;
+}
+
+WebRtc_Word32
+Channel::MixOrReplaceAudioWithFile(const WebRtc_UWord32 mixingFrequency)
+{
+    WebRtc_Word16 fileBuffer[320];
+    WebRtc_UWord32 fileSamples(0);
+
+    {
+        CriticalSectionScoped cs(_fileCritSect);
+
+        if (_inputFilePlayerPtr == NULL)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                         VoEId(_instanceId, _channelId),
+                         "Channel::MixOrReplaceAudioWithFile() fileplayer"
+                             " doesnt exist");
+            return -1;
+        }
+
+        if (_inputFilePlayerPtr->Get10msAudioFromFile(fileBuffer,
+                                                      fileSamples,
+                                                      mixingFrequency) == -1)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                         VoEId(_instanceId, _channelId),
+                         "Channel::MixOrReplaceAudioWithFile() file mixing "
+                         "failed");
+            return -1;
+        }
+        if (fileSamples == 0)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                         VoEId(_instanceId, _channelId),
+                         "Channel::MixOrReplaceAudioWithFile() file is ended");
+            return 0;
+        }
+    }
+
+    assert(_audioFrame._payloadDataLengthInSamples == fileSamples);
+
+    if (_mixFileWithMicrophone)
+    {
+        Utility::MixWithSat(_audioFrame._payloadData,
+                            fileBuffer,
+                            (WebRtc_UWord16)fileSamples);
+    }
+    else
+    {
+        // replace ACM audio with file
+        _audioFrame.UpdateFrame(_channelId,
+                                -1,
+                                fileBuffer,
+                                (WebRtc_UWord16)fileSamples,
+                                mixingFrequency,
+                                AudioFrame::kNormalSpeech,
+                                AudioFrame::kVadUnknown,
+                                1);
+
+    }
+    return 0;
+}
+
+WebRtc_Word32
+Channel::MixAudioWithFile(AudioFrame& audioFrame,
+                          const WebRtc_UWord32 mixingFrequency)
+{
+    assert(mixingFrequency <= 32000);
+
+    WebRtc_Word16 fileBuffer[320];
+    WebRtc_UWord32 fileSamples(0);
+
+    {
+        CriticalSectionScoped cs(_fileCritSect);
+
+        if (_outputFilePlayerPtr == NULL)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                         VoEId(_instanceId, _channelId),
+                         "Channel::MixAudioWithFile() file mixing failed");
+            return -1;
+        }
+
+        // We should get the frequency we ask for.
+        if (_outputFilePlayerPtr->Get10msAudioFromFile(fileBuffer,
+                                                       fileSamples,
+                                                       mixingFrequency) == -1)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                         VoEId(_instanceId, _channelId),
+                         "Channel::MixAudioWithFile() file mixing failed");
+            return -1;
+        }
+    }
+
+    if (audioFrame._payloadDataLengthInSamples == fileSamples)
+    {
+        Utility::MixWithSat(audioFrame._payloadData,
+                            fileBuffer,
+                            (WebRtc_UWord16)fileSamples);
+    }
+    else
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
+            "Channel::MixAudioWithFile() _payloadDataLengthInSamples(%d) != "
+            "fileSamples(%d)",
+            audioFrame._payloadDataLengthInSamples, fileSamples);
+        return -1;
+    }
+
+    return 0;
+}
+
+int
+Channel::InsertInbandDtmfTone()
+{
+    if (_inbandDtmfQueue.PendingDtmf() &&
+        !_inbandDtmfGenerator.IsAddingTone() &&
+        _inbandDtmfGenerator.DelaySinceLastTone() >
+        kMinTelephoneEventSeparationMs)
+    {
+        WebRtc_Word8 eventCode(0);
+        WebRtc_UWord16 lengthMs(0);
+        WebRtc_UWord8 attenuationDb(0);
+
+        eventCode = _inbandDtmfQueue.NextDtmf(&lengthMs, &attenuationDb);
+        _inbandDtmfGenerator.AddTone(eventCode, lengthMs, attenuationDb);
+        if (_playInbandDtmfEvent)
+        {
+            // Add tone to output mixer using a reduced length to minimize
+            // risk of echo.
+            _outputMixerPtr->PlayDtmfTone(eventCode, lengthMs - 80,
+                                          attenuationDb);
+        }
+    }
+
+    if (_inbandDtmfGenerator.IsAddingTone())
+    {
+        WebRtc_UWord16 frequency(0);
+        _inbandDtmfGenerator.GetSampleRate(frequency);
+
+        if (frequency != _audioFrame._frequencyInHz)
+        {
+            // Update sample rate of Dtmf tone since the mixing frequency
+            // has changed.
+            _inbandDtmfGenerator.SetSampleRate(
+                (WebRtc_UWord16) (_audioFrame._frequencyInHz));
+            // Reset the tone to be added taking the new sample rate into
+            // account.
+            _inbandDtmfGenerator.ResetTone();
+        }
+
+        WebRtc_Word16 toneBuffer[320];
+        WebRtc_UWord16 toneSamples(0);
+        // Get 10ms tone segment and set time since last tone to zero
+        if (_inbandDtmfGenerator.Get10msTone(toneBuffer, toneSamples) == -1)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                       VoEId(_instanceId, _channelId),
+                       "Channel::EncodeAndSend() inserting Dtmf failed");
+            return -1;
+        }
+
+        // Replace mixed audio with Dtmf tone
+        memcpy(_audioFrame._payloadData, toneBuffer, sizeof(WebRtc_Word16)
+            * toneSamples);
+
+        assert(_audioFrame._audioChannel == 1);
+        assert(_audioFrame._payloadDataLengthInSamples == toneSamples);
+    }
+    else
+    {
+        // Add 10ms to "delay-since-last-tone" counter
+        _inbandDtmfGenerator.UpdateDelaySinceLastTone();
+    }
+    return 0;
+}
+
+WebRtc_Word32
+Channel::GetPlayoutTimeStamp(WebRtc_UWord32& playoutTimestamp)
+{
+    WebRtc_UWord32 timestamp(0);
+    CodecInst currRecCodec;
+
+    if (_audioCodingModule.PlayoutTimestamp(timestamp) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "Channel::GetPlayoutTimeStamp() failed to read playout"
+                     " timestamp from the ACM");
+        return -1;
+    }
+
+    WebRtc_UWord16 delayMS(0);
+    if (_audioDeviceModulePtr->PlayoutDelay(&delayMS) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "Channel::GetPlayoutTimeStamp() failed to read playout"
+                     " delay from the ADM");
+        return -1;
+    }
+
+    WebRtc_Word32 playoutFrequency = _audioCodingModule.PlayoutFrequency();
+    if (_audioCodingModule.ReceiveCodec(currRecCodec) == 0)
+    {
+        if (STR_CASE_CMP("G722", currRecCodec.plname) == 0)
+        {
+            playoutFrequency = 8000;
+        }
+    }
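+    // Convert the playout delay from milliseconds to timestamp units
+    // (samples at the playout frequency) and shift the timestamp back
+    // accordingly.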
+    timestamp -= (delayMS * (playoutFrequency/1000));
+
+    playoutTimestamp = timestamp;
+
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetPlayoutTimeStamp() => playoutTimestamp = %lu",
+                 playoutTimestamp);
+    return 0;
+}
+
+void
+Channel::ResetDeadOrAliveCounters()
+{
+    _countDeadDetections = 0;
+    _countAliveDetections = 0;
+}
+
+void
+Channel::UpdateDeadOrAliveCounters(bool alive)
+{
+    if (alive)
+        _countAliveDetections++;
+    else
+        _countDeadDetections++;
+}
+
+int
+Channel::GetDeadOrAliveCounters(int& countDead, int& countAlive) const
+{
+    bool enabled;
+    WebRtc_UWord8 timeSec;
+
+    _rtpRtcpModule.PeriodicDeadOrAliveStatus(enabled, timeSec);
+    if (!enabled)
+        return (-1);
+
+    countDead = static_cast<int> (_countDeadDetections);
+    countAlive = static_cast<int> (_countAliveDetections);
+    return 0;
+}
+
+WebRtc_Word32
+Channel::SendPacketRaw(const void *data, int len, bool RTCP)
+{
+    if (_transportPtr == NULL)
+    {
+        return -1;
+    }
+    if (!RTCP)
+    {
+        return _transportPtr->SendPacket(_channelId, data, len);
+    }
+    else
+    {
+        return _transportPtr->SendRTCPPacket(_channelId, data, len);
+    }
+}
+
+WebRtc_Word32
+Channel::UpdatePacketDelay(const WebRtc_UWord32 timestamp,
+                           const WebRtc_UWord16 sequenceNumber)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::UpdatePacketDelay(timestamp=%lu, sequenceNumber=%u)",
+                 timestamp, sequenceNumber);
+
+    WebRtc_Word32 rtpReceiveFrequency(0);
+
+    // Get frequency of last received payload
+    rtpReceiveFrequency = _audioCodingModule.ReceiveFrequency();
+
+    CodecInst currRecCodec;
+    if (_audioCodingModule.ReceiveCodec(currRecCodec) == 0)
+    {
+        if (STR_CASE_CMP("G722", currRecCodec.plname) == 0)
+        {
+            // Even though the actual sampling rate for G.722 audio is
+            // 16,000 Hz, the RTP clock rate for the G722 payload format is
+            // 8,000 Hz because that value was erroneously assigned in
+            // RFC 1890 and must remain unchanged for backward compatibility.
+            rtpReceiveFrequency = 8000;
+        }
+    }
+
+    const WebRtc_UWord32 timeStampDiff = timestamp - _playoutTimeStampRTP;
+    WebRtc_UWord32 timeStampDiffMs(0);
+
+    if (timeStampDiff > 0)
+    {
+        switch (rtpReceiveFrequency)
+        {
+            case 8000:
+                timeStampDiffMs = timeStampDiff >> 3;
+                break;
+            case 16000:
+                timeStampDiffMs = timeStampDiff >> 4;
+                break;
+            case 32000:
+                timeStampDiffMs = timeStampDiff >> 5;
+                break;
+            default:
+                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                             VoEId(_instanceId, _channelId),
+                             "Channel::UpdatePacketDelay() invalid sample "
+                             "rate");
+                timeStampDiffMs = 0;
+                return -1;
+        }
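+        // Ignore unreasonably large jumps (> 5 seconds) in the computed
+        // difference.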
+        if (timeStampDiffMs > 5000)
+        {
+            timeStampDiffMs = 0;
+        }
+
+        if (_averageDelayMs == 0)
+        {
+            _averageDelayMs = timeStampDiffMs;
+        }
+        else
+        {
+            // Filter average delay value using exponential filter (alpha is
+            // 7/8). We derive 10*_averageDelayMs here (reduces risk of
+            // rounding error) and compensate for it in GetDelayEstimate()
+            // later. Adding 4/8 results in correct rounding.
+            _averageDelayMs = ((_averageDelayMs*7 + 10*timeStampDiffMs + 4)>>3);
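+            // Ignoring the factor-of-10 scaling, this is equivalent to
+            // average ~= (7 * average + diff) / 8.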
+        }
+
+        if (sequenceNumber - _previousSequenceNumber == 1)
+        {
+            WebRtc_UWord16 packetDelayMs = 0;
+            switch (rtpReceiveFrequency)
+            {
+            case 8000:
+                packetDelayMs = (WebRtc_UWord16)(
+                    (timestamp - _previousTimestamp) >> 3);
+                break;
+            case 16000:
+                packetDelayMs = (WebRtc_UWord16)(
+                    (timestamp - _previousTimestamp) >> 4);
+                break;
+            case 32000:
+                packetDelayMs = (WebRtc_UWord16)(
+                    (timestamp - _previousTimestamp) >> 5);
+                break;
+            }
+
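+            // Only accept plausible frame durations (10-60 ms) as the new
+            // packet delay estimate.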
+            if (packetDelayMs >= 10 && packetDelayMs <= 60)
+                _recPacketDelayMs = packetDelayMs;
+        }
+    }
+
+    _previousSequenceNumber = sequenceNumber;
+    _previousTimestamp = timestamp;
+
+    return 0;
+}
+
+void
+Channel::RegisterReceiveCodecsToRTPModule()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::RegisterReceiveCodecsToRTPModule()");
+
+    CodecInst codec;
+    const WebRtc_UWord8 nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
+
+    for (int idx = 0; idx < nSupportedCodecs; idx++)
+    {
+        // Open up the RTP/RTCP receiver for all supported codecs
+        if ((_audioCodingModule.Codec(idx, codec) == -1) ||
+            (_rtpRtcpModule.RegisterReceivePayload(codec.plname,
+                                                   codec.pltype,
+                                                   codec.plfreq,
+                                                   codec.channels,
+                                                   codec.rate) == -1))
+        {
+            WEBRTC_TRACE(
+                         kTraceWarning,
+                         kTraceVoice,
+                         VoEId(_instanceId, _channelId),
+                         "Channel::RegisterReceiveCodecsToRTPModule() unable"
+                         " to register %s (%d/%d/%d/%d) to RTP/RTCP receiver",
+                         codec.plname, codec.pltype, codec.plfreq,
+                         codec.channels, codec.rate);
+        }
+        else
+        {
+            WEBRTC_TRACE(
+                         kTraceInfo,
+                         kTraceVoice,
+                         VoEId(_instanceId, _channelId),
+                         "Channel::RegisterReceiveCodecsToRTPModule() %s "
+                         "(%d/%d/%d/%d) has been added to the RTP/RTCP"
+                         "receiver",
+                         codec.plname, codec.pltype, codec.plfreq,
+                         codec.channels, codec.rate);
+        }
+    }
+}
+
+int
+Channel::ApmProcessRx(AudioFrame& audioFrame)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::ApmProcessRx()");
+
+    // Reset the APM frequency if the frequency has changed
+    if (_rxAudioProcessingModulePtr->sample_rate_hz() !=
+        audioFrame._frequencyInHz)
+    {
+        if (_rxAudioProcessingModulePtr->set_sample_rate_hz(
+            audioFrame._frequencyInHz))
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
+                         "AudioProcessingModule::set_sample_rate_hz("
+                         "_frequencyInHz=%u) => error",
+                         audioFrame._frequencyInHz);
+        }
+    }
+
+    if (_rxAudioProcessingModulePtr->ProcessStream(&audioFrame) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
+                   "AudioProcessingModule::ProcessStream() => error");
+    }
+
+    return 0;
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/voice_engine/main/source/channel.h b/voice_engine/main/source/channel.h
new file mode 100644
index 0000000..aface12
--- /dev/null
+++ b/voice_engine/main/source/channel.h
@@ -0,0 +1,676 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_CHANNEL_H
+#define WEBRTC_VOICE_ENGINE_CHANNEL_H
+
+#include "voe_network.h"
+
+#include "audio_coding_module.h"
+#include "common_types.h"
+#include "shared_data.h"
+#include "rtp_rtcp.h"
+#include "voe_audio_processing.h"
+#include "voice_engine_defines.h"
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+#include "udp_transport.h"
+#endif
+#include "audio_conference_mixer_defines.h"
+#include "file_player.h"
+#include "file_recorder.h"
+#ifdef WEBRTC_SRTP
+#include "SrtpModule.h"
+#endif
+#include "dtmf_inband.h"
+#include "dtmf_inband_queue.h"
+#include "level_indicator.h"
+#include "resampler.h"
+#ifdef WEBRTC_DTMF_DETECTION
+#include "voe_dtmf.h" // TelephoneEventDetectionMethods, TelephoneEventObserver
+#endif
+
+namespace webrtc
+{
+class CriticalSectionWrapper;
+class ProcessThread;
+class AudioDeviceModule;
+class RtpRtcp;
+class FileWrapper;
+class RtpDump;
+class VoiceEngineObserver;
+class VoEMediaProcess;
+class VoERTPObserver;
+class VoERTCPObserver;
+
+struct CallStatistics;
+
+namespace voe
+{
+class Statistics;
+class TransmitMixer;
+class OutputMixer;
+
+
+class Channel:
+    public RtpData,
+    public RtpFeedback,
+    public RtcpFeedback,
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    public UdpTransportData, // receiving packet from sockets
+#endif
+    public FileCallback, // receiving notification from file player & recorder
+    public Transport,
+    public RtpAudioFeedback,
+    public AudioPacketizationCallback, // receive encoded packets from the ACM
+    public ACMVADCallback, // receive voice activity from the ACM
+#ifdef WEBRTC_DTMF_DETECTION
+    public AudioCodingFeedback, // inband Dtmf detection in the ACM
+#endif
+    public MixerParticipant // supplies output mixer with audio frames
+{
+public:
+    enum {KNumSocketThreads = 1};
+    enum {KNumberOfSocketBuffers = 8};
+    static WebRtc_UWord8 numSocketThreads;
+public:
+    virtual ~Channel();
+    static WebRtc_Word32 CreateChannel(Channel*& channel,
+                                       const WebRtc_Word32 channelId,
+                                       const WebRtc_UWord32 instanceId);
+    Channel(const WebRtc_Word32 channelId, const WebRtc_UWord32 instanceId);
+    WebRtc_Word32 Init();
+    WebRtc_Word32 SetEngineInformation(
+        Statistics& engineStatistics,
+        OutputMixer& outputMixer,
+        TransmitMixer& transmitMixer,
+        ProcessThread& moduleProcessThread,
+        AudioDeviceModule& audioDeviceModule,
+        VoiceEngineObserver* voiceEngineObserver,
+        CriticalSectionWrapper* callbackCritSect);
+    WebRtc_Word32 UpdateLocalTimeStamp();
+
+public:
+    // API methods
+
+    // VoEBase
+    WebRtc_Word32 StartPlayout();
+    WebRtc_Word32 StopPlayout();
+    WebRtc_Word32 StartSend();
+    WebRtc_Word32 StopSend();
+    WebRtc_Word32 StartReceiving();
+    WebRtc_Word32 StopReceiving();
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    WebRtc_Word32 SetLocalReceiver(const WebRtc_UWord16 rtpPort,
+                                   const WebRtc_UWord16 rtcpPort,
+                                   const WebRtc_Word8 ipAddr[64],
+                                   const WebRtc_Word8 multicastIpAddr[64]);
+    WebRtc_Word32 GetLocalReceiver(int& port, int& RTCPport, char ipAddr[]);
+    WebRtc_Word32 SetSendDestination(const WebRtc_UWord16 rtpPort,
+                                     const WebRtc_Word8 ipAddr[64],
+                                     const int sourcePort,
+                                     const WebRtc_UWord16 rtcpPort);
+    WebRtc_Word32 GetSendDestination(int& port, char ipAddr[64],
+                                     int& sourcePort, int& RTCPport);
+#endif
+    WebRtc_Word32 SetNetEQPlayoutMode(NetEqModes mode);
+    WebRtc_Word32 GetNetEQPlayoutMode(NetEqModes& mode);
+    WebRtc_Word32 SetNetEQBGNMode(NetEqBgnModes mode);
+    WebRtc_Word32 GetNetEQBGNMode(NetEqBgnModes& mode);
+    WebRtc_Word32 SetOnHoldStatus(bool enable, OnHoldModes mode);
+    WebRtc_Word32 GetOnHoldStatus(bool& enabled, OnHoldModes& mode);
+    WebRtc_Word32 RegisterVoiceEngineObserver(VoiceEngineObserver& observer);
+    WebRtc_Word32 DeRegisterVoiceEngineObserver();
+
+    // VoECodec
+    WebRtc_Word32 GetSendCodec(CodecInst& codec);
+    WebRtc_Word32 GetRecCodec(CodecInst& codec);
+    WebRtc_Word32 SetSendCodec(const CodecInst& codec);
+    WebRtc_Word32 SetVADStatus(bool enableVAD, ACMVADMode mode,
+                               bool disableDTX);
+    WebRtc_Word32 GetVADStatus(bool& enabledVAD, ACMVADMode& mode,
+                               bool& disabledDTX);
+    WebRtc_Word32 SetRecPayloadType(const CodecInst& codec);
+    WebRtc_Word32 GetRecPayloadType(CodecInst& codec);
+    WebRtc_Word32 SetAMREncFormat(AmrMode mode);
+    WebRtc_Word32 SetAMRDecFormat(AmrMode mode);
+    WebRtc_Word32 SetAMRWbEncFormat(AmrMode mode);
+    WebRtc_Word32 SetAMRWbDecFormat(AmrMode mode);
+    WebRtc_Word32 SetSendCNPayloadType(int type, PayloadFrequencies frequency);
+    WebRtc_Word32 SetISACInitTargetRate(int rateBps, bool useFixedFrameSize);
+    WebRtc_Word32 SetISACMaxRate(int rateBps);
+    WebRtc_Word32 SetISACMaxPayloadSize(int sizeBytes);
+
+    // VoENetwork
+    WebRtc_Word32 RegisterExternalTransport(Transport& transport);
+    WebRtc_Word32 DeRegisterExternalTransport();
+    WebRtc_Word32 ReceivedRTPPacket(const WebRtc_Word8* data,
+                                    WebRtc_Word32 length);
+    WebRtc_Word32 ReceivedRTCPPacket(const WebRtc_Word8* data,
+                                     WebRtc_Word32 length);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    WebRtc_Word32 GetSourceInfo(int& rtpPort, int& rtcpPort, char ipAddr[64]);
+    WebRtc_Word32 EnableIPv6();
+    bool IPv6IsEnabled() const;
+    WebRtc_Word32 SetSourceFilter(int rtpPort, int rtcpPort,
+                                  const char ipAddr[64]);
+    WebRtc_Word32 GetSourceFilter(int& rtpPort, int& rtcpPort, char ipAddr[64]);
+    WebRtc_Word32 SetSendTOS(int DSCP, int priority, bool useSetSockopt);
+    WebRtc_Word32 GetSendTOS(int &DSCP, int& priority, bool &useSetSockopt);
+#if defined(_WIN32)
+    WebRtc_Word32 SetSendGQoS(bool enable, int serviceType, int overrideDSCP);
+    WebRtc_Word32 GetSendGQoS(bool &enabled, int &serviceType,
+                              int &overrideDSCP);
+#endif
+#endif
+    WebRtc_Word32 SetPacketTimeoutNotification(bool enable, int timeoutSeconds);
+    WebRtc_Word32 GetPacketTimeoutNotification(bool& enabled,
+                                               int& timeoutSeconds);
+    WebRtc_Word32 RegisterDeadOrAliveObserver(VoEConnectionObserver& observer);
+    WebRtc_Word32 DeRegisterDeadOrAliveObserver();
+    WebRtc_Word32 SetPeriodicDeadOrAliveStatus(bool enable,
+                                               int sampleTimeSeconds);
+    WebRtc_Word32 GetPeriodicDeadOrAliveStatus(bool& enabled,
+                                               int& sampleTimeSeconds);
+    WebRtc_Word32 SendUDPPacket(const void* data, unsigned int length,
+                                int& transmittedBytes, bool useRtcpSocket);
+
+    // VoEFile
+    int StartPlayingFileLocally(const char* fileName, const bool loop,
+                                const FileFormats format,
+                                const int startPosition,
+                                const float volumeScaling,
+                                const int stopPosition,
+                                const CodecInst* codecInst);
+    int StartPlayingFileLocally(InStream* stream, const FileFormats format,
+                                const int startPosition,
+                                const float volumeScaling,
+                                const int stopPosition,
+                                const CodecInst* codecInst);
+    int StopPlayingFileLocally();
+    int IsPlayingFileLocally() const;
+    int ScaleLocalFilePlayout(const float scale);
+    int GetLocalPlayoutPosition(int& positionMs);
+    int StartPlayingFileAsMicrophone(const char* fileName, const bool loop,
+                                     const FileFormats format,
+                                     const int startPosition,
+                                     const float volumeScaling,
+                                     const int stopPosition,
+                                     const CodecInst* codecInst);
+    int StartPlayingFileAsMicrophone(InStream* stream,
+                                     const FileFormats format,
+                                     const int startPosition,
+                                     const float volumeScaling,
+                                     const int stopPosition,
+                                     const CodecInst* codecInst);
+    int StopPlayingFileAsMicrophone();
+    int IsPlayingFileAsMicrophone() const;
+    int ScaleFileAsMicrophonePlayout(const float scale);
+    int StartRecordingPlayout(const char* fileName, const CodecInst* codecInst);
+    int StartRecordingPlayout(OutStream* stream, const CodecInst* codecInst);
+    int StopRecordingPlayout();
+
+    void SetMixWithMicStatus(bool mix);
+
+    // VoEExternalMediaProcessing
+    int RegisterExternalMediaProcessing(ProcessingTypes type,
+                                        VoEMediaProcess& processObject);
+    int DeRegisterExternalMediaProcessing(ProcessingTypes type);
+
+    // VoEVolumeControl
+    int GetSpeechOutputLevel(WebRtc_UWord32& level) const;
+    int GetSpeechOutputLevelFullRange(WebRtc_UWord32& level) const;
+    int SetMute(const bool enable);
+    bool Mute() const;
+    int SetOutputVolumePan(float left, float right);
+    int GetOutputVolumePan(float& left, float& right) const;
+    int SetChannelOutputVolumeScaling(float scaling);
+    int GetChannelOutputVolumeScaling(float& scaling) const;
+
+    // VoECallReport
+    void ResetDeadOrAliveCounters();
+    int ResetRTCPStatistics();
+    int GetRoundTripTimeSummary(StatVal& delaysMs) const;
+    int GetDeadOrAliveCounters(int& countDead, int& countAlive) const;
+
+    // VoENetEqStats
+    int GetNetworkStatistics(NetworkStatistics& stats);
+    int GetJitterStatistics(JitterStatistics& stats);
+    int GetPreferredBufferSize(unsigned short& preferredBufferSize);
+    int ResetJitterStatistics();
+
+    // VoEVideoSync
+    int GetDelayEstimate(int& delayMs) const;
+    int SetMinimumPlayoutDelay(int delayMs);
+    int GetPlayoutTimestamp(unsigned int& timestamp);
+    int SetInitTimestamp(unsigned int timestamp);
+    int SetInitSequenceNumber(short sequenceNumber);
+
+    // VoEVideoSyncExtended
+    int GetRtpRtcp(RtpRtcp* &rtpRtcpModule) const;
+
+    // VoEEncryption
+#ifdef WEBRTC_SRTP
+    int EnableSRTPSend(
+            CipherTypes cipherType,
+            int cipherKeyLength,
+            AuthenticationTypes authType,
+            int authKeyLength,
+            int authTagLength,
+            SecurityLevels level,
+            const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
+            bool useForRTCP);
+    int DisableSRTPSend();
+    int EnableSRTPReceive(
+            CipherTypes cipherType,
+            int cipherKeyLength,
+            AuthenticationTypes authType,
+            int authKeyLength,
+            int authTagLength,
+            SecurityLevels level,
+            const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
+            bool useForRTCP);
+    int DisableSRTPReceive();
+#endif
+    int RegisterExternalEncryption(Encryption& encryption);
+    int DeRegisterExternalEncryption();
+
+    // VoEDtmf
+    int SendTelephoneEventOutband(unsigned char eventCode, int lengthMs,
+                                  int attenuationDb, bool playDtmfEvent);
+    int SendTelephoneEventInband(unsigned char eventCode, int lengthMs,
+                                 int attenuationDb, bool playDtmfEvent);
+    int SetDtmfPlayoutStatus(bool enable);
+    bool DtmfPlayoutStatus() const;
+    int SetSendTelephoneEventPayloadType(unsigned char type);
+    int GetSendTelephoneEventPayloadType(unsigned char& type);
+#ifdef WEBRTC_DTMF_DETECTION
+    int RegisterTelephoneEventDetection(
+            TelephoneEventDetectionMethods detectionMethod,
+            VoETelephoneEventObserver& observer);
+    int DeRegisterTelephoneEventDetection();
+    int GetTelephoneEventDetectionStatus(
+            bool& enabled,
+            TelephoneEventDetectionMethods& detectionMethod);
+#endif
+
+    // VoEAudioProcessingImpl
+    int UpdateRxVadDetection(AudioFrame& audioFrame);
+    int RegisterRxVadObserver(VoERxVadCallback &observer);
+    int DeRegisterRxVadObserver();
+    int VoiceActivityIndicator(int &activity);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    int SetRxAgcStatus(const bool enable, const AgcModes mode);
+    int GetRxAgcStatus(bool& enabled, AgcModes& mode);
+    int SetRxAgcConfig(const AgcConfig config);
+    int GetRxAgcConfig(AgcConfig& config);
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NR
+    int SetRxNsStatus(const bool enable, const NsModes mode);
+    int GetRxNsStatus(bool& enabled, NsModes& mode);
+#endif
+
+    // VoERTP_RTCP
+    int RegisterRTPObserver(VoERTPObserver& observer);
+    int DeRegisterRTPObserver();
+    int RegisterRTCPObserver(VoERTCPObserver& observer);
+    int DeRegisterRTCPObserver();
+    int SetLocalSSRC(unsigned int ssrc);
+    int GetLocalSSRC(unsigned int& ssrc);
+    int GetRemoteSSRC(unsigned int& ssrc);
+    int GetRemoteCSRCs(unsigned int arrCSRC[15]);
+    int SetRTPAudioLevelIndicationStatus(bool enable, unsigned char ID);
+    int GetRTPAudioLevelIndicationStatus(bool& enable, unsigned char& ID);
+    int SetRTCPStatus(bool enable);
+    int GetRTCPStatus(bool& enabled);
+    int SetRTCP_CNAME(const char cName[256]);
+    int GetRTCP_CNAME(char cName[256]);
+    int GetRemoteRTCP_CNAME(char cName[256]);
+    int GetRemoteRTCPData(unsigned int& NTPHigh, unsigned int& NTPLow,
+                          unsigned int& timestamp,
+                          unsigned int& playoutTimestamp, unsigned int* jitter,
+                          unsigned short* fractionLost);
+    int SendApplicationDefinedRTCPPacket(const unsigned char subType,
+                                         unsigned int name, const char* data,
+                                         unsigned short dataLengthInBytes);
+    int GetRTPStatistics(unsigned int& averageJitterMs,
+                         unsigned int& maxJitterMs,
+                         unsigned int& discardedPackets);
+    int GetRTPStatistics(CallStatistics& stats);
+    int SetFECStatus(bool enable, int redPayloadtype);
+    int GetFECStatus(bool& enabled, int& redPayloadtype);
+    int SetRTPKeepaliveStatus(bool enable, unsigned char unknownPayloadType,
+                              int deltaTransmitTimeSeconds);
+    int GetRTPKeepaliveStatus(bool& enabled, unsigned char& unknownPayloadType,
+                              int& deltaTransmitTimeSeconds);
+    int StartRTPDump(const char fileNameUTF8[1024], RTPDirections direction);
+    int StopRTPDump(RTPDirections direction);
+    bool RTPDumpIsActive(RTPDirections direction);
+    int InsertExtraRTPPacket(unsigned char payloadType, bool markerBit,
+                             const char* payloadData,
+                             unsigned short payloadSize);
+
+public:
+    // From AudioPacketizationCallback in the ACM
+    WebRtc_Word32 SendData(FrameType frameType,
+                           WebRtc_UWord8 payloadType,
+                           WebRtc_UWord32 timeStamp,
+                           const WebRtc_UWord8* payloadData,
+                           WebRtc_UWord16 payloadSize,
+                           const RTPFragmentationHeader* fragmentation);
+    // From ACMVADCallback in the ACM
+    WebRtc_Word32 InFrameType(WebRtc_Word16 frameType);
+
+#ifdef WEBRTC_DTMF_DETECTION
+public: // From AudioCodingFeedback in the ACM
+    int IncomingDtmf(const WebRtc_UWord8 digitDtmf, const bool end);
+#endif
+
+public:
+    WebRtc_Word32 OnRxVadDetected(const int vadDecision);
+
+public:
+    // From RtpData in the RTP/RTCP module
+    WebRtc_Word32 OnReceivedPayloadData(const WebRtc_UWord8* payloadData,
+                                        const WebRtc_UWord16 payloadSize,
+                                        const WebRtcRTPHeader* rtpHeader);
+
+public:
+    // From RtpFeedback in the RTP/RTCP module
+    WebRtc_Word32 OnInitializeDecoder(
+            const WebRtc_Word32 id,
+            const WebRtc_Word8 payloadType,
+            const WebRtc_Word8 payloadName[RTP_PAYLOAD_NAME_SIZE],
+            const WebRtc_UWord32 frequency,
+            const WebRtc_UWord8 channels,
+            const WebRtc_UWord32 rate);
+
+    void OnPacketTimeout(const WebRtc_Word32 id);
+
+    void OnReceivedPacket(const WebRtc_Word32 id,
+                          const RtpRtcpPacketType packetType);
+
+    void OnPeriodicDeadOrAlive(const WebRtc_Word32 id,
+                               const RTPAliveType alive);
+
+    void OnIncomingSSRCChanged(const WebRtc_Word32 id,
+                               const WebRtc_UWord32 SSRC);
+
+    void OnIncomingCSRCChanged(const WebRtc_Word32 id,
+                               const WebRtc_UWord32 CSRC, const bool added);
+
+public:
+    // From RtcpFeedback in the RTP/RTCP module
+    void OnLipSyncUpdate(const WebRtc_Word32 id,
+                         const WebRtc_Word32 audioVideoOffset) {};
+
+    void OnApplicationDataReceived(const WebRtc_Word32 id,
+                                   const WebRtc_UWord8 subType,
+                                   const WebRtc_UWord32 name,
+                                   const WebRtc_UWord16 length,
+                                   const WebRtc_UWord8* data);
+
+    void OnRTCPPacketTimeout(const WebRtc_Word32 id) {};
+
+    void OnTMMBRReceived(const WebRtc_Word32 id,
+                         const WebRtc_UWord16 bwEstimateKbit) {};
+
+    void OnSendReportReceived(const WebRtc_Word32 id,
+                              const WebRtc_UWord32 senderSSRC,
+                              const WebRtc_UWord8* packet,
+                              const WebRtc_UWord16 packetLength) {};
+
+    void OnReceiveReportReceived(const WebRtc_Word32 id,
+                                 const WebRtc_UWord32 senderSSRC,
+                                 const WebRtc_UWord8* packet,
+                                 const WebRtc_UWord16 packetLength) {};
+
+public:
+    // From RtpAudioFeedback in the RTP/RTCP module
+    void OnReceivedTelephoneEvent(const WebRtc_Word32 id,
+                                  const WebRtc_UWord8 event,
+                                  const bool endOfEvent);
+
+    void OnPlayTelephoneEvent(const WebRtc_Word32 id,
+                              const WebRtc_UWord8 event,
+                              const WebRtc_UWord16 lengthMs,
+                              const WebRtc_UWord8 volume);
+
+public:
+    // From UdpTransportData in the Socket Transport module
+    void IncomingRTPPacket(const WebRtc_Word8* incomingRtpPacket,
+                           const WebRtc_Word32 rtpPacketLength,
+                           const WebRtc_Word8* fromIP,
+                           const WebRtc_UWord16 fromPort);
+
+    void IncomingRTCPPacket(const WebRtc_Word8* incomingRtcpPacket,
+                            const WebRtc_Word32 rtcpPacketLength,
+                            const WebRtc_Word8* fromIP,
+                            const WebRtc_UWord16 fromPort);
+
+public:
+    // From Transport (called by the RTP/RTCP module)
+    int SendPacket(int /*channel*/, const void *data, int len);
+    int SendRTCPPacket(int /*channel*/, const void *data, int len);
+
+public:
+    // From MixerParticipant
+    WebRtc_Word32 GetAudioFrame(const WebRtc_Word32 id,
+                                AudioFrame& audioFrame);
+    WebRtc_Word32 NeededFrequency(const WebRtc_Word32 id);
+
+public:
+    // From MonitorObserver
+    void OnPeriodicProcess();
+
+public:
+    // From FileCallback
+    void PlayNotification(const WebRtc_Word32 id,
+                          const WebRtc_UWord32 durationMs);
+    void RecordNotification(const WebRtc_Word32 id,
+                            const WebRtc_UWord32 durationMs);
+    void PlayFileEnded(const WebRtc_Word32 id);
+    void RecordFileEnded(const WebRtc_Word32 id);
+
+public:
+    WebRtc_UWord32 InstanceId() const
+    {
+        return _instanceId;
+    };
+    WebRtc_Word32 ChannelId() const
+    {
+        return _channelId;
+    };
+    bool Playing() const
+    {
+        return _playing;
+    };
+    bool Sending() const
+    {
+        return _sending;
+    };
+    bool Receiving() const
+    {
+        return _receiving;
+    };
+    bool ExternalTransport() const
+    {
+        return _externalTransport;
+    };
+    bool OutputIsOnHold() const
+    {
+        return _outputIsOnHold;
+    };
+    bool InputIsOnHold() const
+    {
+        return _inputIsOnHold;
+    };
+    RtpRtcp* const RtpRtcpModulePtr()
+    {
+        return &_rtpRtcpModule;
+    };
+    WebRtc_Word8 const OutputEnergyLevel()
+    {
+        return _outputAudioLevel.Level();
+    };
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    bool SendSocketsInitialized() const
+    {
+        return _socketTransportModule.SendSocketsInitialized();
+    };
+    bool ReceiveSocketsInitialized() const
+    {
+        return _socketTransportModule.ReceiveSocketsInitialized();
+    };
+#endif
+    WebRtc_UWord32 Demultiplex(const AudioFrame& audioFrame,
+                               const WebRtc_UWord8 audioLevel_dBov);
+    WebRtc_UWord32 PrepareEncodeAndSend(WebRtc_UWord32 mixingFrequency);
+    WebRtc_UWord32 EncodeAndSend();
+
+private:
+    int InsertInbandDtmfTone();
+    WebRtc_Word32
+            MixOrReplaceAudioWithFile(const WebRtc_UWord32 mixingFrequency);
+    WebRtc_Word32 MixAudioWithFile(AudioFrame& audioFrame,
+                                   const WebRtc_UWord32 mixingFrequency);
+    WebRtc_Word32 GetPlayoutTimeStamp(WebRtc_UWord32& playoutTimestamp);
+    void UpdateDeadOrAliveCounters(bool alive);
+    WebRtc_Word32 SendPacketRaw(const void *data, int len, bool RTCP);
+    WebRtc_Word32 UpdatePacketDelay(const WebRtc_UWord32 timestamp,
+                                    const WebRtc_UWord16 sequenceNumber);
+    void RegisterReceiveCodecsToRTPModule();
+    int ApmProcessRx(AudioFrame& audioFrame);
+
+private:
+    CriticalSectionWrapper& _fileCritSect;
+    CriticalSectionWrapper& _callbackCritSect;
+    CriticalSectionWrapper& _transmitCritSect;
+    WebRtc_UWord32 _instanceId;
+    WebRtc_Word32 _channelId;
+
+private:
+    RtpRtcp& _rtpRtcpModule;
+    AudioCodingModule& _audioCodingModule;
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    UdpTransport& _socketTransportModule;
+#endif
+#ifdef WEBRTC_SRTP
+    SrtpModule& _srtpModule;
+#endif
+    RtpDump& _rtpDumpIn;
+    RtpDump& _rtpDumpOut;
+private:
+    AudioLevel _outputAudioLevel;
+    bool _externalTransport;
+    AudioFrame _audioFrame;
+    WebRtc_UWord8 _audioLevel_dBov;
+    FilePlayer* _inputFilePlayerPtr;
+    FilePlayer* _outputFilePlayerPtr;
+    FileRecorder* _outputFileRecorderPtr;
+    WebRtc_UWord32 _inputFilePlayerId;
+    WebRtc_UWord32 _outputFilePlayerId;
+    WebRtc_UWord32 _outputFileRecorderId;
+    bool _inputFilePlaying;
+    bool _outputFilePlaying;
+    bool _outputFileRecording;
+    DtmfInbandQueue _inbandDtmfQueue;
+    DtmfInband _inbandDtmfGenerator;
+    bool _outputExternalMedia;
+    bool _inputExternalMedia;
+    VoEMediaProcess* _inputExternalMediaCallbackPtr;
+    VoEMediaProcess* _outputExternalMediaCallbackPtr;
+    WebRtc_UWord8* _encryptionRTPBufferPtr;
+    WebRtc_UWord8* _decryptionRTPBufferPtr;
+    WebRtc_UWord8* _encryptionRTCPBufferPtr;
+    WebRtc_UWord8* _decryptionRTCPBufferPtr;
+    WebRtc_UWord32 _timeStamp;
+    WebRtc_UWord8 _sendTelephoneEventPayloadType;
+    WebRtc_UWord32 _playoutTimeStampRTP;
+    WebRtc_UWord32 _playoutTimeStampRTCP;
+    WebRtc_UWord32 _numberOfDiscardedPackets;
+private:
+    // uses
+    Statistics* _engineStatisticsPtr;
+    OutputMixer* _outputMixerPtr;
+    TransmitMixer* _transmitMixerPtr;
+    ProcessThread* _moduleProcessThreadPtr;
+    AudioDeviceModule* _audioDeviceModulePtr;
+    VoiceEngineObserver* _voiceEngineObserverPtr; // owned by base
+    CriticalSectionWrapper* _callbackCritSectPtr; // owned by base
+    Transport* _transportPtr; // WebRtc socket or external transport
+    Encryption* _encryptionPtr; // WebRtc SRTP or external encryption
+    AudioProcessing* _rxAudioProcessingModulePtr; // far end AudioProcessing
+#ifdef WEBRTC_DTMF_DETECTION
+    VoETelephoneEventObserver* _telephoneEventDetectionPtr;
+#endif
+    VoERxVadCallback* _rxVadObserverPtr;
+    WebRtc_Word32 _oldVadDecision;
+    WebRtc_Word32 _sendFrameType; // Frame type sent: 1 = voice, 0 = otherwise
+    VoERTPObserver* _rtpObserverPtr;
+    VoERTCPObserver* _rtcpObserverPtr;
+private:
+    // VoEBase
+    bool _outputIsOnHold;
+    bool _externalPlayout;
+    bool _inputIsOnHold;
+    bool _playing;
+    bool _sending;
+    bool _receiving;
+    bool _mixFileWithMicrophone;
+    bool _rtpObserver;
+    bool _rtcpObserver;
+    // VoEVolumeControl
+    bool _mute;
+    float _panLeft;
+    float _panRight;
+    float _outputGain;
+    // VoEEncryption
+    bool _encrypting;
+    bool _decrypting;
+    // VoEDtmf
+    bool _playOutbandDtmfEvent;
+    bool _playInbandDtmfEvent;
+    bool _inbandTelephoneEventDetection;
+    bool _outOfBandTelephoneEventDetecion;
+    // VoeRTP_RTCP
+    WebRtc_UWord8 _extraPayloadType;
+    bool _insertExtraRTPPacket;
+    bool _extraMarkerBit;
+    WebRtc_UWord32 _lastLocalTimeStamp;
+    WebRtc_Word8 _lastPayloadType;
+    bool _includeAudioLevelIndication;
+    // VoENetwork
+    bool _rtpPacketTimedOut;
+    bool _rtpPacketTimeOutIsEnabled;
+    WebRtc_UWord32 _rtpTimeOutSeconds;
+    bool _connectionObserver;
+    VoEConnectionObserver* _connectionObserverPtr;
+    WebRtc_UWord32 _countAliveDetections;
+    WebRtc_UWord32 _countDeadDetections;
+    AudioFrame::SpeechType _outputSpeechType;
+    // VoEVideoSync
+    WebRtc_UWord32 _averageDelayMs;
+    WebRtc_UWord16 _previousSequenceNumber;
+    WebRtc_UWord32 _previousTimestamp;
+    WebRtc_UWord16 _recPacketDelayMs;
+    // VoEAudioProcessing
+    bool _RxVadDetection;
+    bool _rxApmIsEnabled;
+    bool _rxAgcIsEnabled;
+    bool _rxNsIsEnabled;
+};
+
+} // namespace voe
+
+} // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_CHANNEL_H
diff --git a/voice_engine/main/source/channel_manager.cc b/voice_engine/main/source/channel_manager.cc
new file mode 100644
index 0000000..47cec4a
--- /dev/null
+++ b/voice_engine/main/source/channel_manager.cc
@@ -0,0 +1,161 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "channel.h"
+#include "channel_manager.h"
+
+namespace webrtc
+{
+
+namespace voe
+{
+
+ChannelManager::ChannelManager(const WebRtc_UWord32 instanceId) :
+    ChannelManagerBase(),
+    _instanceId(instanceId)
+{
+}
+
+ChannelManager::~ChannelManager()
+{
+    ChannelManagerBase::DestroyAllItems();
+}
+
+bool ChannelManager::CreateChannel(WebRtc_Word32& channelId)
+{
+    return ChannelManagerBase::CreateItem(channelId);
+}
+
+WebRtc_Word32 ChannelManager::DestroyChannel(const WebRtc_Word32 channelId)
+{
+    Channel* deleteChannel =
+        static_cast<Channel*> (ChannelManagerBase::RemoveItem(channelId));
+    if (!deleteChannel)
+    {
+        return -1;
+    }
+    delete deleteChannel;
+    return 0;
+}
+
+WebRtc_Word32 ChannelManager::NumOfChannels() const
+{
+    return ChannelManagerBase::NumOfItems();
+}
+
+WebRtc_Word32 ChannelManager::MaxNumOfChannels() const
+{
+    return ChannelManagerBase::MaxNumOfItems();
+}
+
+void* ChannelManager::NewItem(WebRtc_Word32 itemID)
+{
+    Channel* channel;
+    if (Channel::CreateChannel(channel, itemID, _instanceId) == -1)
+    {
+        return NULL;
+    }
+    return static_cast<void*> (channel);
+}
+
+void ChannelManager::DeleteItem(void* item)
+{
+    Channel* deleteItem = static_cast<Channel*> (item);
+    delete deleteItem;
+}
+
+Channel* ChannelManager::GetChannel(const WebRtc_Word32 channelId) const
+{
+    return static_cast<Channel*> (ChannelManagerBase::GetItem(channelId));
+}
+
+void ChannelManager::ReleaseChannel()
+{
+    ChannelManagerBase::ReleaseItem();
+}
+
+void ChannelManager::GetChannelIds(WebRtc_Word32* channelsArray,
+                                   WebRtc_Word32& numOfChannels) const
+{
+    ChannelManagerBase::GetItemIds(channelsArray, numOfChannels);
+}
+
+void ChannelManager::GetChannels(MapWrapper& channels) const
+{
+    ChannelManagerBase::GetChannels(channels);
+}
+
+ScopedChannel::ScopedChannel(ChannelManager& chManager) :
+    _chManager(chManager),
+    _channelPtr(NULL)
+{
+    // Copy all existing channels to the local map.
+    // It is not possible to utilize the ChannelPtr() API after
+    // this constructor. The intention is that this constructor
+    // is used in combination with the scoped iterator.
+    _chManager.GetChannels(_channels);
+}
+
+ScopedChannel::ScopedChannel(ChannelManager& chManager,
+                             WebRtc_Word32 channelId) :
+    _chManager(chManager),
+    _channelPtr(NULL)
+{
+    _channelPtr = _chManager.GetChannel(channelId);
+}
+
+ScopedChannel::~ScopedChannel()
+{
+    if (_channelPtr != NULL || _channels.Size() != 0)
+    {
+        _chManager.ReleaseChannel();
+    }
+
+    // Delete the map
+    while (_channels.Erase(_channels.First()) == 0)
+        ;
+}
+
+Channel* ScopedChannel::ChannelPtr()
+{
+    return _channelPtr;
+}
+
+Channel* ScopedChannel::GetFirstChannel(void*& iterator) const
+{
+    MapItem* it = _channels.First();
+    iterator = (void*) it;
+    if (!it)
+    {
+        return NULL;
+    }
+    return static_cast<Channel*> (it->GetItem());
+}
+
+Channel* ScopedChannel::GetNextChannel(void*& iterator) const
+{
+    MapItem* it = (MapItem*) iterator;
+    if (!it)
+    {
+        iterator = NULL;
+        return NULL;
+    }
+    it = _channels.Next(it);
+    iterator = (void*) it;
+    if (!it)
+    {
+        return NULL;
+    }
+    return static_cast<Channel*> (it->GetItem());
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/voice_engine/main/source/channel_manager.h b/voice_engine/main/source/channel_manager.h
new file mode 100644
index 0000000..6c40ef1
--- /dev/null
+++ b/voice_engine/main/source/channel_manager.h
@@ -0,0 +1,89 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_H
+#define WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_H
+
+#include "channel_manager_base.h"
+#include "typedefs.h"
+
+namespace webrtc
+{
+
+namespace voe
+{
+
+class ScopedChannel;
+class Channel;
+
+class ChannelManager: private ChannelManagerBase
+{
+    friend class ScopedChannel;
+
+public:
+    bool CreateChannel(WebRtc_Word32& channelId);
+
+    WebRtc_Word32 DestroyChannel(const WebRtc_Word32 channelId);
+
+    WebRtc_Word32 MaxNumOfChannels() const;
+
+    WebRtc_Word32 NumOfChannels() const;
+
+    void GetChannelIds(WebRtc_Word32* channelsArray,
+                       WebRtc_Word32& numOfChannels) const;
+
+    ChannelManager(const WebRtc_UWord32 instanceId);
+
+    ~ChannelManager();
+
+private:
+    ChannelManager(const ChannelManager&);
+
+    ChannelManager& operator=(const ChannelManager&);
+
+    Channel* GetChannel(const WebRtc_Word32 channelId) const;
+
+    void GetChannels(MapWrapper& channels) const;
+
+    void ReleaseChannel();
+
+    virtual void* NewItem(WebRtc_Word32 itemID);
+
+    virtual void DeleteItem(void* item);
+
+    WebRtc_UWord32 _instanceId;
+};
+
+class ScopedChannel
+{
+public:
+    // Can only be created by the channel manager
+    ScopedChannel(ChannelManager& chManager);
+
+    ScopedChannel(ChannelManager& chManager, WebRtc_Word32 channelId);
+
+    Channel* ChannelPtr();
+
+    Channel* GetFirstChannel(void*& iterator) const;
+
+    Channel* GetNextChannel(void*& iterator) const;
+
+    ~ScopedChannel();
+private:
+    ChannelManager& _chManager;
+    Channel* _channelPtr;
+    MapWrapper _channels;
+};
+
+} // namespace voe
+
+} // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_H
diff --git a/voice_engine/main/source/channel_manager_base.cc b/voice_engine/main/source/channel_manager_base.cc
new file mode 100644
index 0000000..49d0235
--- /dev/null
+++ b/voice_engine/main/source/channel_manager_base.cc
@@ -0,0 +1,227 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "channel_manager_base.h"
+
+#include "critical_section_wrapper.h"
+#include "rw_lock_wrapper.h"
+#include <cassert>
+
+namespace webrtc
+{
+
+namespace voe
+{
+
+ChannelManagerBase::ChannelManagerBase() :
+    _itemsCritSectPtr(CriticalSectionWrapper::CreateCriticalSection()),
+    _itemsRWLockPtr(RWLockWrapper::CreateRWLock())
+{
+    for (int i = 0; i < KMaxNumberOfItems; i++)
+    {
+        _freeItemIds[i] = true;
+    }
+}
+
+ChannelManagerBase::~ChannelManagerBase()
+{
+    if (_itemsRWLockPtr)
+    {
+        delete _itemsRWLockPtr;
+        _itemsRWLockPtr = NULL;
+    }
+    if (_itemsCritSectPtr)
+    {
+        delete _itemsCritSectPtr;
+        _itemsCritSectPtr = NULL;
+    }
+}
+
+bool ChannelManagerBase::GetFreeItemId(WebRtc_Word32& itemId)
+{
+    CriticalSectionScoped cs(*_itemsCritSectPtr);
+    WebRtc_Word32 i(0);
+    while (i < KMaxNumberOfItems)
+    {
+        if (_freeItemIds[i])
+        {
+            itemId = i;
+            _freeItemIds[i] = false;
+            return true;
+        }
+        i++;
+    }
+    return false;
+}
+
+void ChannelManagerBase::AddFreeItemId(WebRtc_Word32 itemId)
+{
+    assert(itemId < KMaxNumberOfItems);
+    _freeItemIds[itemId] = true;
+}
+
+void ChannelManagerBase::RemoveFreeItemIds()
+{
+    for (int i = 0; i < KMaxNumberOfItems; i++)
+    {
+        _freeItemIds[i] = false;
+    }
+}
+
+bool ChannelManagerBase::CreateItem(WebRtc_Word32& itemId)
+{
+    _itemsCritSectPtr->Enter();
+    void* itemPtr;
+    itemId = -1;
+    const bool success = GetFreeItemId(itemId);
+    if (!success)
+    {
+        _itemsCritSectPtr->Leave();
+        return false;
+    }
+    itemPtr = NewItem(itemId);
+    if (!itemPtr)
+    {
+        _itemsCritSectPtr->Leave();
+        return false;
+    }
+    _itemsCritSectPtr->Leave();
+    InsertItem(itemId, itemPtr);
+
+    return true;
+}
+
+void ChannelManagerBase::InsertItem(WebRtc_Word32 itemId, void* item)
+{
+    CriticalSectionScoped cs(*_itemsCritSectPtr);
+    assert(!_items.Find(itemId));
+    _items.Insert(itemId, item);
+}
+
+void*
+ChannelManagerBase::RemoveItem(WebRtc_Word32 itemId)
+{
+    CriticalSectionScoped cs(*_itemsCritSectPtr);
+    WriteLockScoped wlock(*_itemsRWLockPtr);
+    MapItem* it = _items.Find(itemId);
+    if (!it)
+    {
+        return 0;
+    }
+    void* returnItem = it->GetItem();
+    _items.Erase(it);
+    AddFreeItemId(itemId);
+
+    return returnItem;
+}
+
+void ChannelManagerBase::DestroyAllItems()
+{
+    CriticalSectionScoped cs(*_itemsCritSectPtr);
+    MapItem* it = _items.First();
+    while (it)
+    {
+        DeleteItem(it->GetItem());
+        _items.Erase(it);
+        it = _items.First();
+    }
+    RemoveFreeItemIds();
+}
+
+WebRtc_Word32 ChannelManagerBase::NumOfItems() const
+{
+    return _items.Size();
+}
+
+WebRtc_Word32 ChannelManagerBase::MaxNumOfItems() const
+{
+    return static_cast<WebRtc_Word32> (KMaxNumberOfItems);
+}
+
+void*
+ChannelManagerBase::GetItem(WebRtc_Word32 itemId) const
+{
+    CriticalSectionScoped cs(*_itemsCritSectPtr);
+    MapItem* it = _items.Find(itemId);
+    if (!it)
+    {
+        return 0;
+    }
+    _itemsRWLockPtr->AcquireLockShared();
+    return it->GetItem();
+}
+
+void*
+ChannelManagerBase::GetFirstItem(void*& iterator) const
+{
+    CriticalSectionScoped cs(*_itemsCritSectPtr);
+    MapItem* it = _items.First();
+    iterator = (void*) it;
+    if (!it)
+    {
+        return 0;
+    }
+    return it->GetItem();
+}
+
+void*
+ChannelManagerBase::GetNextItem(void*& iterator) const
+{
+    CriticalSectionScoped cs(*_itemsCritSectPtr);
+    MapItem* it = (MapItem*) iterator;
+    if (!it)
+    {
+        iterator = 0;
+        return 0;
+    }
+    it = _items.Next(it);
+    iterator = (void*) it;
+    if (!it)
+    {
+        return 0;
+    }
+    return it->GetItem();
+}
+
+void ChannelManagerBase::ReleaseItem()
+{
+    _itemsRWLockPtr->ReleaseLockShared();
+}
+
+void ChannelManagerBase::GetItemIds(WebRtc_Word32* channelsArray,
+                                    WebRtc_Word32& numOfChannels) const
+{
+    MapItem* it = _items.First();
+    numOfChannels = (numOfChannels <= _items.Size()) ?
+        numOfChannels : _items.Size();
+    for (int i = 0; i < numOfChannels; i++)
+    {
+        channelsArray[i] = it->GetId();
+        it = _items.Next(it);
+    }
+}
+
+void ChannelManagerBase::GetChannels(MapWrapper& channels) const
+{
+    CriticalSectionScoped cs(*_itemsCritSectPtr);
+    if (_items.Size() == 0)
+    {
+        return;
+    }
+    _itemsRWLockPtr->AcquireLockShared();
+    for (MapItem* it = _items.First(); it != NULL; it = _items.Next(it))
+    {
+        channels.Insert(it->GetId(), it->GetItem());
+    }
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/voice_engine/main/source/channel_manager_base.h b/voice_engine/main/source/channel_manager_base.h
new file mode 100644
index 0000000..0831e43
--- /dev/null
+++ b/voice_engine/main/source/channel_manager_base.h
@@ -0,0 +1,90 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_BASE_H
+#define WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_BASE_H
+
+#include "typedefs.h"
+#include "map_wrapper.h"
+#include "voice_engine_defines.h"
+
+namespace webrtc
+{
+class CriticalSectionWrapper;
+class RWLockWrapper;
+
+namespace voe
+{
+
+class ScopedChannel;
+class Channel;
+
+class ChannelManagerBase
+{
+public:
+    enum {KMaxNumberOfItems = kVoiceEngineMaxNumOfChannels};
+
+protected:
+    bool CreateItem(WebRtc_Word32& itemId);
+
+    void InsertItem(WebRtc_Word32 itemId, void* item);
+
+    void* RemoveItem(WebRtc_Word32 itemId);
+
+    void* GetItem(WebRtc_Word32 itemId) const;
+
+    void* GetFirstItem(void*& iterator) const;
+
+    void* GetNextItem(void*& iterator) const;
+
+    void ReleaseItem();
+
+    void AddFreeItemId(WebRtc_Word32 itemId);
+
+    bool GetFreeItemId(WebRtc_Word32& itemId);
+
+    void RemoveFreeItemIds();
+
+    void DestroyAllItems();
+
+    WebRtc_Word32 NumOfItems() const;
+
+    WebRtc_Word32 MaxNumOfItems() const;
+
+    void GetItemIds(WebRtc_Word32* channelsArray,
+                    WebRtc_Word32& numOfChannels) const;
+
+    void GetChannels(MapWrapper& channels) const;
+
+    virtual void* NewItem(WebRtc_Word32 itemId) = 0;
+
+    virtual void DeleteItem(void* item) = 0;
+
+    ChannelManagerBase();
+
+    virtual ~ChannelManagerBase();
+
+private:
+    // Protects _items and _freeItemIds
+    CriticalSectionWrapper* _itemsCritSectPtr;
+
+    MapWrapper _items;
+
+    bool _freeItemIds[KMaxNumberOfItems];
+
+    // Protects channels from being destroyed while being used
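+    // The shared (read) lock is acquired in GetItem() and GetChannels() and
+    // released again in ReleaseItem(); ScopedChannel pairs these calls so
+    // that a channel cannot be deleted while a caller is still using it.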
+    RWLockWrapper* _itemsRWLockPtr;
+};
+
+} // namespace voe
+
+} // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_BASE_H
diff --git a/voice_engine/main/source/dtmf_inband.cc b/voice_engine/main/source/dtmf_inband.cc
new file mode 100644
index 0000000..44505c9
--- /dev/null
+++ b/voice_engine/main/source/dtmf_inband.cc
@@ -0,0 +1,389 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "dtmf_inband.h"
+
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include <cassert>
+
+namespace webrtc {
+
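+// First set of tables is 2*cos(2*pi*f/fs) in Q14, where f is one of the four
+// DTMF row (low) or column (high) frequencies (inferred from the values; see
+// the oscillator recursion y[n] = a*2*y[n-1] - y[n-2] further down).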
+const WebRtc_Word16 Dtmf_a_times2Tab8Khz[8]=
+{
+	27978, 26956, 25701, 24219,
+	19073, 16325, 13085, 9314
+};
+
+const WebRtc_Word16 Dtmf_a_times2Tab16Khz[8]=
+{
+	31548, 31281, 30951, 30556,
+	29144, 28361, 27409, 26258
+};
+
+const WebRtc_Word16 Dtmf_a_times2Tab32Khz[8]=
+{
+	32462,32394, 32311, 32210, 31849, 31647, 31400, 31098
+};
+
+// Second table is sin(2*pi*f/fs) in Q14
+
+const WebRtc_Word16 Dtmf_ym2Tab8Khz[8]=
+{
+	8527, 9315, 10163, 11036,
+	13322, 14206, 15021, 15708
+};
+
+const WebRtc_Word16 Dtmf_ym2Tab16Khz[8]=
+{
+	4429, 4879, 5380, 5918,
+	7490, 8207, 8979, 9801
+};
+
+const WebRtc_Word16 Dtmf_ym2Tab32Khz[8]=
+{
+	2235, 2468, 2728, 3010, 3853, 4249, 4685, 5164
+};
+
+const WebRtc_Word16 Dtmf_dBm0kHz[37]=
+{
+       16141,      14386,      12821,      11427,      10184,       9077,
+        8090,       7210,       6426,       5727,       5104,       4549,
+        4054,       3614,       3221,       2870,       2558,       2280,
+        2032,       1811,       1614,       1439,       1282,       1143,
+        1018,        908,        809,        721,        643,        573,
+         510,        455,        405,        361,        322,        287,
+		 256
+};
+
+
+DtmfInband::DtmfInband(const WebRtc_Word32 id) :
+    _id(id),
+    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _outputFrequencyHz(8000),
+    _reinit(true),
+    _remainingSamples(0),
+    _frameLengthSamples(0),
+    _eventCode(0),
+    _attenuationDb(0),
+    _playing(false),
+    _delaySinceLastToneMS(1000),
+    _lengthMs(0)
+{
+    memset(_oldOutputLow, 0, sizeof(_oldOutputLow));
+    memset(_oldOutputHigh, 0, sizeof(_oldOutputHigh));
+}
+
+DtmfInband::~DtmfInband()
+{
+	delete &_critSect;
+}
+
+int
+DtmfInband::SetSampleRate(const WebRtc_UWord16 frequency)
+{
+    if (frequency != 8000 &&
+            frequency != 16000 &&
+            frequency != 32000)
+    {
+        // invalid sample rate
+        assert(false);
+        return -1;
+    }
+    _outputFrequencyHz = frequency;
+    return 0;
+}
+
+int
+DtmfInband::GetSampleRate(WebRtc_UWord16& frequency)
+{
+    frequency = _outputFrequencyHz;
+    return 0;
+}
+
+void 
+DtmfInband::Init()
+{
+    _remainingSamples = 0;
+    _frameLengthSamples = 0;
+    _eventCode = 0;
+    _attenuationDb = 0;
+    _lengthMs = 0;
+    _reinit = true;
+    _oldOutputLow[0] = 0;
+    _oldOutputLow[1] = 0;
+    _oldOutputHigh[0] = 0;
+    _oldOutputHigh[1] = 0;
+    _delaySinceLastToneMS = 1000;
+}
+
+int
+DtmfInband::AddTone(const WebRtc_UWord8 eventCode,
+                    WebRtc_Word32 lengthMs,
+                    WebRtc_Word32 attenuationDb)
+{
+    CriticalSectionScoped lock(_critSect);
+
+    if (attenuationDb > 36 || eventCode > 15)
+    {
+        assert(false);
+        return -1;
+    }
+
+    if (IsAddingTone())
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_id,-1),
+                   "DtmfInband::AddTone() new tone interrupts ongoing tone");
+    }
+
+    ReInit();
+
+    _frameLengthSamples = static_cast<WebRtc_Word16> (_outputFrequencyHz / 100);
+    _eventCode = static_cast<WebRtc_Word16> (eventCode);
+    _attenuationDb = static_cast<WebRtc_Word16> (attenuationDb);
+    _remainingSamples = static_cast<WebRtc_Word32>
+        (lengthMs * (_outputFrequencyHz / 1000));
+    _lengthMs = lengthMs;
+
+    return 0;
+}
+
+int
+DtmfInband::ResetTone()
+{
+    CriticalSectionScoped lock(_critSect);
+
+    ReInit();
+
+    _frameLengthSamples = static_cast<WebRtc_Word16> (_outputFrequencyHz / 100);
+    _remainingSamples = static_cast<WebRtc_Word32>
+        (_lengthMs * (_outputFrequencyHz / 1000));
+
+    return 0;
+}
+
+int
+DtmfInband::StartTone(const WebRtc_UWord8 eventCode,
+                      WebRtc_Word32 attenuationDb)
+{
+    CriticalSectionScoped lock(_critSect);
+
+    if (attenuationDb > 36 || eventCode > 15)
+    {
+        assert(false);
+        return -1;
+    }
+
+    if (IsAddingTone())
+    {
+        return -1;
+    }
+
+    ReInit();
+
+    _frameLengthSamples = static_cast<WebRtc_Word16> (_outputFrequencyHz / 100);
+    _eventCode = static_cast<WebRtc_Word16> (eventCode);
+    _attenuationDb = static_cast<WebRtc_Word16> (attenuationDb);
+    _playing = true;
+
+    return 0;
+}
+
+int
+DtmfInband::StopTone()
+{
+    CriticalSectionScoped lock(_critSect);
+
+    if (!_playing)
+    {
+        return 0;
+    }
+
+    _playing = false;
+
+    return 0;
+}
+
+// Shall be called between tones
+void 
+DtmfInband::ReInit()
+{
+    _reinit = true;
+}
+
+bool 
+DtmfInband::IsAddingTone()
+{
+    CriticalSectionScoped lock(_critSect);
+    return (_remainingSamples > 0 || _playing);
+}
+
+int
+DtmfInband::Get10msTone(WebRtc_Word16 output[320],
+                        WebRtc_UWord16& outputSizeInSamples)
+{
+    CriticalSectionScoped lock(_critSect);
+    if (DtmfFix_generate(output,
+                         _eventCode,
+                         _attenuationDb,
+                         _frameLengthSamples,
+                         _outputFrequencyHz) == -1)
+    {
+        return -1;
+    }
+    _remainingSamples -= _frameLengthSamples;
+    outputSizeInSamples = _frameLengthSamples;
+    _delaySinceLastToneMS = 0;
+    return 0;
+}
+
+void
+DtmfInband::UpdateDelaySinceLastTone()
+{
+    _delaySinceLastToneMS += kDtmfFrameSizeMs;
+    // avoid wraparound
+    if (_delaySinceLastToneMS > (1<<30))
+    {
+        _delaySinceLastToneMS = 1000;
+    }
+}
+
+WebRtc_UWord32
+DtmfInband::DelaySinceLastTone() const
+{
+    return _delaySinceLastToneMS;
+}
+
+WebRtc_Word16
+DtmfInband::DtmfFix_generate(WebRtc_Word16 *decoded,
+                             const WebRtc_Word16 value,
+                             const WebRtc_Word16 volume,
+                             const WebRtc_Word16 frameLen,
+                             const WebRtc_Word16 fs)
+{
+    const WebRtc_Word16 *a_times2Tbl;
+    const WebRtc_Word16 *y2_Table;
+    WebRtc_Word16 a1_times2 = 0, a2_times2 = 0;
+
+    if (fs==8000) {
+        a_times2Tbl=Dtmf_a_times2Tab8Khz;
+        y2_Table=Dtmf_ym2Tab8Khz;
+    } else if (fs==16000) {
+        a_times2Tbl=Dtmf_a_times2Tab16Khz;
+        y2_Table=Dtmf_ym2Tab16Khz;
+    } else if (fs==32000) {
+        a_times2Tbl=Dtmf_a_times2Tab32Khz;
+        y2_Table=Dtmf_ym2Tab32Khz;
+    } else {
+        return(-1);
+    }
+
+    if ((value==1)||(value==2)||(value==3)||(value==12)) {
+        a1_times2=a_times2Tbl[0];
+        if (_reinit) {
+            _oldOutputLow[0]=y2_Table[0];
+            _oldOutputLow[1]=0;
+        }
+    } else if ((value==4)||(value==5)||(value==6)||(value==13)) {
+        a1_times2=a_times2Tbl[1];
+        if (_reinit) {
+            _oldOutputLow[0]=y2_Table[1];
+            _oldOutputLow[1]=0;
+        }
+    } else if ((value==7)||(value==8)||(value==9)||(value==14)) {
+        a1_times2=a_times2Tbl[2];
+        if (_reinit) {
+            _oldOutputLow[0]=y2_Table[2];
+            _oldOutputLow[1]=0;
+        }
+    } else if ((value==10)||(value==0)||(value==11)||(value==15)) {
+        a1_times2=a_times2Tbl[3];
+        if (_reinit) {
+            _oldOutputLow[0]=y2_Table[3];
+            _oldOutputLow[1]=0;
+        }
+    }
+    if ((value==1)||(value==4)||(value==7)||(value==10)) {
+        a2_times2=a_times2Tbl[4];
+        if (_reinit) {
+            _oldOutputHigh[0]=y2_Table[4];
+            _oldOutputHigh[1]=0;
+            _reinit=false;
+        }
+    } else if ((value==2)||(value==5)||(value==8)||(value==0)) {
+        a2_times2=a_times2Tbl[5];
+        if (_reinit) {
+            _oldOutputHigh[0]=y2_Table[5];
+            _oldOutputHigh[1]=0;
+            _reinit=false;
+        }
+    } else if ((value==3)||(value==6)||(value==9)||(value==11)) {
+        a2_times2=a_times2Tbl[6];
+        if (_reinit) {
+            _oldOutputHigh[0]=y2_Table[6];
+            _oldOutputHigh[1]=0;
+            _reinit=false;
+        }
+    } else if ((value==12)||(value==13)||(value==14)||(value==15)) {
+        a2_times2=a_times2Tbl[7];
+        if (_reinit) {
+            _oldOutputHigh[0]=y2_Table[7];
+            _oldOutputHigh[1]=0;
+            _reinit=false;
+        }
+    }
+
+    return (DtmfFix_generateSignal(a1_times2,
+                                   a2_times2,
+                                   volume,
+                                   decoded,
+                                   frameLen));
+}
+
+WebRtc_Word16
+DtmfInband::DtmfFix_generateSignal(const WebRtc_Word16 a1_times2,
+                                   const WebRtc_Word16 a2_times2,
+                                   const WebRtc_Word16 volume,
+                                   WebRtc_Word16 *signal,
+                                   const WebRtc_Word16 length)
+{
+    int i;
+
+    /* Generate Signal */
+    for (i=0;i<length;i++) {
+        WebRtc_Word32 tempVal;
+        WebRtc_Word16 tempValLow, tempValHigh;
+
+        /* Use recursion formula y[n] = a*2*y[n-1] - y[n-2] */
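+        /* a1_times2/a2_times2 hold 2*cos(w) in Q14; adding 8192 (= 2^13)
+           before the right shift by 14 rounds the product back to the
+           oscillator state scale. The states were seeded from the sin
+           tables in DtmfFix_generate(), which sets the tone amplitude. */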
+        tempValLow  = (WebRtc_Word16)(((( (WebRtc_Word32)(a1_times2 *
+            _oldOutputLow[1])) + 8192) >> 14) - _oldOutputLow[0]);
+        tempValHigh = (WebRtc_Word16)(((( (WebRtc_Word32)(a2_times2 *
+            _oldOutputHigh[1])) + 8192) >> 14) - _oldOutputHigh[0]);
+
+        /* Update memory */
+        _oldOutputLow[0]=_oldOutputLow[1];
+        _oldOutputLow[1]=tempValLow;
+        _oldOutputHigh[0]=_oldOutputHigh[1];
+        _oldOutputHigh[1]=tempValHigh;
+
+        tempVal = (WebRtc_Word32)(kDtmfAmpLow * tempValLow) +
+            (WebRtc_Word32)(kDtmfAmpHigh * tempValHigh);
+
+        /* Norm the signal to Q14 */
+        tempVal=(tempVal+16384)>>15;
+
+        /* Scale the signal to correct dbM0 value */
+        signal[i]=(WebRtc_Word16)((tempVal*Dtmf_dBm0kHz[volume]+8192)>>14);
+    }
+
+    return(0);
+}
+
+}  // namespace webrtc
diff --git a/voice_engine/main/source/dtmf_inband.h b/voice_engine/main/source/dtmf_inband.h
new file mode 100644
index 0000000..806fff0
--- /dev/null
+++ b/voice_engine/main/source/dtmf_inband.h
@@ -0,0 +1,93 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_DTMF_INBAND_H
+#define WEBRTC_VOICE_ENGINE_DTMF_INBAND_H
+
+#if _MSC_VER > 1000
+#pragma once
+#endif
+
+#include "typedefs.h"
+#include "voice_engine_defines.h"
+
+namespace webrtc {
+class CriticalSectionWrapper;
+
+class DtmfInband
+{
+public:
+    DtmfInband(const WebRtc_Word32 id);
+
+    virtual ~DtmfInband();
+
+    void Init();
+
+    int SetSampleRate(const WebRtc_UWord16 frequency);
+
+    int GetSampleRate(WebRtc_UWord16& frequency);
+
+    int AddTone(const WebRtc_UWord8 eventCode,
+                WebRtc_Word32 lengthMs,
+                WebRtc_Word32 attenuationDb);
+
+    int ResetTone();
+    int StartTone(const WebRtc_UWord8 eventCode,
+                  WebRtc_Word32 attenuationDb);
+
+    int StopTone();
+
+    bool IsAddingTone();
+
+    int Get10msTone(WebRtc_Word16 output[320],
+                    WebRtc_UWord16& outputSizeInSamples);
+
+    WebRtc_UWord32 DelaySinceLastTone() const;
+
+    void UpdateDelaySinceLastTone();
+
+private:
+    void ReInit();
+    WebRtc_Word16 DtmfFix_generate(WebRtc_Word16* decoded,
+                                   const WebRtc_Word16 value,
+                                   const WebRtc_Word16 volume,
+                                   const WebRtc_Word16 frameLen,
+                                   const WebRtc_Word16 fs);
+
+private:
+    enum {kDtmfFrameSizeMs = 10};
+    enum {kDtmfAmpHigh = 32768};
+    enum {kDtmfAmpLow  = 23171};	// 3 dB lower than the high frequency
+
+    WebRtc_Word16 DtmfFix_generateSignal(const WebRtc_Word16 a1_times2,
+                                         const WebRtc_Word16 a2_times2,
+                                         const WebRtc_Word16 volume,
+                                         WebRtc_Word16* signal,
+                                         const WebRtc_Word16 length);
+
+private:
+    CriticalSectionWrapper& _critSect;
+    WebRtc_Word32 _id;
+    WebRtc_UWord16 _outputFrequencyHz;  // {8000, 16000, 32000}
+    WebRtc_Word16 _oldOutputLow[2];     // Data needed for oscillator model
+    WebRtc_Word16 _oldOutputHigh[2];    // Data needed for oscillator model
+    WebRtc_Word16 _frameLengthSamples;  // {80, 160, 320}
+    WebRtc_Word32 _remainingSamples;
+    WebRtc_Word16 _eventCode;           // [0, 15]
+    WebRtc_Word16 _attenuationDb;       // [0, 36]
+    WebRtc_Word32 _lengthMs;
+    bool _reinit;  // 'true' if the oscillator should be reinit for next event
+    bool _playing;
+    WebRtc_UWord32 _delaySinceLastToneMS; // time since last generated tone [ms]
+};
+
+}   // namespace webrtc
+
+#endif // #ifndef WEBRTC_VOICE_ENGINE_DTMF_INBAND_H
diff --git a/voice_engine/main/source/dtmf_inband_queue.cc b/voice_engine/main/source/dtmf_inband_queue.cc
new file mode 100644
index 0000000..080ef3e
--- /dev/null
+++ b/voice_engine/main/source/dtmf_inband_queue.cc
@@ -0,0 +1,88 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "dtmf_inband_queue.h"
+#include "trace.h"
+
+namespace webrtc {
+
+DtmfInbandQueue::DtmfInbandQueue(const WebRtc_Word32 id):
+    _id(id),
+    _DtmfCritsect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _nextEmptyIndex(0)
+{
+    memset(_DtmfKey,0, sizeof(_DtmfKey));
+    memset(_DtmfLen,0, sizeof(_DtmfLen));
+    memset(_DtmfLevel,0, sizeof(_DtmfLevel));
+}
+
+DtmfInbandQueue::~DtmfInbandQueue()
+{
+    delete &_DtmfCritsect;
+}
+
+int
+DtmfInbandQueue::AddDtmf(WebRtc_UWord8 key,
+                         WebRtc_UWord16 len,
+                         WebRtc_UWord8 level)
+{
+    CriticalSectionScoped lock(_DtmfCritsect);
+
+    if (_nextEmptyIndex >= kDtmfInbandMax)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_id,-1),
+                   "DtmfInbandQueue::AddDtmf() unable to add Dtmf tone");
+        return -1;
+    }
+    WebRtc_Word32 index = _nextEmptyIndex;
+    _DtmfKey[index] = key;
+    _DtmfLen[index] = len;
+    _DtmfLevel[index] = level;
+    _nextEmptyIndex++;
+    return 0;
+}
+
+WebRtc_Word8
+DtmfInbandQueue::NextDtmf(WebRtc_UWord16* len, WebRtc_UWord8* level)
+{
+    CriticalSectionScoped lock(_DtmfCritsect);
+
+    if(!PendingDtmf())
+    {
+        return -1;
+    }
+    WebRtc_Word8 nextDtmf = _DtmfKey[0];
+    *len=_DtmfLen[0];
+    *level=_DtmfLevel[0];
+
+    // Shift the remaining (_nextEmptyIndex - 1) entries one step towards the
+    // front of the queue; using _nextEmptyIndex here would read one element
+    // past the valid range when the queue is full.
+    memmove(&(_DtmfKey[0]), &(_DtmfKey[1]),
+            (_nextEmptyIndex-1)*sizeof(WebRtc_UWord8));
+    memmove(&(_DtmfLen[0]), &(_DtmfLen[1]),
+            (_nextEmptyIndex-1)*sizeof(WebRtc_UWord16));
+    memmove(&(_DtmfLevel[0]), &(_DtmfLevel[1]),
+            (_nextEmptyIndex-1)*sizeof(WebRtc_UWord8));
+
+    _nextEmptyIndex--;
+    return nextDtmf;
+}
+
+bool 
+DtmfInbandQueue::PendingDtmf()
+{
+    return(_nextEmptyIndex>0);        
+}
+
+void 
+DtmfInbandQueue::ResetDtmf()
+{
+    _nextEmptyIndex = 0;
+}
+
+}  // namespace webrtc
diff --git a/voice_engine/main/source/dtmf_inband_queue.h b/voice_engine/main/source/dtmf_inband_queue.h
new file mode 100644
index 0000000..b3bd39e
--- /dev/null
+++ b/voice_engine/main/source/dtmf_inband_queue.h
@@ -0,0 +1,52 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_DTMF_INBAND_QUEUE_H
+#define WEBRTC_VOICE_ENGINE_DTMF_INBAND_QUEUE_H
+
+#include "critical_section_wrapper.h"
+#include "typedefs.h"
+#include "voice_engine_defines.h"
+
+
+namespace webrtc {
+
+class DtmfInbandQueue
+{
+public:
+
+    DtmfInbandQueue(const WebRtc_Word32 id);
+
+    virtual ~DtmfInbandQueue();
+
+    int AddDtmf(WebRtc_UWord8 DtmfKey,
+                WebRtc_UWord16 len,
+                WebRtc_UWord8 level);
+
+    WebRtc_Word8 NextDtmf(WebRtc_UWord16* len, WebRtc_UWord8* level);
+
+    bool PendingDtmf();
+
+    void ResetDtmf();
+
+private:
+    enum {kDtmfInbandMax = 20};
+
+    WebRtc_Word32 _id;
+    CriticalSectionWrapper& _DtmfCritsect;
+    WebRtc_UWord8 _nextEmptyIndex;
+    WebRtc_UWord8 _DtmfKey[kDtmfInbandMax];
+    WebRtc_UWord16 _DtmfLen[kDtmfInbandMax];
+    WebRtc_UWord8 _DtmfLevel[kDtmfInbandMax];
+};
+
+}   // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_DTMF_INBAND_QUEUE_H
diff --git a/voice_engine/main/source/level_indicator.cc b/voice_engine/main/source/level_indicator.cc
new file mode 100644
index 0000000..89004a5
--- /dev/null
+++ b/voice_engine/main/source/level_indicator.cc
@@ -0,0 +1,99 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "level_indicator.h"
+#include "module_common_types.h"
+#include "signal_processing_library.h"
+
+namespace webrtc {
+
+namespace voe {
+
+
+// Maps the scaled signal maximum (0-32) to the number of bars shown on the
+// indicator. The table has 33 elements because it is indexed in the
+// range 0-32.
+const WebRtc_Word8 permutation[33] =
+    {0,1,2,3,4,4,5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,8,9,9,9,9,9,9,9,9,9,9,9};
+
+
+AudioLevel::AudioLevel() :
+    _absMax(0),
+    _count(0),
+    _currentLevel(0),
+    _currentLevelFullRange(0)
+{
+}
+
+AudioLevel::~AudioLevel()
+{
+}
+
+void
+AudioLevel::Clear()
+{
+    _absMax = 0;
+    _count = 0;
+    _currentLevel = 0;
+    _currentLevelFullRange = 0;
+}
+
+void
+AudioLevel::ComputeLevel(const AudioFrame& audioFrame)
+{
+    WebRtc_Word16 absValue(0);
+
+    // Check speech level (works for 2 channels as well)
+    absValue = WebRtcSpl_MaxAbsValueW16(
+        audioFrame._payloadData,
+        audioFrame._payloadDataLengthInSamples*audioFrame._audioChannel);
+    if (absValue > _absMax)
+        _absMax = absValue;
+
+    // Update level approximately 10 times per second
+    if (_count++ == kUpdateFrequency)
+    {
+        _currentLevelFullRange = _absMax;
+
+        _count = 0;
+
+        // The highest value for a WebRtc_Word16 is 0x7fff = 32767.
+        // Divide by 1000 to get into the range 0-32, which is the index
+        // range of the permutation vector.
+        WebRtc_Word32 position = _absMax/1000;
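+        // Illustrative example: _absMax = 12800 gives position 12, which
+        // maps to bar level permutation[12] = 6.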
+
+        // Make it less likely that the bar stays at position 0, i.e. report
+        // level 0 only if _absMax is in the range 0-250 (instead of 0-1000).
+        if ((position == 0) && (_absMax > 250))
+        {
+            position = 1;
+        }
+        _currentLevel = permutation[position];
+
+        // Decay the absolute maximum (divide by 4)
+        _absMax >>= 2;
+    }
+}
+
+WebRtc_Word8
+AudioLevel::Level() const
+{
+    return _currentLevel;
+}
+
+WebRtc_Word16
+AudioLevel::LevelFullRange() const
+{
+    return _currentLevelFullRange;
+}
+
+}  // namespace voe
+
+}  //  namespace webrtc
diff --git a/voice_engine/main/source/level_indicator.h b/voice_engine/main/source/level_indicator.h
new file mode 100644
index 0000000..564b068
--- /dev/null
+++ b/voice_engine/main/source/level_indicator.h
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_LEVEL_INDICATOR_H
+#define WEBRTC_VOICE_ENGINE_LEVEL_INDICATOR_H
+
+#include "typedefs.h"
+#include "voice_engine_defines.h"
+
+namespace webrtc {
+
+class AudioFrame;
+namespace voe {
+
+class AudioLevel
+{
+public:
+    AudioLevel();
+    virtual ~AudioLevel();
+
+    void ComputeLevel(const AudioFrame& audioFrame);
+
+    WebRtc_Word8 Level() const;
+
+    WebRtc_Word16 LevelFullRange() const;
+
+    void Clear();
+
+private:
+    enum { kUpdateFrequency = 10};
+
+    WebRtc_Word16 _absMax;
+    WebRtc_Word16 _count;
+    WebRtc_Word8 _currentLevel;
+    WebRtc_Word16 _currentLevelFullRange;
+};
+
+}  // namespace voe
+
+}  // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_LEVEL_INDICATOR_H
diff --git a/voice_engine/main/source/monitor_module.cc b/voice_engine/main/source/monitor_module.cc
new file mode 100644
index 0000000..834264e
--- /dev/null
+++ b/voice_engine/main/source/monitor_module.cc
@@ -0,0 +1,91 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "critical_section_wrapper.h"
+#include "monitor_module.h"
+
+namespace webrtc  {
+
+namespace voe  {
+
+MonitorModule::MonitorModule() :
+    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _observerPtr(NULL),
+    _lastProcessTime(GET_TIME_IN_MS())
+{
+}
+
+MonitorModule::~MonitorModule()
+{
+    delete &_callbackCritSect;
+}
+
+WebRtc_Word32 
+MonitorModule::RegisterObserver(MonitorObserver& observer)
+{
+    CriticalSectionScoped lock(_callbackCritSect);
+    if (_observerPtr)
+    {
+        return -1;
+    }
+    _observerPtr = &observer;
+    return 0;
+}
+
+WebRtc_Word32 
+MonitorModule::DeRegisterObserver()
+{
+    CriticalSectionScoped lock(_callbackCritSect);
+    if (!_observerPtr)
+    {
+        return 0;
+    }
+    _observerPtr = NULL;
+    return 0;
+}
+
+WebRtc_Word32 
+MonitorModule::Version(WebRtc_Word8* version,
+                       WebRtc_UWord32& remainingBufferInBytes,
+                       WebRtc_UWord32& position) const
+{
+    return 0;
+}
+   
+WebRtc_Word32 
+MonitorModule::ChangeUniqueId(const WebRtc_Word32 id)
+{
+    return 0;
+}
+
+WebRtc_Word32 
+MonitorModule::TimeUntilNextProcess()
+{
+    WebRtc_UWord32 now = GET_TIME_IN_MS();
+    WebRtc_Word32 timeToNext =
+        kAverageProcessUpdateTimeMs - (now - _lastProcessTime);
+    return (timeToNext); 
+}
+
+WebRtc_Word32 
+MonitorModule::Process()
+{
+    _lastProcessTime = GET_TIME_IN_MS();
+    if (_observerPtr)
+    {
+        CriticalSectionScoped lock(_callbackCritSect);
+        _observerPtr->OnPeriodicProcess();
+    }
+    return 0;
+}
+
+}  //  namespace voe
+
+}  //  namespace webrtc
diff --git a/voice_engine/main/source/monitor_module.h b/voice_engine/main/source/monitor_module.h
new file mode 100644
index 0000000..45cf228
--- /dev/null
+++ b/voice_engine/main/source/monitor_module.h
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_MONITOR_MODULE_H
+#define WEBRTC_VOICE_ENGINE_MONITOR_MODULE_H
+
+#include "module.h"
+#include "typedefs.h"
+#include "voice_engine_defines.h"
+
+class MonitorObserver
+{
+public:
+    virtual void OnPeriodicProcess() = 0;
+protected:
+    virtual ~MonitorObserver() {}
+};
+
+
+namespace webrtc {
+class CriticalSectionWrapper;
+
+namespace voe {
+
+class MonitorModule : public Module
+{
+public:
+    WebRtc_Word32 RegisterObserver(MonitorObserver& observer);
+
+    WebRtc_Word32 DeRegisterObserver();
+
+    MonitorModule();
+
+    virtual ~MonitorModule();
+public:  // Module
+    WebRtc_Word32 Version(WebRtc_Word8* version,
+                          WebRtc_UWord32& remainingBufferInBytes,
+                          WebRtc_UWord32& position) const;
+
+    WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
+
+    WebRtc_Word32 TimeUntilNextProcess();
+
+    WebRtc_Word32 Process();
+private:
+    enum { kAverageProcessUpdateTimeMs = 1000 };
+    MonitorObserver* _observerPtr;
+    CriticalSectionWrapper&	_callbackCritSect;
+    WebRtc_Word32 _lastProcessTime;
+};
+
+}  //  namespace voe
+
+}  //  namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_MONITOR_MODULE_H
diff --git a/voice_engine/main/source/output_mixer.cc b/voice_engine/main/source/output_mixer.cc
new file mode 100644
index 0000000..a701ad9
--- /dev/null
+++ b/voice_engine/main/source/output_mixer.cc
@@ -0,0 +1,748 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "output_mixer.h"
+
+#include "audio_processing.h"
+#include "audio_frame_operations.h"
+#include "critical_section_wrapper.h"
+#include "file_wrapper.h"
+#include "trace.h"
+#include "statistics.h"
+#include "voe_external_media.h"
+
+namespace webrtc {
+
+namespace voe {
+
+void
+OutputMixer::NewMixedAudio(const WebRtc_Word32 id,
+                           const AudioFrame& generalAudioFrame,
+                           const AudioFrame** uniqueAudioFrames,
+                           const WebRtc_UWord32 size)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::NewMixedAudio(id=%d, size=%u)", id, size);
+
+    _audioFrame = generalAudioFrame;
+    _audioFrame._id = id;
+}
+
+void OutputMixer::MixedParticipants(
+    const WebRtc_Word32 id,
+    const ParticipantStatistics* participantStatistics,
+    const WebRtc_UWord32 size)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::MixedParticipants(id=%d, size=%u)", id, size);
+}
+
+void OutputMixer::VADPositiveParticipants(
+    const WebRtc_Word32 id,
+    const ParticipantStatistics* participantStatistics,
+    const WebRtc_UWord32 size)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::VADPositiveParticipants(id=%d, size=%u)",
+                 id, size);
+}
+
+void OutputMixer::MixedAudioLevel(const WebRtc_Word32  id,
+                                  const WebRtc_UWord32 level)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::MixedAudioLevel(id=%d, level=%u)", id, level);
+}
+
+void OutputMixer::PlayNotification(const WebRtc_Word32 id,
+                                   const WebRtc_UWord32 durationMs)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::PlayNotification(id=%d, durationMs=%d)",
+                 id, durationMs);
+    // Not implemented yet.
+}
+
+void OutputMixer::RecordNotification(const WebRtc_Word32 id,
+                                     const WebRtc_UWord32 durationMs)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::RecordNotification(id=%d, durationMs=%d)",
+                 id, durationMs);
+
+    // Not implemented yet.
+}
+
+void OutputMixer::PlayFileEnded(const WebRtc_Word32 id)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::PlayFileEnded(id=%d)", id);
+
+    // not needed
+}
+
+void OutputMixer::RecordFileEnded(const WebRtc_Word32 id)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::RecordFileEnded(id=%d)", id);
+    assert(id == _instanceId);
+
+    CriticalSectionScoped cs(_fileCritSect);
+    _outputFileRecording = false;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::RecordFileEnded() =>"
+                 "output file recorder module is shutdown");
+}
+
+WebRtc_Word32
+OutputMixer::Create(OutputMixer*& mixer, const WebRtc_UWord32 instanceId)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
+                 "OutputMixer::Create(instanceId=%d)", instanceId);
+    mixer = new OutputMixer(instanceId);
+    if (mixer == NULL)
+    {
+        WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
+                     "OutputMixer::Create() unable to allocate memory for"
+                     "mixer");
+        return -1;
+    }
+    return 0;
+}
+
+OutputMixer::OutputMixer(const WebRtc_UWord32 instanceId) :
+    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _instanceId(instanceId),
+    _outputFileRecorderPtr(NULL),
+    _outputFileRecording(false),
+    _dtmfGenerator(instanceId),
+    _mixerModule(*AudioConferenceMixer::
+                 CreateAudioConferenceMixer(instanceId)),
+    _externalMediaCallbackPtr(NULL),
+    _audioLevel(),
+    _externalMedia(false),
+    _panLeft(1.0f),
+    _panRight(1.0f),
+    _mixingFrequencyHz(8000)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::OutputMixer() - ctor");
+	
+    if ((_mixerModule.RegisterMixedStreamCallback(*this) == -1) ||
+        (_mixerModule.RegisterMixerStatusCallback(*this, 100) == -1))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                     "OutputMixer::OutputMixer() failed to register mixer"
+                     "callbacks");
+    }
+	
+    _dtmfGenerator.Init();
+}
+
+void
+OutputMixer::Destroy(OutputMixer*& mixer)
+{
+    if (mixer)
+    {
+        delete mixer;
+        mixer = NULL;
+    }
+}
+	
+OutputMixer::~OutputMixer()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::~OutputMixer() - dtor");
+    if (_externalMedia)
+    {
+        DeRegisterExternalMediaProcessing();
+    }
+    {
+        CriticalSectionScoped cs(_fileCritSect);
+        if (_outputFileRecorderPtr)
+        {
+            _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+            _outputFileRecorderPtr->StopRecording();
+            FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+            _outputFileRecorderPtr = NULL;
+        }
+    }
+    _mixerModule.UnRegisterMixerStatusCallback();
+    _mixerModule.UnRegisterMixedStreamCallback();
+    delete &_mixerModule;
+    delete &_callbackCritSect;
+    delete &_fileCritSect;
+}
+
+WebRtc_Word32
+OutputMixer::SetEngineInformation(voe::Statistics& engineStatistics)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::SetEngineInformation()");
+    _engineStatisticsPtr = &engineStatistics;
+    return 0;
+}
+
+WebRtc_Word32 
+OutputMixer::SetAudioProcessingModule(
+    AudioProcessing* audioProcessingModule)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::SetAudioProcessingModule("
+                 "audioProcessingModule=0x%x)", audioProcessingModule);
+    _audioProcessingModulePtr = audioProcessingModule;
+    return 0;
+}
+
+int OutputMixer::RegisterExternalMediaProcessing(
+    VoEMediaProcess& process_object)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+               "OutputMixer::RegisterExternalMediaProcessing()");
+
+    CriticalSectionScoped cs(_callbackCritSect);
+    _externalMediaCallbackPtr = &process_object;
+    _externalMedia = true;
+
+    return 0;
+}
+
+int OutputMixer::DeRegisterExternalMediaProcessing()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::DeRegisterExternalMediaProcessing()");
+
+    CriticalSectionScoped cs(_callbackCritSect);
+    _externalMedia = false;
+    _externalMediaCallbackPtr = NULL;
+
+    return 0;
+}
+
+int OutputMixer::PlayDtmfTone(WebRtc_UWord8 eventCode, int lengthMs,
+                              int attenuationDb)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "OutputMixer::PlayDtmfTone()");
+    if (_dtmfGenerator.AddTone(eventCode, lengthMs, attenuationDb) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(VE_STILL_PLAYING_PREV_DTMF,
+                                           kTraceError,
+                                           "OutputMixer::PlayDtmfTone()");
+        return -1;
+    }
+    return 0;
+}
+
+int OutputMixer::StartPlayingDtmfTone(WebRtc_UWord8 eventCode,
+                                      int attenuationDb)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "OutputMixer::StartPlayingDtmfTone()");
+    if (_dtmfGenerator.StartTone(eventCode, attenuationDb) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_STILL_PLAYING_PREV_DTMF,
+            kTraceError,
+            "OutputMixer::StartPlayingDtmfTone())");
+        return -1;
+    }
+    return 0;
+}
+
+int OutputMixer::StopPlayingDtmfTone()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "OutputMixer::StopPlayingDtmfTone()");
+    return (_dtmfGenerator.StopTone());
+}
+
+WebRtc_Word32
+OutputMixer::SetMixabilityStatus(MixerParticipant& participant,
+                                 const bool mixable)
+{
+    return _mixerModule.SetMixabilityStatus(participant, mixable);
+}
+
+WebRtc_Word32
+OutputMixer::MixActiveChannels()
+{
+    return _mixerModule.Process();
+}
+
+int
+OutputMixer::GetSpeechOutputLevel(WebRtc_UWord32& level)
+{
+    WebRtc_Word8 currentLevel = _audioLevel.Level();
+    level = static_cast<WebRtc_UWord32> (currentLevel);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetSpeechOutputLevel() => level=%u", level);
+    return 0;
+}
+
+int
+OutputMixer::GetSpeechOutputLevelFullRange(WebRtc_UWord32& level)
+{
+    WebRtc_Word16 currentLevel = _audioLevel.LevelFullRange();
+    level = static_cast<WebRtc_UWord32> (currentLevel);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetSpeechOutputLevelFullRange() => level=%u", level);
+    return 0;
+}
+
+int
+OutputMixer::SetOutputVolumePan(float left, float right)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::SetOutputVolumePan()");
+    _panLeft = left;
+    _panRight = right;
+    return 0;
+}
+
+int
+OutputMixer::GetOutputVolumePan(float& left, float& right)
+{
+    left = _panLeft;
+    right = _panRight;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetOutputVolumePan() => left=%2.1f, right=%2.1f",
+                 left, right);
+    return 0;
+}
+
+int OutputMixer::StartRecordingPlayout(const char* fileName,
+                                       const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::StartRecordingPlayout(fileName=%s)", fileName);
+
+    if (_outputFileRecording)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
+                     "StartRecordingPlayout() is already recording");
+        return 0;
+    }
+
+    FileFormats format;
+    const WebRtc_UWord32 notificationTime(0);
+    CodecInst dummyCodec={100,"L16",16000,320,1,320000};
+
+    if (codecInst != NULL && codecInst->channels != 1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_ARGUMENT, kTraceError,
+            "StartRecordingPlayout() invalid compression");
+        return(-1);
+    }
+    if(codecInst == NULL)
+    {
+        format = kFileFormatPcm16kHzFile;
+        codecInst=&dummyCodec;
+    }
+    else if((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
+        (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
+        (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
+    {
+        format = kFileFormatWavFile;
+    }
+    else
+    {
+        format = kFileFormatCompressedFile;
+    }
+
+    CriticalSectionScoped cs(_fileCritSect);
+    
+    // Destroy the old instance
+    if (_outputFileRecorderPtr)
+    {
+        _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+        _outputFileRecorderPtr = NULL;
+    }
+
+    _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
+        _instanceId,
+        (const FileFormats)format);
+    if (_outputFileRecorderPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartRecordingPlayout() fileRecorder format isnot correct");
+        return -1;
+    }
+
+    if (_outputFileRecorderPtr->StartRecordingAudioFile(
+        fileName,
+        (const CodecInst&)*codecInst,
+        notificationTime) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartRecordingAudioFile() failed to start file recording");
+        _outputFileRecorderPtr->StopRecording();
+        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+        _outputFileRecorderPtr = NULL;
+        return -1;
+    }
+    _outputFileRecorderPtr->RegisterModuleFileCallback(this);
+    _outputFileRecording = true;
+
+    return 0;
+}
+
+int OutputMixer::StartRecordingPlayout(OutStream* stream,
+                                       const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::StartRecordingPlayout()");
+
+    if (_outputFileRecording)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
+                     "StartRecordingPlayout() is already recording");
+        return 0;
+    }
+
+    FileFormats format;
+    const WebRtc_UWord32 notificationTime(0);
+    CodecInst dummyCodec={100,"L16",16000,320,1,320000};
+
+    if (codecInst != NULL && codecInst->channels != 1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_ARGUMENT, kTraceError,
+            "StartRecordingPlayout() invalid compression");
+        return(-1);
+    }
+    if(codecInst == NULL)
+    {
+        format = kFileFormatPcm16kHzFile;
+        codecInst=&dummyCodec;
+    }
+    else if((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
+        (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
+        (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
+    {
+        format = kFileFormatWavFile;
+    }
+    else
+    {
+        format = kFileFormatCompressedFile;
+    }
+
+    CriticalSectionScoped cs(_fileCritSect);
+
+    // Destroy the old instance
+    if (_outputFileRecorderPtr)
+    {
+        _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+        _outputFileRecorderPtr = NULL;
+    }
+
+    _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
+        _instanceId,
+        (const FileFormats)format);
+    if (_outputFileRecorderPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartRecordingPlayout() fileRecorder format isnot correct");
+        return -1;
+    }
+
+    if (_outputFileRecorderPtr->StartRecordingAudioFile(*stream,
+                                                        *codecInst,
+                                                        notificationTime) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartRecordingAudioFile() failed to start file recording");
+        _outputFileRecorderPtr->StopRecording();
+        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+        _outputFileRecorderPtr = NULL;
+        return -1;
+    }
+    
+    _outputFileRecorderPtr->RegisterModuleFileCallback(this);
+    _outputFileRecording = true;
+
+    return 0;
+}
+
+int OutputMixer::StopRecordingPlayout()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::StopRecordingPlayout()");
+
+    if (!_outputFileRecording)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                     "StopRecordingPlayout() file isnot recording");
+        return -1;
+    }
+
+    CriticalSectionScoped cs(_fileCritSect);
+
+    if (_outputFileRecorderPtr->StopRecording() != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_STOP_RECORDING_FAILED, kTraceError,
+            "StopRecording(), could not stop recording");
+        return -1;
+    }
+    _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+    FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+    _outputFileRecorderPtr = NULL;
+    _outputFileRecording = false;
+
+    return 0;
+}
+
+WebRtc_Word32 
+OutputMixer::GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
+                           const WebRtc_UWord8 channels,
+                           AudioFrame& audioFrame)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+                 "OutputMixer::GetMixedAudio(desiredFreqHz=%d, channels=&d)",
+                 desiredFreqHz, channels);
+
+    audioFrame = _audioFrame;
+
+    // --- Record playout if enabled
+    {
+        CriticalSectionScoped cs(_fileCritSect);
+        if (_outputFileRecording)
+        {
+            assert(audioFrame._audioChannel == 1);
+        
+            if (_outputFileRecorderPtr)
+            {
+                _outputFileRecorderPtr->RecordAudioToFile(audioFrame);
+            }
+        }
+    }
+
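+    // --- Resample the mixed audio to the frequency requested by the caller.
+    //     Mono and stereo frames use different synchronous resampler modes.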
+    int outLen(0);
+
+    if (audioFrame._audioChannel == 1)
+    {
+        if (_resampler.ResetIfNeeded(audioFrame._frequencyInHz,
+                                     desiredFreqHz,
+                                     kResamplerSynchronous) != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "OutputMixer::GetMixedAudio() unable to resample - 1");
+            return -1;
+        }
+    }
+    else
+    {
+        if (_resampler.ResetIfNeeded(audioFrame._frequencyInHz,
+                                     desiredFreqHz,
+                                     kResamplerSynchronousStereo) != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "OutputMixer::GetMixedAudio() unable to resample - 2");
+            return -1;
+        }
+    }
+    if (_resampler.Push(
+        _audioFrame._payloadData,
+        _audioFrame._payloadDataLengthInSamples*_audioFrame._audioChannel,
+        audioFrame._payloadData,
+        AudioFrame::kMaxAudioFrameSizeSamples,
+        outLen) == 0)
+    {
+        // Ensure that output from resampler matches the audio-frame format.
+        // Example: 10ms stereo output at 48kHz => outLen = 960 =>
+        // convert _payloadDataLengthInSamples to 480
+        audioFrame._payloadDataLengthInSamples =
+            (outLen / _audioFrame._audioChannel);
+        audioFrame._frequencyInHz = desiredFreqHz;
+    }
+    else
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                     "OutputMixer::GetMixedAudio() resampling failed");
+        return -1;
+    }
+
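+    // Upmix to stereo when two channels are requested but the mixed signal
+    // is mono.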
+    if ((channels == 2) && (audioFrame._audioChannel == 1))
+    {
+        AudioFrameOperations::MonoToStereo(audioFrame);
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 
+OutputMixer::DoOperationsOnCombinedSignal()
+{
+    if (_audioFrame._frequencyInHz != _mixingFrequencyHz)
+    {
+        WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+                     "OutputMixer::DoOperationsOnCombinedSignal() => "
+                     "mixing frequency = %d", _audioFrame._frequencyInHz);
+        _mixingFrequencyHz = _audioFrame._frequencyInHz;
+    }
+
+    // --- Insert inband Dtmf tone
+    if (_dtmfGenerator.IsAddingTone())
+    {
+        InsertInbandDtmfTone();
+    }
+
+    // Scale left and/or right channel(s) if balance is active
+    if (_panLeft != 1.0 || _panRight != 1.0)
+    {
+        if (_audioFrame._audioChannel == 1)
+        {
+            AudioFrameOperations::MonoToStereo(_audioFrame);
+        }
+        else
+        {
+            // Pure stereo mode (we are receiving a stereo signal).
+        }
+
+        assert(_audioFrame._audioChannel == 2);
+        AudioFrameOperations::Scale(_panLeft, _panRight, _audioFrame);
+    }
+
+    // --- Far-end Voice Quality Enhancement (AudioProcessing Module)
+
+    APMAnalyzeReverseStream();
+
+    // --- External media processing
+
+    if (_externalMedia)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+        const bool isStereo = (_audioFrame._audioChannel == 2);
+        if (_externalMediaCallbackPtr)
+        {
+            _externalMediaCallbackPtr->Process(
+                -1,
+                kPlaybackAllChannelsMixed, 
+                (WebRtc_Word16*)_audioFrame._payloadData,
+                _audioFrame._payloadDataLengthInSamples,
+                _audioFrame._frequencyInHz,
+                isStereo);
+        }
+    }
+
+    // --- Measure audio level (0-9) for the combined signal
+    _audioLevel.ComputeLevel(_audioFrame);
+
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//                              Private methods
+// ----------------------------------------------------------------------------
+
+int 
+OutputMixer::APMAnalyzeReverseStream()
+{
+    int outLen(0);
+    AudioFrame audioFrame = _audioFrame;
+
+    // Convert from mixing frequency to APM frequency.
+    // Sending side determines APM frequency.
+
+    if (audioFrame._audioChannel == 1)
+    {
+        _apmResampler.ResetIfNeeded(_audioFrame._frequencyInHz,
+                                    _audioProcessingModulePtr->sample_rate_hz(),
+                                    kResamplerSynchronous);
+    }
+    else
+    {
+        _apmResampler.ResetIfNeeded(_audioFrame._frequencyInHz,
+                                    _audioProcessingModulePtr->sample_rate_hz(),
+                                    kResamplerSynchronousStereo);
+    }
+    if (_apmResampler.Push(
+        _audioFrame._payloadData,
+        _audioFrame._payloadDataLengthInSamples*_audioFrame._audioChannel,
+        audioFrame._payloadData,
+        AudioFrame::kMaxAudioFrameSizeSamples,
+        outLen) == 0)
+    {
+        audioFrame._payloadDataLengthInSamples =
+            (outLen / _audioFrame._audioChannel);
+        audioFrame._frequencyInHz = _audioProcessingModulePtr->sample_rate_hz();
+    }
+
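+    // The far-end analysis operates on a mono signal; downmix if needed.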
+    if (audioFrame._audioChannel == 2)
+    {
+        AudioFrameOperations::StereoToMono(audioFrame);
+    }
+
+    // Perform far-end APM analyze
+
+    if (_audioProcessingModulePtr->AnalyzeReverseStream(&audioFrame) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
+                     "AudioProcessingModule::AnalyzeReverseStream() => error");
+    }
+
+    return 0;
+}
+
+int
+OutputMixer::InsertInbandDtmfTone()
+{
+    WebRtc_UWord16 sampleRate(0);
+    _dtmfGenerator.GetSampleRate(sampleRate);
+    if (sampleRate != _audioFrame._frequencyInHz)
+    {
+        // Update sample rate of Dtmf tone since the mixing frequency changed.
+        _dtmfGenerator.SetSampleRate(
+            (WebRtc_UWord16)(_audioFrame._frequencyInHz));
+        // Reset the tone to be added taking the new sample rate into account.
+        _dtmfGenerator.ResetTone();
+    }
+
+    WebRtc_Word16 toneBuffer[320];
+    WebRtc_UWord16 toneSamples(0);
+    if (_dtmfGenerator.Get10msTone(toneBuffer, toneSamples) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                     "OutputMixer::InsertInbandDtmfTone() inserting Dtmf"
+                     "tone failed");
+        return -1;
+    }
+
+    // replace mixed audio with Dtmf tone
+    if (_audioFrame._audioChannel == 1)
+    {
+        // mono
+        memcpy(_audioFrame._payloadData, toneBuffer, sizeof(WebRtc_Word16)
+            * toneSamples);
+    } else
+    {
+        // stereo
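+        // Insert the tone in the left channel and write silence to the right.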
+        for (int i = 0; i < _audioFrame._payloadDataLengthInSamples; i++)
+        {
+            _audioFrame._payloadData[2 * i] = toneBuffer[i];
+            _audioFrame._payloadData[2 * i + 1] = 0;
+        }
+    }
+    assert(_audioFrame._payloadDataLengthInSamples == toneSamples);
+
+    return 0;
+}
+
+}  //  namespace voe
+
+}  //  namespace webrtc
diff --git a/voice_engine/main/source/output_mixer.h b/voice_engine/main/source/output_mixer.h
new file mode 100644
index 0000000..724cdf5
--- /dev/null
+++ b/voice_engine/main/source/output_mixer.h
@@ -0,0 +1,159 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H
+#define WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H
+
+#include "audio_conference_mixer.h"
+#include "audio_conference_mixer_defines.h"
+#include "common_types.h"
+#include "dtmf_inband.h"
+#include "file_recorder.h"
+#include "level_indicator.h"
+#include "resampler.h"
+#include "voice_engine_defines.h"
+
+namespace webrtc {
+
+class AudioProcessing;
+class CriticalSectionWrapper;
+class FileWrapper;
+class VoEMediaProcess;
+
+namespace voe {
+
+class Statistics;
+
+class OutputMixer : public AudioMixerOutputReceiver,
+                    public AudioMixerStatusReceiver,
+                    public FileCallback
+{
+public:
+    static WebRtc_Word32 Create(OutputMixer*& mixer,
+                                const WebRtc_UWord32 instanceId);
+
+    static void Destroy(OutputMixer*& mixer);
+
+    WebRtc_Word32 SetEngineInformation(Statistics& engineStatistics);
+
+    WebRtc_Word32 SetAudioProcessingModule(
+        AudioProcessing* audioProcessingModule);
+
+    // VoEExternalMedia
+    int RegisterExternalMediaProcessing(
+        VoEMediaProcess& proccess_object);
+
+    int DeRegisterExternalMediaProcessing();
+
+    // VoEDtmf
+    int PlayDtmfTone(WebRtc_UWord8 eventCode,
+                     int lengthMs,
+                     int attenuationDb);
+
+    int StartPlayingDtmfTone(WebRtc_UWord8 eventCode,
+                             int attenuationDb);
+
+    int StopPlayingDtmfTone();
+
+    WebRtc_Word32 MixActiveChannels();
+
+    WebRtc_Word32 DoOperationsOnCombinedSignal();
+
+    WebRtc_Word32 SetMixabilityStatus(MixerParticipant& participant,
+                                      const bool mixable);
+
+    WebRtc_Word32 GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
+                                const WebRtc_UWord8 channels,
+                                AudioFrame& audioFrame);
+
+    // VoEVolumeControl
+    int GetSpeechOutputLevel(WebRtc_UWord32& level);
+
+    int GetSpeechOutputLevelFullRange(WebRtc_UWord32& level);
+
+    int SetOutputVolumePan(float left, float right);
+
+    int GetOutputVolumePan(float& left, float& right);
+
+    // VoEFile
+    int StartRecordingPlayout(const char* fileName,
+                              const CodecInst* codecInst);
+
+    int StartRecordingPlayout(OutStream* stream,
+                              const CodecInst* codecInst);
+    int StopRecordingPlayout();
+
+    virtual ~OutputMixer();
+
+public:  // from AudioMixerOutputReceiver
+    virtual void NewMixedAudio(
+        const WebRtc_Word32 id,
+        const AudioFrame& generalAudioFrame,
+        const AudioFrame** uniqueAudioFrames,
+        const WebRtc_UWord32 size);
+
+public:  // from AudioMixerStatusReceiver
+    virtual void MixedParticipants(
+        const WebRtc_Word32 id,
+        const ParticipantStatistics* participantStatistics,
+        const WebRtc_UWord32 size);
+
+    virtual void VADPositiveParticipants(
+        const WebRtc_Word32 id,
+        const ParticipantStatistics* participantStatistics,
+        const WebRtc_UWord32 size);
+
+    virtual void MixedAudioLevel(const WebRtc_Word32  id,
+                                 const WebRtc_UWord32 level);
+
+public: // For file recording
+    void PlayNotification(const WebRtc_Word32 id,
+                          const WebRtc_UWord32 durationMs);
+
+    void RecordNotification(const WebRtc_Word32 id,
+                            const WebRtc_UWord32 durationMs);
+
+    void PlayFileEnded(const WebRtc_Word32 id);
+    void RecordFileEnded(const WebRtc_Word32 id);
+
+private:
+    OutputMixer(const WebRtc_UWord32 instanceId);
+    int APMAnalyzeReverseStream();
+    int InsertInbandDtmfTone();
+
+private:  // uses
+    Statistics* _engineStatisticsPtr;
+    AudioProcessing* _audioProcessingModulePtr;
+
+private:  // owns
+    CriticalSectionWrapper& _callbackCritSect;
+    // protect the _outputFileRecorderPtr and _outputFileRecording
+    CriticalSectionWrapper& _fileCritSect;
+    AudioConferenceMixer& _mixerModule;
+    AudioFrame _audioFrame;
+    Resampler _resampler;        // converts mixed audio to fit ADM format
+    Resampler _apmResampler;    // converts mixed audio to fit APM rate
+    AudioLevel _audioLevel;    // measures audio level for the combined signal
+    DtmfInband _dtmfGenerator;
+    WebRtc_UWord32 _instanceId;
+    VoEMediaProcess* _externalMediaCallbackPtr;
+    bool _externalMedia;
+    float _panLeft;
+    float _panRight;
+    WebRtc_UWord32 _mixingFrequencyHz;
+    FileRecorder* _outputFileRecorderPtr;
+    bool _outputFileRecording;
+};
+
+}  //  namespace voe
+
+}  //  namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H
diff --git a/voice_engine/main/source/ref_count.cc b/voice_engine/main/source/ref_count.cc
new file mode 100644
index 0000000..f1ed0be
--- /dev/null
+++ b/voice_engine/main/source/ref_count.cc
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "critical_section_wrapper.h"
+#include "ref_count.h"
+
+namespace webrtc {
+
+namespace voe {
+
+RefCount::RefCount() :
+    _count(0),
+    _crit(*CriticalSectionWrapper::CreateCriticalSection())
+{
+}
+
+RefCount::~RefCount()
+{
+    delete &_crit;
+}
+
+RefCount&
+RefCount::operator++(int)
+{
+    CriticalSectionScoped lock(_crit);
+    _count++;
+    return *this;
+}
+    
+RefCount&
+RefCount::operator--(int)
+{
+    CriticalSectionScoped lock(_crit);
+    _count--;
+    return *this;
+}
+  
+void 
+RefCount::Reset()
+{
+    CriticalSectionScoped lock(_crit);
+    _count = 0;
+}
+
+int 
+RefCount::GetCount() const
+{
+    return _count;
+}
+
+}  // namespace voe
+
+}  //  namespace webrtc
diff --git a/voice_engine/main/source/ref_count.h b/voice_engine/main/source/ref_count.h
new file mode 100644
index 0000000..e8c0a81
--- /dev/null
+++ b/voice_engine/main/source/ref_count.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_REF_COUNT_H
+#define WEBRTC_VOICE_ENGINE_REF_COUNT_H
+
+namespace webrtc {
+class CriticalSectionWrapper;
+
+namespace voe {
+
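+// Reference counter where increment, decrement and reset are protected by
+// a critical section.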
+class RefCount
+{
+public:
+    RefCount();
+    ~RefCount();
+    RefCount& operator++(int);
+    RefCount& operator--(int);
+    void Reset();
+    int GetCount() const;
+private:
+    volatile int _count;
+    CriticalSectionWrapper& _crit;
+};
+
+}  // namespace voe
+
+}  // namespace webrtc
+#endif    // #ifndef WEBRTC_VOICE_ENGINE_REF_COUNT_H
diff --git a/voice_engine/main/source/shared_data.cc b/voice_engine/main/source/shared_data.cc
new file mode 100644
index 0000000..81b360c
--- /dev/null
+++ b/voice_engine/main/source/shared_data.cc
@@ -0,0 +1,98 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "shared_data.h"
+
+#include "audio_processing.h"
+#include "critical_section_wrapper.h"
+#include "channel.h"
+#include "output_mixer.h"
+#include "trace.h"
+#include "transmit_mixer.h"
+
+namespace webrtc {
+
+namespace voe {
+
+static WebRtc_Word32 _gInstanceCounter = 0;
+
+SharedData::SharedData() :
+    _instanceId(++_gInstanceCounter),
+    _channelManager(_gInstanceCounter),
+    _engineStatistics(_gInstanceCounter),
+    _usingExternalAudioDevice(false),
+    _audioDevicePtr(NULL),
+    _audioProcessingModulePtr(NULL),
+    _moduleProcessThreadPtr(ProcessThread::CreateProcessThread()),
+    _apiCritPtr(CriticalSectionWrapper::CreateCriticalSection()),
+    _externalRecording(false),
+    _externalPlayout(false)
+{
+    Trace::CreateTrace();
+    Trace::SetLevelFilter(WEBRTC_VOICE_ENGINE_DEFAULT_TRACE_FILTER);
+    if (OutputMixer::Create(_outputMixerPtr, _gInstanceCounter) == 0)
+    {
+        _outputMixerPtr->SetEngineInformation(_engineStatistics);
+    }
+    if (TransmitMixer::Create(_transmitMixerPtr, _gInstanceCounter) == 0)
+    {
+        _transmitMixerPtr->SetEngineInformation(*_moduleProcessThreadPtr,
+                                                _engineStatistics,
+                                                _channelManager);
+    }
+    _audioDeviceLayer = AudioDeviceModule::kPlatformDefaultAudio;
+}
+
+SharedData::~SharedData()
+{
+    OutputMixer::Destroy(_outputMixerPtr);
+    TransmitMixer::Destroy(_transmitMixerPtr);
+    if (!_usingExternalAudioDevice)
+    {
+        AudioDeviceModule::Destroy(_audioDevicePtr);
+    }
+    AudioProcessing::Destroy(_audioProcessingModulePtr);
+    delete _apiCritPtr;
+    ProcessThread::DestroyProcessThread(_moduleProcessThreadPtr);
+    Trace::ReturnTrace();
+}
+
+WebRtc_UWord16
+SharedData::NumOfSendingChannels()
+{
+    WebRtc_Word32 numOfChannels = _channelManager.NumOfChannels();
+    if (numOfChannels <= 0)
+    {
+        return 0;
+    }
+	
+    WebRtc_UWord16 nChannelsSending(0);
+    WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels];
+
+    _channelManager.GetChannelIds(channelsArray, numOfChannels);
+    for (int i = 0; i < numOfChannels; i++)
+    {
+        voe::ScopedChannel sc(_channelManager, channelsArray[i]);
+        Channel* chPtr = sc.ChannelPtr();
+        if (chPtr)
+        {
+            if (chPtr->Sending())
+            {
+                nChannelsSending++;
+            }
+        }
+    }
+    delete [] channelsArray;
+    return nChannelsSending;
+}
+
+}  //  namespace voe
+
+}  //  namespace webrtc
diff --git a/voice_engine/main/source/shared_data.h b/voice_engine/main/source/shared_data.h
new file mode 100644
index 0000000..27427d0
--- /dev/null
+++ b/voice_engine/main/source/shared_data.h
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_SHARED_DATA_H
+#define WEBRTC_VOICE_ENGINE_SHARED_DATA_H
+
+#include "voice_engine_defines.h"
+
+#include "channel_manager.h"
+#include "statistics.h"
+#include "process_thread.h"
+
+#include "audio_device.h"
+#include "audio_processing.h"
+
+class ProcessThread;
+
+namespace webrtc {
+class CriticalSectionWrapper;
+
+namespace voe {
+
+class TransmitMixer;
+class OutputMixer;
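+
+// Holds the objects that are shared within one VoiceEngine instance:
+// channel manager, statistics, audio device, audio processing module,
+// mixers and the module process thread.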
+class SharedData
+{
+protected:
+    WebRtc_UWord16 NumOfSendingChannels();
+protected:
+    const WebRtc_UWord32 _instanceId;
+    CriticalSectionWrapper* _apiCritPtr;
+    ChannelManager _channelManager;
+    Statistics _engineStatistics;
+    bool _usingExternalAudioDevice;
+    AudioDeviceModule* _audioDevicePtr;
+    OutputMixer* _outputMixerPtr;
+    TransmitMixer* _transmitMixerPtr;
+    AudioProcessing* _audioProcessingModulePtr;
+    ProcessThread* _moduleProcessThreadPtr;
+
+protected:
+    bool _externalRecording;
+    bool _externalPlayout;
+
+    AudioDeviceModule::AudioLayer _audioDeviceLayer;
+
+protected:
+    SharedData();
+    virtual ~SharedData();
+};
+
+} //  namespace voe
+
+} //  namespace webrtc
+#endif // WEBRTC_VOICE_ENGINE_SHARED_DATA_H
diff --git a/voice_engine/main/source/statistics.cc b/voice_engine/main/source/statistics.cc
new file mode 100644
index 0000000..eabf9a0
--- /dev/null
+++ b/voice_engine/main/source/statistics.cc
@@ -0,0 +1,99 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cassert>
+#include <stdio.h>
+
+#include "statistics.h"
+
+#include "trace.h"
+#include "critical_section_wrapper.h"
+
+namespace webrtc {
+
+namespace voe {
+
+Statistics::Statistics(const WebRtc_UWord32 instanceId) :
+    _critPtr(CriticalSectionWrapper::CreateCriticalSection()),
+    _instanceId(instanceId),
+    _isInitialized(false),
+    _lastError(0)
+{
+}
+	
+Statistics::~Statistics()
+{
+    if (_critPtr)
+    {
+        delete _critPtr;
+        _critPtr = NULL;
+    }
+}
+
+WebRtc_Word32 Statistics::SetInitialized()
+{
+    _isInitialized = true;
+    return 0;
+}
+
+WebRtc_Word32 Statistics::SetUnInitialized()
+{
+    _isInitialized = false;
+    return 0;
+}
+
+bool Statistics::Initialized() const
+{
+    return _isInitialized;
+}
+
+WebRtc_Word32 Statistics::SetLastError(const WebRtc_Word32 error) const
+{
+    CriticalSectionScoped cs(*_critPtr);
+    _lastError = error;
+    return 0;
+}
+
+WebRtc_Word32 Statistics::SetLastError(const WebRtc_Word32 error,
+                                       const TraceLevel level) const
+{
+    CriticalSectionScoped cs(*_critPtr);
+    _lastError = error;
+    WEBRTC_TRACE(level, kTraceVoice, VoEId(_instanceId,-1),
+                 "error code is set to %d",
+                 _lastError);
+    return 0;
+}
+
+WebRtc_Word32 Statistics::SetLastError(
+    const WebRtc_Word32 error,
+    const TraceLevel level, const char* msg) const
+{
+    CriticalSectionScoped cs(*_critPtr);
+    char traceMessage[KTraceMaxMessageSize];
+    assert(strlen(msg) < KTraceMaxMessageSize);
+    _lastError = error;
+    sprintf(traceMessage, "%s (error=%d)", msg, error);
+    WEBRTC_TRACE(level, kTraceVoice, VoEId(_instanceId,-1), "%s",
+                 traceMessage);
+    return 0;
+}
+
+WebRtc_Word32 Statistics::LastError() const
+{
+    CriticalSectionScoped cs(*_critPtr);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+               "LastError() => %d", _lastError);
+    return _lastError;
+}
+
+}  //  namespace voe
+
+}  //  namespace webrtc
diff --git a/voice_engine/main/source/statistics.h b/voice_engine/main/source/statistics.h
new file mode 100644
index 0000000..0c18bf8
--- /dev/null
+++ b/voice_engine/main/source/statistics.h
@@ -0,0 +1,54 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_STATISTICS_H
+#define WEBRTC_VOICE_ENGINE_STATISTICS_H
+
+#include "common_types.h"
+#include "typedefs.h"
+#include "voice_engine_defines.h"
+#include "voe_errors.h"
+
+namespace webrtc {
+class CriticalSectionWrapper;
+
+namespace voe {
+
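+// Tracks the engine initialization state and the last error code set via
+// SetLastError().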
+class Statistics
+{
+ public:
+    enum {KTraceMaxMessageSize = 256};
+ public:
+    Statistics(const WebRtc_UWord32 instanceId);
+    ~Statistics();
+
+    WebRtc_Word32 SetInitialized();
+    WebRtc_Word32 SetUnInitialized();
+    bool Initialized() const;
+    WebRtc_Word32 SetLastError(const WebRtc_Word32 error) const;
+    WebRtc_Word32 SetLastError(const WebRtc_Word32 error,
+                               const TraceLevel level) const;
+    WebRtc_Word32 SetLastError(const WebRtc_Word32 error,
+                               const TraceLevel level,
+                               const char* msg) const;
+    WebRtc_Word32 LastError() const;
+
+ private:
+    CriticalSectionWrapper* _critPtr;
+    const WebRtc_UWord32 _instanceId;
+    mutable WebRtc_Word32 _lastError;
+    bool _isInitialized;
+};
+
+}  // namespace voe
+
+}  //  namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_STATISTICS_H
diff --git a/voice_engine/main/source/transmit_mixer.cc b/voice_engine/main/source/transmit_mixer.cc
new file mode 100644
index 0000000..d135f44
--- /dev/null
+++ b/voice_engine/main/source/transmit_mixer.cc
@@ -0,0 +1,1435 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "transmit_mixer.h"
+
+#include "audio_frame_operations.h"
+#include "channel.h"
+#include "channel_manager.h"
+#include "critical_section_wrapper.h"
+#include "event_wrapper.h"
+#include "statistics.h"
+#include "trace.h"
+#include "utility.h"
+#include "voe_base_impl.h"
+#include "voe_external_media.h"
+
+#define WEBRTC_ABS(a)	   (((a) < 0) ? -(a) : (a))
+
+namespace webrtc {
+
+namespace voe {
+
+void 
+TransmitMixer::OnPeriodicProcess()
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::OnPeriodicProcess()");
+
+#if defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION)
+    if (_typingNoiseWarning > 0)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+        if (_voiceEngineObserverPtr)
+        {
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                         "TransmitMixer::OnPeriodicProcess() => "
+                         "CallbackOnError(VE_TYPING_NOISE_WARNING)");
+            _voiceEngineObserverPtr->CallbackOnError(-1,
+                                                     VE_TYPING_NOISE_WARNING);
+        }
+        _typingNoiseWarning = 0;
+    }
+#endif
+
+    if (_saturationWarning > 0)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+        if (_voiceEngineObserverPtr)
+        {
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                         "TransmitMixer::OnPeriodicProcess() =>"
+                         " CallbackOnError(VE_SATURATION_WARNING)");
+            _voiceEngineObserverPtr->CallbackOnError(-1, VE_SATURATION_WARNING);
+        }
+        _saturationWarning = 0;
+    }
+
+    if (_noiseWarning > 0)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+        if (_voiceEngineObserverPtr)
+        {
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                         "TransmitMixer::OnPeriodicProcess() =>"
+                         "CallbackOnError(VE_NOISE_WARNING)");
+            _voiceEngineObserverPtr->CallbackOnError(-1, VE_NOISE_WARNING);
+        }
+        _noiseWarning = 0;
+    }
+}
+
+
+void TransmitMixer::PlayNotification(const WebRtc_Word32 id,
+                                     const WebRtc_UWord32 durationMs)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::PlayNotification(id=%d, durationMs=%d)",
+                 id, durationMs);
+
+    // Not implemented yet.
+}
+	
+void TransmitMixer::RecordNotification(const WebRtc_Word32 id,
+                                       const WebRtc_UWord32 durationMs)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+                 "TransmitMixer::RecordNotification(id=%d, durationMs=%d)",
+                 id, durationMs);
+
+    // Not implemented yet.
+}
+
+void TransmitMixer::PlayFileEnded(const WebRtc_Word32 id)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::PlayFileEnded(id=%d)", id);
+
+    assert(id == _filePlayerId);
+
+    CriticalSectionScoped cs(_critSect);
+
+    _filePlaying = false;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::PlayFileEnded() =>"
+                 "file player module is shutdown");
+}
+
+void 
+TransmitMixer::RecordFileEnded(const WebRtc_Word32 id)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::RecordFileEnded(id=%d)", id);
+
+    if (id == _fileRecorderId)
+    {
+        CriticalSectionScoped cs(_critSect);
+        _fileRecording = false;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                     "TransmitMixer::RecordFileEnded() => fileRecorder module"
+                     "is shutdown");
+    } else if (id == _fileCallRecorderId)
+    {
+        CriticalSectionScoped cs(_critSect);
+        _fileCallRecording = false;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                     "TransmitMixer::RecordFileEnded() => fileCallRecorder"
+                     "module is shutdown");
+    }
+}
+
+WebRtc_Word32
+TransmitMixer::Create(TransmitMixer*& mixer, const WebRtc_UWord32 instanceId)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
+                 "TransmitMixer::Create(instanceId=%d)", instanceId);
+    mixer = new TransmitMixer(instanceId);
+    if (mixer == NULL)
+    {
+        WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
+                     "TransmitMixer::Create() unable to allocate memory"
+                     "for mixer");
+        return -1;
+    }
+    return 0;
+}
+
+void
+TransmitMixer::Destroy(TransmitMixer*& mixer)
+{
+    if (mixer)
+    {
+        delete mixer;
+        mixer = NULL;
+    }
+}
+
+TransmitMixer::TransmitMixer(const WebRtc_UWord32 instanceId) :
+    _instanceId(instanceId),
+    _engineStatisticsPtr(NULL),
+    _channelManagerPtr(NULL),
+    _audioProcessingModulePtr(NULL),
+    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+    _timeActive(0),
+    _penaltyCounter(0),
+    _typingNoiseWarning(0),
+#endif
+    _filePlayerPtr(NULL),
+    _fileRecorderPtr(NULL),
+    _fileCallRecorderPtr(NULL),
+    // Add 1024 - 1026 to the instance ID to avoid conflicts with other
+    // module IDs; fewer than 1024 channels are expected to be in use.
+    _filePlayerId(instanceId + 1024),
+    _fileRecorderId(instanceId + 1025),
+    _fileCallRecorderId(instanceId + 1026),
+    _filePlaying(false),
+    _fileRecording(false),
+    _fileCallRecording(false),
+    _mixFileWithMicrophone(false),
+    _captureLevel(0),
+    _audioLevel(),
+    _externalMedia(false),
+    _externalMediaCallbackPtr(NULL),
+    _mute(false),
+    _remainingMuteMicTimeMs(0),
+    _mixingFrequency(0),
+    _voiceEngineObserverPtr(NULL),
+    _processThreadPtr(NULL),
+    _saturationWarning(0),
+    _noiseWarning(0),
+    _includeAudioLevelIndication(false),
+    _audioLevel_dBov(100)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::TransmitMixer() - ctor");
+}
+	
+TransmitMixer::~TransmitMixer()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::~TransmitMixer() - dtor");
+    _monitorModule.DeRegisterObserver();
+    if (_processThreadPtr)
+    {
+        _processThreadPtr->DeRegisterModule(&_monitorModule);
+    }
+    if (_externalMedia)
+    {
+        DeRegisterExternalMediaProcessing();
+    }
+    {
+        CriticalSectionScoped cs(_critSect);
+        if (_fileRecorderPtr)
+        {
+            _fileRecorderPtr->RegisterModuleFileCallback(NULL);
+            _fileRecorderPtr->StopRecording();
+            FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
+            _fileRecorderPtr = NULL;
+        }
+        if (_fileCallRecorderPtr)
+        {
+            _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
+            _fileCallRecorderPtr->StopRecording();
+            FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
+            _fileCallRecorderPtr = NULL;
+        }
+        if (_filePlayerPtr)
+        {
+            _filePlayerPtr->RegisterModuleFileCallback(NULL);
+            _filePlayerPtr->StopPlayingFile();
+            FilePlayer::DestroyFilePlayer(_filePlayerPtr);
+            _filePlayerPtr = NULL;
+        }
+    }
+    delete &_critSect;
+    delete &_callbackCritSect;
+}
+
+WebRtc_Word32
+TransmitMixer::SetEngineInformation(ProcessThread& processThread,
+                                    Statistics& engineStatistics,
+                                    ChannelManager& channelManager)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::SetEngineInformation()");
+
+    _processThreadPtr = &processThread;
+    _engineStatisticsPtr = &engineStatistics;
+    _channelManagerPtr = &channelManager;
+
+    if (_processThreadPtr->RegisterModule(&_monitorModule) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                     "TransmitMixer::SetEngineInformation() failed to"
+                     "register the monitor module");
+    } else
+    {
+        _monitorModule.RegisterObserver(*this);
+    }
+
+    return 0;
+}
+	
+WebRtc_Word32 
+TransmitMixer::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::RegisterVoiceEngineObserver()");
+    CriticalSectionScoped cs(_callbackCritSect);
+
+    if (_voiceEngineObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "RegisterVoiceEngineObserver() observer already enabled");
+        return -1;
+    }
+    _voiceEngineObserverPtr = &observer;
+    return 0;
+}
+
+WebRtc_Word32 
+TransmitMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::SetAudioProcessingModule("
+                 "audioProcessingModule=0x%x)",
+                 audioProcessingModule);
+    _audioProcessingModulePtr = audioProcessingModule;
+    return 0;
+}
+
+WebRtc_Word32 
+TransmitMixer::PrepareDemux(const WebRtc_Word8* audioSamples,
+                            const WebRtc_UWord32 nSamples,
+                            const WebRtc_UWord8 nChannels,
+                            const WebRtc_UWord32 samplesPerSec,
+                            const WebRtc_UWord16 totalDelayMS,
+                            const WebRtc_Word32 clockDrift,
+                            const WebRtc_UWord16 currentMicLevel)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::PrepareDemux(nSamples=%u, nChannels=%u,"
+                 "samplesPerSec=%u, totalDelayMS=%u, clockDrift=%u,"
+                 "currentMicLevel=%u)", nSamples, nChannels, samplesPerSec,
+                 totalDelayMS, clockDrift, currentMicLevel);
+
+
+    const WebRtc_UWord32 mixingFrequency = _mixingFrequency;
+
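+    // --- Determine the mixing frequency: the highest send-codec sampling
+    //     rate among all sending channels, with 8 kHz as the lower bound.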
+    ScopedChannel sc(*_channelManagerPtr);
+    void* iterator(NULL);
+    Channel* channelPtr = sc.GetFirstChannel(iterator);
+    _mixingFrequency = 8000;
+    while (channelPtr != NULL)
+    {
+        if (channelPtr->Sending())
+        {
+            CodecInst tmpCdc;
+            channelPtr->GetSendCodec(tmpCdc);
+            if ((WebRtc_UWord32) tmpCdc.plfreq > _mixingFrequency)
+                _mixingFrequency = tmpCdc.plfreq;
+        }
+        channelPtr = sc.GetNextChannel(iterator);
+    }
+
+
+    // --- Resample input audio and create/store the initial audio frame
+
+    if (GenerateAudioFrame((const WebRtc_Word16*) audioSamples,
+                           nSamples,
+                           nChannels,
+                           samplesPerSec,
+                           _mixingFrequency) == -1)
+    {
+        return -1;
+    }
+
+    // --- Near-end Voice Quality Enhancement (APM) processing
+
+    APMProcessStream(totalDelayMS, clockDrift, currentMicLevel);
+
+    // --- Annoying typing detection (utilizes the APM/VAD decision)
+
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+    TypingDetection();
+#endif
+
+    // --- Mute during DTMF tone if direct feedback is enabled
+
+    if (_remainingMuteMicTimeMs > 0)
+    {
+        AudioFrameOperations::Mute(_audioFrame);
+        _remainingMuteMicTimeMs -= 10;
+        if (_remainingMuteMicTimeMs < 0)
+        {
+            _remainingMuteMicTimeMs = 0;
+        }
+    }
+
+    // --- Mute signal
+
+    if (_mute)
+    {
+        AudioFrameOperations::Mute(_audioFrame);
+        _audioLevel_dBov = 100;
+    }
+
+    // --- Measure audio level of speech after APM processing
+
+    _audioLevel.ComputeLevel(_audioFrame);
+
+    // --- Mix with file (does not affect the mixing frequency)
+
+    if (_filePlaying)
+    {
+        MixOrReplaceAudioWithFile(_mixingFrequency);
+    }
+
+    // --- Record to file
+
+    if (_fileRecording)
+    {
+        RecordAudioToFile(_mixingFrequency);
+    }
+
+    // --- External media processing
+
+    if (_externalMedia)
+    {
+        CriticalSectionScoped cs(_callbackCritSect);
+        const bool isStereo = (_audioFrame._audioChannel == 2);
+        if (_externalMediaCallbackPtr)
+        {
+            _externalMediaCallbackPtr->Process(
+                -1,
+                kRecordingAllChannelsMixed,
+                (WebRtc_Word16*) _audioFrame._payloadData,
+                _audioFrame._payloadDataLengthInSamples,
+                _audioFrame._frequencyInHz,
+                isStereo);
+        }
+    }
+
+    if (_mixingFrequency != mixingFrequency)
+    {
+        WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                     "TransmitMixer::TransmitMixer::PrepareDemux() => "
+                     "mixing frequency = %d",
+                     _mixingFrequency);
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 
+TransmitMixer::DemuxAndMix()
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::DemuxAndMix()");
+
+    ScopedChannel sc(*_channelManagerPtr);
+    void* iterator(NULL);
+    Channel* channelPtr = sc.GetFirstChannel(iterator);
+    while (channelPtr != NULL)
+    {
+        if (channelPtr->InputIsOnHold())
+        {
+            channelPtr->UpdateLocalTimeStamp();
+        } else if (channelPtr->Sending())
+        {
+            // load temporary audioframe with current (mixed) microphone signal
+            AudioFrame tmpAudioFrame = _audioFrame;
+
+            channelPtr->Demultiplex(tmpAudioFrame, _audioLevel_dBov);
+            channelPtr->PrepareEncodeAndSend(_mixingFrequency);
+        }
+        channelPtr = sc.GetNextChannel(iterator);
+    }
+
+    return 0;
+}
+	
+WebRtc_Word32 
+TransmitMixer::EncodeAndSend()
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::EncodeAndSend()");
+
+    ScopedChannel sc(*_channelManagerPtr);
+    void* iterator(NULL);
+    Channel* channelPtr = sc.GetFirstChannel(iterator);
+    while (channelPtr != NULL)
+    {
+        if (channelPtr->Sending() && !channelPtr->InputIsOnHold())
+        {
+            channelPtr->EncodeAndSend();
+        }
+        channelPtr = sc.GetNextChannel(iterator);
+    }
+    return 0;
+}
+
+WebRtc_UWord32 TransmitMixer::CaptureLevel() const
+{
+    return _captureLevel;
+}
+
+void
+TransmitMixer::UpdateMuteMicrophoneTime(const WebRtc_UWord32 lengthMs)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "TransmitMixer::UpdateMuteMicrophoneTime(lengthMs=%d)",
+               lengthMs);
+    _remainingMuteMicTimeMs = lengthMs;
+}
+
+WebRtc_Word32 
+TransmitMixer::StopSend()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "TransmitMixer::StopSend()");
+    _audioLevel.Clear();
+    return 0;
+}
+
+int TransmitMixer::StartPlayingFileAsMicrophone(const char* fileName,
+                                                const bool loop,
+                                                const FileFormats format,
+                                                const int startPosition,
+                                                const float volumeScaling,
+                                                const int stopPosition,
+                                                const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::StartPlayingFileAsMicrophone("
+                 "fileNameUTF8[]=%s,loop=%d, format=%d, volumeScaling=%5.3f,"
+                 " startPosition=%d, stopPosition=%d)", fileName, loop,
+                 format, volumeScaling, startPosition, stopPosition);
+
+    if (_filePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_PLAYING, kTraceWarning,
+            "StartPlayingFileAsMicrophone() is already playing");
+        return 0;
+    }
+
+    CriticalSectionScoped cs(_critSect);
+
+    // Destroy the old instance
+    if (_filePlayerPtr)
+    {
+        _filePlayerPtr->RegisterModuleFileCallback(NULL);
+        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
+        _filePlayerPtr = NULL;
+    }
+
+    // Dynamically create the instance
+    _filePlayerPtr
+        = FilePlayer::CreateFilePlayer(_filePlayerId,
+                                       (const FileFormats) format);
+
+    if (_filePlayerPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartPlayingFileAsMicrophone() filePlayer format isnot correct");
+        return -1;
+    }
+
+    const WebRtc_UWord32 notificationTime(0);
+
+    if (_filePlayerPtr->StartPlayingFile(
+        fileName,
+        loop,
+        startPosition,
+        volumeScaling,
+        notificationTime,
+        stopPosition,
+        (const CodecInst*) codecInst) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartPlayingFile() failed to start file playout");
+        _filePlayerPtr->StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
+        _filePlayerPtr = NULL;
+        return -1;
+    }
+
+    _filePlayerPtr->RegisterModuleFileCallback(this);
+    _filePlaying = true;
+
+    return 0;
+}
+
+int TransmitMixer::StartPlayingFileAsMicrophone(InStream* stream,
+                                                const FileFormats format,
+                                                const int startPosition,
+                                                const float volumeScaling,
+                                                const int stopPosition,
+                                                const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "TransmitMixer::StartPlayingFileAsMicrophone(format=%d,"
+                 " volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
+                 format, volumeScaling, startPosition, stopPosition);
+    
+    if (stream == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartPlayingFileAsMicrophone() NULL as input stream");
+        return -1;
+    }
+
+    if (_filePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_PLAYING, kTraceWarning,
+            "StartPlayingFileAsMicrophone() is already playing");
+        return 0;
+    }
+
+    CriticalSectionScoped cs(_critSect);
+
+    // Destroy the old instance
+    if (_filePlayerPtr)
+    {
+        _filePlayerPtr->RegisterModuleFileCallback(NULL);
+        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
+        _filePlayerPtr = NULL;
+    }
+
+    // Dynamically create the instance
+    _filePlayerPtr
+        = FilePlayer::CreateFilePlayer(_filePlayerId,
+                                       (const FileFormats) format);
+
+    if (_filePlayerPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceWarning,
+            "StartPlayingFileAsMicrophone() filePlayer format isnot correct");
+        return -1;
+    }
+
+    const WebRtc_UWord32 notificationTime(0);
+
+    if (_filePlayerPtr->StartPlayingFile(
+        (InStream&) *stream,
+        startPosition,
+        volumeScaling,
+        notificationTime,
+        stopPosition,
+        (const CodecInst*) codecInst) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartPlayingFile() failed to start file playout");
+        _filePlayerPtr->StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
+        _filePlayerPtr = NULL;
+        return -1;
+    }
+    _filePlayerPtr->RegisterModuleFileCallback(this);
+    _filePlaying = true;
+
+    return 0;
+}
+
+int TransmitMixer::StopPlayingFileAsMicrophone()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "TransmitMixer::StopPlayingFileAsMicrophone()");
+
+    if (!_filePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "StopPlayingFileAsMicrophone() isnot playing");
+        return 0;
+    }
+
+    CriticalSectionScoped cs(_critSect);
+
+    if (_filePlayerPtr->StopPlayingFile() != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CANNOT_STOP_PLAYOUT, kTraceError,
+            "StopPlayingFile() couldnot stop playing file");
+        return -1;
+    }
+
+    _filePlayerPtr->RegisterModuleFileCallback(NULL);
+    FilePlayer::DestroyFilePlayer(_filePlayerPtr);
+    _filePlayerPtr = NULL;
+    _filePlaying = false;
+
+    return 0;
+}
+
+int TransmitMixer::IsPlayingFileAsMicrophone() const
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::IsPlayingFileAsMicrophone()");
+    return _filePlaying;
+}
+
+int TransmitMixer::ScaleFileAsMicrophonePlayout(const float scale)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::ScaleFileAsMicrophonePlayout(scale=%5.3f)",
+                 scale);
+
+    CriticalSectionScoped cs(_critSect);
+
+    if (!_filePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "ScaleFileAsMicrophonePlayout() isnot playing file");
+        return -1;
+    }
+
+    if ((_filePlayerPtr == NULL) ||
+        (_filePlayerPtr->SetAudioScaling(scale) != 0))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_ARGUMENT, kTraceError,
+            "SetAudioScaling() failed to scale playout");
+        return -1;
+    }
+
+    return 0;
+}
+
+int TransmitMixer::StartRecordingMicrophone(const WebRtc_Word8* fileName,
+                                            const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::StartRecordingMicrophone(fileName=%s)",
+                 fileName);
+
+    if (_fileRecording)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                     "StartRecordingMicrophone() is already recording");
+        return 0;
+    }
+
+    FileFormats format;
+    const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
+    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
+
+    if (codecInst != NULL && codecInst->channels != 1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_ARGUMENT, kTraceError,
+            "StartRecordingMicrophone() invalid compression");
+        return (-1);
+    }
+    if (codecInst == NULL)
+    {
+        format = kFileFormatPcm16kHzFile;
+        codecInst = &dummyCodec;
+    } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
+        (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
+        (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
+    {
+        format = kFileFormatWavFile;
+    } else
+    {
+        format = kFileFormatCompressedFile;
+    }
+
+    CriticalSectionScoped cs(_critSect);
+
+    // Destroy the old instance
+    if (_fileRecorderPtr)
+    {
+        _fileRecorderPtr->RegisterModuleFileCallback(NULL);
+        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
+        _fileRecorderPtr = NULL;
+    }
+
+    _fileRecorderPtr =
+        FileRecorder::CreateFileRecorder(_fileRecorderId,
+                                         (const FileFormats) format);
+    if (_fileRecorderPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartRecordingMicrophone() fileRecorder format isnot correct");
+        return -1;
+    }
+
+    if (_fileRecorderPtr->StartRecordingAudioFile(
+        fileName,
+        (const CodecInst&) *codecInst,
+        notificationTime) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartRecordingAudioFile() failed to start file recording");
+        _fileRecorderPtr->StopRecording();
+        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
+        _fileRecorderPtr = NULL;
+        return -1;
+    }
+    _fileRecorderPtr->RegisterModuleFileCallback(this);
+    _fileRecording = true;
+
+    return 0;
+}
+
+int TransmitMixer::StartRecordingMicrophone(OutStream* stream,
+                                            const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "TransmitMixer::StartRecordingMicrophone()");
+
+    if (_fileRecording)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                   "StartRecordingMicrophone() is already recording");
+        return 0;
+    }
+
+    FileFormats format;
+    const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
+    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
+
+    if (codecInst != NULL && codecInst->channels != 1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_ARGUMENT, kTraceError,
+            "StartRecordingMicrophone() invalid compression");
+        return (-1);
+    }
+    if (codecInst == NULL)
+    {
+        format = kFileFormatPcm16kHzFile;
+        codecInst = &dummyCodec;
+    } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
+        (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
+        (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
+    {
+        format = kFileFormatWavFile;
+    } else
+    {
+        format = kFileFormatCompressedFile;
+    }
+
+    CriticalSectionScoped cs(_critSect);
+
+    // Destroy the old instance
+    if (_fileRecorderPtr)
+    {
+        _fileRecorderPtr->RegisterModuleFileCallback(NULL);
+        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
+        _fileRecorderPtr = NULL;
+    }
+
+    _fileRecorderPtr =
+        FileRecorder::CreateFileRecorder(_fileRecorderId,
+                                         (const FileFormats) format);
+    if (_fileRecorderPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartRecordingMicrophone() fileRecorder format isnot correct");
+        return -1;
+    }
+
+    if (_fileRecorderPtr->StartRecordingAudioFile(*stream,
+                                                  *codecInst,
+                                                  notificationTime) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartRecordingAudioFile() failed to start file recording");
+        _fileRecorderPtr->StopRecording();
+        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
+        _fileRecorderPtr = NULL;
+        return -1;
+    }
+
+    _fileRecorderPtr->RegisterModuleFileCallback(this);
+    _fileRecording = true;
+
+    return 0;
+}
+
+
+int TransmitMixer::StopRecordingMicrophone()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::StopRecordingMicrophone()");
+
+    if (!_fileRecording)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
+                   "StopRecordingMicrophone() isnot recording");
+        return -1;
+    }
+
+    CriticalSectionScoped cs(_critSect);
+
+    if (_fileRecorderPtr->StopRecording() != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_STOP_RECORDING_FAILED, kTraceError,
+            "StopRecording(), could not stop recording");
+        return -1;
+    }
+    _fileRecorderPtr->RegisterModuleFileCallback(NULL);
+    FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
+    _fileRecorderPtr = NULL;
+    _fileRecording = false;
+
+    return 0;
+}
+
+int TransmitMixer::StartRecordingCall(const WebRtc_Word8* fileName,
+                                      const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::StartRecordingCall(fileName=%s)", fileName);
+
+    if (_fileCallRecording)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                     "StartRecordingCall() is already recording");
+        return 0;
+    }
+
+    FileFormats format;
+    const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
+    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
+
+    if (codecInst != NULL && codecInst->channels != 1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_ARGUMENT, kTraceError,
+            "StartRecordingCall() invalid compression");
+        return (-1);
+    }
+    if (codecInst == NULL)
+    {
+        format = kFileFormatPcm16kHzFile;
+        codecInst = &dummyCodec;
+    } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
+        (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
+        (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
+    {
+        format = kFileFormatWavFile;
+    } else
+    {
+        format = kFileFormatCompressedFile;
+    }
+
+    CriticalSectionScoped cs(_critSect);
+
+    // Destroy the old instance
+    if (_fileCallRecorderPtr)
+    {
+        _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
+        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
+        _fileCallRecorderPtr = NULL;
+    }
+
+    _fileCallRecorderPtr
+        = FileRecorder::CreateFileRecorder(_fileCallRecorderId,
+                                           (const FileFormats) format);
+    if (_fileCallRecorderPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartRecordingCall() fileRecorder format isnot correct");
+        return -1;
+    }
+
+    if (_fileCallRecorderPtr->StartRecordingAudioFile(
+        fileName,
+        (const CodecInst&) *codecInst,
+        notificationTime) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartRecordingAudioFile() failed to start file recording");
+        _fileCallRecorderPtr->StopRecording();
+        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
+        _fileCallRecorderPtr = NULL;
+        return -1;
+    }
+    _fileCallRecorderPtr->RegisterModuleFileCallback(this);
+    _fileCallRecording = true;
+
+    return 0;
+}
+
+int TransmitMixer::StartRecordingCall(OutStream* stream,
+                                      const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::StartRecordingCall()");
+
+    if (_fileCallRecording)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                     "StartRecordingCall() is already recording");
+        return 0;
+    }
+
+    FileFormats format;
+    const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
+    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
+
+    if (codecInst != NULL && codecInst->channels != 1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_ARGUMENT, kTraceError,
+            "StartRecordingCall() invalid compression");
+        return (-1);
+    }
+    if (codecInst == NULL)
+    {
+        format = kFileFormatPcm16kHzFile;
+        codecInst = &dummyCodec;
+    } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
+        (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
+        (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
+    {
+        format = kFileFormatWavFile;
+    } else
+    {
+        format = kFileFormatCompressedFile;
+    }
+
+    CriticalSectionScoped cs(_critSect);
+
+    // Destroy the old instance
+    if (_fileCallRecorderPtr)
+    {
+        _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
+        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
+        _fileCallRecorderPtr = NULL;
+    }
+
+    _fileCallRecorderPtr =
+        FileRecorder::CreateFileRecorder(_fileCallRecorderId,
+                                         (const FileFormats) format);
+    if (_fileCallRecorderPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartRecordingCall() fileRecorder format isnot correct");
+        return -1;
+    }
+
+    if (_fileCallRecorderPtr->StartRecordingAudioFile(*stream,
+                                                      *codecInst,
+                                                      notificationTime) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartRecordingAudioFile() failed to start file recording");
+        _fileCallRecorderPtr->StopRecording();
+        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
+        _fileCallRecorderPtr = NULL;
+        return -1;
+    }
+
+    _fileCallRecorderPtr->RegisterModuleFileCallback(this);
+    _fileCallRecording = true;
+
+    return 0;
+}
+
+int TransmitMixer::StopRecordingCall()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::StopRecordingCall()");
+
+    if (!_fileCallRecording)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
+                     "StopRecordingCall() file is not recording");
+        return -1;
+    }
+
+    CriticalSectionScoped cs(_critSect);
+
+    if (_fileCallRecorderPtr->StopRecording() != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_STOP_RECORDING_FAILED, kTraceError,
+            "StopRecording(), could not stop recording");
+        return -1;
+    }
+
+    _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
+    FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
+    _fileCallRecorderPtr = NULL;
+    _fileCallRecording = false;
+
+    return 0;
+}
+
+void 
+TransmitMixer::SetMixWithMicStatus(bool mix)
+{
+    _mixFileWithMicrophone = mix;
+}
+
+int TransmitMixer::RegisterExternalMediaProcessing(
+    VoEMediaProcess& process_object)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::RegisterExternalMediaProcessing()");
+
+    CriticalSectionScoped cs(_callbackCritSect);
+    _externalMediaCallbackPtr = &process_object;
+    _externalMedia = true;
+
+    return 0;
+}
+
+int TransmitMixer::DeRegisterExternalMediaProcessing()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::DeRegisterExternalMediaProcessing()");
+
+    CriticalSectionScoped cs(_callbackCritSect);
+    _externalMedia = false;
+    _externalMediaCallbackPtr = NULL;
+
+    return 0;
+}
+
+int
+TransmitMixer::SetMute(bool enable)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::SetMute(enable=%d)", enable);
+    _mute = enable;
+    return 0;
+}
+
+bool
+TransmitMixer::Mute() const
+{
+    return _mute;
+}
+
+WebRtc_Word8 TransmitMixer::AudioLevel() const
+{
+    // Speech + file level [0,9]
+    return _audioLevel.Level();
+}
+
+WebRtc_Word16 TransmitMixer::AudioLevelFullRange() const
+{
+    // Speech + file level [0,32767]
+    return _audioLevel.LevelFullRange();
+}
+
+bool TransmitMixer::IsRecordingCall()
+{
+    return _fileCallRecording;
+}
+
+bool TransmitMixer::IsRecordingMic()
+{
+
+    return _fileRecording;
+}
+
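+// Copies the raw capture buffer into _audioFrame, resampling from the audio
+// device sample rate (samplesPerSec) to the current mixing frequency.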
+WebRtc_Word32 
+TransmitMixer::GenerateAudioFrame(const WebRtc_Word16 audioSamples[],
+                                  const WebRtc_UWord32 nSamples,
+                                  const WebRtc_UWord8 nChannels,
+                                  const WebRtc_UWord32 samplesPerSec,
+                                  const WebRtc_UWord32 mixingFrequency)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::GenerateAudioFrame(nSamples=%u, "
+                 "samplesPerSec=%u, mixingFrequency=%u)",
+                 nSamples, samplesPerSec, mixingFrequency);
+
+    if (_audioResampler.ResetIfNeeded(samplesPerSec,
+                                        mixingFrequency,
+                                        kResamplerSynchronous) != 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
+                     "TransmitMixer::GenerateAudioFrame() unable to resample");
+        return -1;
+    }
+    if (_audioResampler.Push(
+        (WebRtc_Word16*) audioSamples,
+        nSamples,
+        _audioFrame._payloadData,
+        AudioFrame::kMaxAudioFrameSizeSamples,
+        (int&) _audioFrame._payloadDataLengthInSamples) == -1)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
+                     "TransmitMixer::GenerateAudioFrame() resampling failed");
+        return -1;
+    }
+
+    _audioFrame._id = _instanceId;
+    _audioFrame._timeStamp = -1;
+    _audioFrame._frequencyInHz = mixingFrequency;
+    _audioFrame._speechType = AudioFrame::kNormalSpeech;
+    _audioFrame._vadActivity = AudioFrame::kVadUnknown;
+    _audioFrame._audioChannel = nChannels;
+
+    return 0;
+}
+
+WebRtc_Word32 TransmitMixer::RecordAudioToFile(
+    const WebRtc_UWord32 mixingFrequency)
+{
+    assert(_audioFrame._audioChannel == 1);
+
+    CriticalSectionScoped cs(_critSect);
+    if (_fileRecorderPtr == NULL)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                     "TransmitMixer::RecordAudioToFile() file recorder does"
+                     " not exist");
+        return -1;
+    }
+
+    if (_fileRecorderPtr->RecordAudioToFile(_audioFrame) != 0)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                     "TransmitMixer::RecordAudioToFile() file recording"
+                     " failed");
+        return -1;
+    }
+
+    return 0;
+}
+
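+// Fetches 10 ms of "file-as-microphone" audio from the file player and either
+// mixes it into the captured frame (with saturation) or replaces the frame,
+// depending on the SetMixWithMicStatus() setting.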
+WebRtc_Word32 TransmitMixer::MixOrReplaceAudioWithFile(
+    const WebRtc_UWord32 mixingFrequency)
+{
+    WebRtc_Word16 fileBuffer[320];
+
+    WebRtc_UWord32 fileSamples(0);
+    WebRtc_Word32 outSamples(0);
+
+    {
+        CriticalSectionScoped cs(_critSect);
+        if (_filePlayerPtr == NULL)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                         VoEId(_instanceId, -1),
+                         "TransmitMixer::MixOrReplaceAudioWithFile()"
+                         " file player does not exist");
+            return -1;
+        }
+
+        if (_filePlayerPtr->Get10msAudioFromFile(fileBuffer,
+                                                 fileSamples,
+                                                 mixingFrequency) == -1)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                         "TransmitMixer::MixOrReplaceAudioWithFile() file"
+                         " mixing failed");
+            return -1;
+        }
+    }
+
+    if (_mixFileWithMicrophone)
+    {
+        Utility::MixWithSat(_audioFrame._payloadData,
+                             fileBuffer,
+                             (WebRtc_UWord16) fileSamples);
+        assert(_audioFrame._payloadDataLengthInSamples == fileSamples);
+    } else
+    {
+        // replace ACM audio with file
+        _audioFrame.UpdateFrame(-1,
+                                -1,
+                                fileBuffer,
+                                (WebRtc_UWord16) fileSamples, mixingFrequency,
+                                AudioFrame::kNormalSpeech,
+                                AudioFrame::kVadUnknown,
+                                1);
+
+    }
+    return 0;
+}
+
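+// Runs the near-end Audio Processing Module on _audioFrame: updates the APM
+// sample rate, stream delay, analog mic level and (when drift compensation is
+// enabled) drift estimate, processes the frame, and then stores the suggested
+// capture level, the audio level in dBov and any saturation/echo notifications.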
+WebRtc_Word32 TransmitMixer::APMProcessStream(
+    const WebRtc_UWord16 totalDelayMS,
+    const WebRtc_Word32 clockDrift,
+    const WebRtc_UWord16 currentMicLevel)
+{
+    WebRtc_UWord16 captureLevel(currentMicLevel);
+
+    // If the frequency has changed we need to change APM settings
+    // Sending side is "master"
+    if (_audioProcessingModulePtr->sample_rate_hz()
+        != _audioFrame._frequencyInHz)
+    {
+        if (_audioProcessingModulePtr->set_sample_rate_hz(
+            _audioFrame._frequencyInHz))
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                         "AudioProcessingModule::set_sample_rate_hz("
+                         "_frequencyInHz=%u) => error",
+                         _audioFrame._frequencyInHz);
+        }
+    }
+
+    if (_audioProcessingModulePtr->set_stream_delay_ms(totalDelayMS) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                     "AudioProcessingModule::set_stream_delay_ms("
+                     "totalDelayMS=%u) => error",
+                     totalDelayMS);
+    }
+    if (_audioProcessingModulePtr->gain_control()->set_stream_analog_level(
+        captureLevel) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                   "AudioProcessingModule::set_stream_analog_level"
+                   "(captureLevel=%u) => error",
+                   captureLevel);
+    }
+    if (_audioProcessingModulePtr->echo_cancellation()->
+        is_drift_compensation_enabled())
+    {
+        if (_audioProcessingModulePtr->echo_cancellation()->
+            set_stream_drift_samples(clockDrift) == -1)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                       "AudioProcessingModule::set_stream_drift_samples("
+                       "clockDrift=%u) => error",
+                       clockDrift);
+        }
+    }
+    if (_audioProcessingModulePtr->ProcessStream(&_audioFrame) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                   "AudioProcessingModule::ProcessStream() => error");
+    }
+    captureLevel
+        = _audioProcessingModulePtr->gain_control()->stream_analog_level();
+
+    // Store new capture level (only updated when analog AGC is enabled)
+    _captureLevel = captureLevel;
+
+    // Store current audio level (in dBov) if audio-level-indication
+    // functionality has been enabled. This value will be included in an
+    // extended RTP header by the RTP module.
+    if (_includeAudioLevelIndication)
+    {
+        if (_audioProcessingModulePtr->level_estimator()->is_enabled())
+        {
+            LevelEstimator::Metrics metrics;
+            LevelEstimator::Metrics reverseMetrics;
+            _audioProcessingModulePtr->level_estimator()->GetMetrics(
+                &metrics,
+                &reverseMetrics);
+            const WebRtc_Word16 absAudioLevel_dBov =
+                WEBRTC_ABS(metrics.speech.instant);
+            _audioLevel_dBov = static_cast<WebRtc_UWord8> (absAudioLevel_dBov);
+        } else
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                       "TransmitMixer::APMProcessStream() failed to"
+                       " retrieve level metrics");
+            _audioLevel_dBov = 100;
+        }
+    }
+
+    // Log notifications
+    if (_audioProcessingModulePtr->gain_control()->stream_is_saturated())
+    {
+        if (_saturationWarning == 1)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                       "TransmitMixer::APMProcessStream() pending"
+                       " saturation warning exists");
+        }
+        _saturationWarning = 1; // triggers callback from moduleprocess thread
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                   "TransmitMixer::APMProcessStream() VE_SATURATION_WARNING"
+                   " message has been posted for callback");
+    }
+
+    if (_audioProcessingModulePtr->echo_cancellation()->stream_has_echo())
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                   "AudioProcessingModule notification: Echo");
+    }
+
+    return 0;
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
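+// Heuristic typing detection: a key press that coincides with the onset of
+// voice activity (VAD active for fewer than 10 consecutive frames) adds 100
+// to a penalty counter; when the counter exceeds 300 a VE_TYPING_NOISE_WARNING
+// callback is triggered. The counter decays by one for every processed frame.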
+int TransmitMixer::TypingDetection()
+{
+    // We let the VAD determine if we're using this feature or not.
+    if (_audioFrame._vadActivity == AudioFrame::kVadUnknown)
+    {
+        return (0);
+    }
+
+    int keyPressed = EventWrapper::KeyPressed();
+
+    if (keyPressed < 0)
+    {
+        return (-1);
+    }
+    bool vad = (_audioFrame._vadActivity == AudioFrame::kVadActive);
+
+    if (_audioFrame._vadActivity == AudioFrame::kVadActive)
+        _timeActive++;
+    else
+        _timeActive = 0;
+
+    if (keyPressed && (_audioFrame._vadActivity == AudioFrame::kVadActive)
+        && (_timeActive < 10))
+    {
+        _penaltyCounter += 100;
+        if (_penaltyCounter > 300)
+        {
+            if (_typingNoiseWarning == 1)
+            {
+                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                           VoEId(_instanceId, -1),
+                           "TransmitMixer::TypingDetection() pending "
+                               "noise-saturation warning exists");
+            }
+            // triggers callback from the module process thread
+            _typingNoiseWarning = 1;
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                       "TransmitMixer::TypingDetection() "
+                       "VE_TYPING_NOISE_WARNING message has been posted for"
+                       " callback");
+        }
+    }
+
+    if (_penaltyCounter > 0)
+        _penaltyCounter--;
+
+    return (0);
+}
+#endif
+
+WebRtc_UWord32 TransmitMixer::GetMixingFrequency()
+{
+    assert(_mixingFrequency!=0);
+    return (_mixingFrequency);
+}
+
+}  //  namespace voe
+
+}  //  namespace webrtc
diff --git a/voice_engine/main/source/transmit_mixer.h b/voice_engine/main/source/transmit_mixer.h
new file mode 100644
index 0000000..871521e
--- /dev/null
+++ b/voice_engine/main/source/transmit_mixer.h
@@ -0,0 +1,227 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_TRANSMIT_MIXER_H
+#define WEBRTC_VOICE_ENGINE_TRANSMIT_MIXER_H
+
+#include "common_types.h"
+#include "voe_base.h"
+#include "file_player.h"
+#include "file_recorder.h"
+#include "level_indicator.h"
+#include "module_common_types.h"
+#include "monitor_module.h"
+#include "resampler.h"
+#include "voice_engine_defines.h"
+
+
+namespace webrtc {
+
+class AudioProcessing;
+class ProcessThread;
+class VoEExternalMedia;
+class VoEMediaProcess;
+
+namespace voe {
+
+class ChannelManager;
+class MixedAudio;
+class Statistics;
+
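+// The TransmitMixer operates on the capture (near-end) path: it converts the
+// recorded audio into a frame at the mixing frequency, can mix in or play a
+// file "as microphone", runs the Audio Processing Module, supports microphone
+// and call recording, and hands the processed frame on for encoding and
+// sending (see DemuxAndMix() and EncodeAndSend()).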
+class TransmitMixer : public MonitorObserver,
+                      public FileCallback
+{
+public:
+    static WebRtc_Word32 Create(TransmitMixer*& mixer,
+                                const WebRtc_UWord32 instanceId);
+
+    static void Destroy(TransmitMixer*& mixer);
+
+    WebRtc_Word32 SetEngineInformation(ProcessThread& processThread,
+                                       Statistics& engineStatistics,
+                                       ChannelManager& channelManager);
+
+    WebRtc_Word32 SetAudioProcessingModule(
+        AudioProcessing* audioProcessingModule);
+
+    WebRtc_Word32 PrepareDemux(const WebRtc_Word8* audioSamples,
+                               const WebRtc_UWord32 nSamples,
+                               const WebRtc_UWord8  nChannels,
+                               const WebRtc_UWord32 samplesPerSec,
+                               const WebRtc_UWord16 totalDelayMS,
+                               const WebRtc_Word32  clockDrift,
+                               const WebRtc_UWord16 currentMicLevel);
+
+
+    WebRtc_Word32 DemuxAndMix();
+
+    WebRtc_Word32 EncodeAndSend();
+
+    WebRtc_UWord32 CaptureLevel() const;
+
+    WebRtc_Word32 StopSend();
+
+
+    void SetRTPAudioLevelIndicationStatus(bool enable)
+        { _includeAudioLevelIndication = enable; }
+
+    // VoEDtmf
+    void UpdateMuteMicrophoneTime(const WebRtc_UWord32 lengthMs);
+
+    // VoEExternalMedia
+    int RegisterExternalMediaProcessing(VoEMediaProcess& process_object);
+
+    int DeRegisterExternalMediaProcessing();
+
+    WebRtc_UWord32 GetMixingFrequency();
+
+    // VoEVolumeControl
+    int SetMute(const bool enable);
+
+    bool Mute() const;
+
+    WebRtc_Word8 AudioLevel() const;
+
+    WebRtc_Word16 AudioLevelFullRange() const;
+
+    bool IsRecordingCall();
+
+    bool IsRecordingMic();
+
+    int StartPlayingFileAsMicrophone(const char* fileName,
+                                     const bool loop,
+                                     const FileFormats format,
+                                     const int startPosition,
+                                     const float volumeScaling,
+                                     const int stopPosition,
+                                     const CodecInst* codecInst);
+
+    int StartPlayingFileAsMicrophone(InStream* stream,
+                                     const FileFormats format,
+                                     const int startPosition,
+                                     const float volumeScaling,
+                                     const int stopPosition,
+                                     const CodecInst* codecInst);
+
+    int StopPlayingFileAsMicrophone();
+
+    int IsPlayingFileAsMicrophone() const;
+
+    int ScaleFileAsMicrophonePlayout(const float scale);
+
+    int StartRecordingMicrophone(const char* fileName,
+                                 const CodecInst* codecInst);
+
+    int StartRecordingMicrophone(OutStream* stream,
+                                 const CodecInst* codecInst);
+
+    int StopRecordingMicrophone();
+
+    int StartRecordingCall(const char* fileName, const CodecInst* codecInst);
+
+    int StartRecordingCall(OutStream* stream, const CodecInst* codecInst);
+
+    int StopRecordingCall();
+
+    void SetMixWithMicStatus(bool mix);
+
+    WebRtc_Word32 RegisterVoiceEngineObserver(VoiceEngineObserver& observer);
+
+    virtual ~TransmitMixer();
+
+public: // MonitorObserver
+    void OnPeriodicProcess();
+
+
+public: // FileCallback
+    void PlayNotification(const WebRtc_Word32 id,
+                          const WebRtc_UWord32 durationMs);
+
+    void RecordNotification(const WebRtc_Word32 id,
+                            const WebRtc_UWord32 durationMs);
+
+    void PlayFileEnded(const WebRtc_Word32 id);
+
+    void RecordFileEnded(const WebRtc_Word32 id);
+
+private:
+    TransmitMixer(const WebRtc_UWord32 instanceId);
+
+private:
+    WebRtc_Word32 GenerateAudioFrame(const WebRtc_Word16 audioSamples[],
+                                     const WebRtc_UWord32 nSamples,
+                                     const WebRtc_UWord8 nChannels,
+                                     const WebRtc_UWord32 samplesPerSec,
+                                     const WebRtc_UWord32 mixingFrequency);
+    WebRtc_Word32 RecordAudioToFile(const WebRtc_UWord32 mixingFrequency);
+
+    WebRtc_Word32 MixOrReplaceAudioWithFile(
+        const WebRtc_UWord32 mixingFrequency);
+
+    WebRtc_Word32 APMProcessStream(const WebRtc_UWord16 totalDelayMS,
+                                   const WebRtc_Word32 clockDrift,
+                                   const WebRtc_UWord16 currentMicLevel);
+
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+    int TypingDetection();
+#endif
+
+private:  // uses
+    Statistics* _engineStatisticsPtr;
+    ChannelManager* _channelManagerPtr;
+    AudioProcessing* _audioProcessingModulePtr;
+    VoiceEngineObserver* _voiceEngineObserverPtr;
+    ProcessThread* _processThreadPtr;
+
+private:  // owns
+    MonitorModule _monitorModule;
+    AudioFrame _audioFrame;
+    Resampler _audioResampler;  // ADM sample rate -> mixing rate
+    FilePlayer* _filePlayerPtr;
+    FileRecorder* _fileRecorderPtr;
+    FileRecorder* _fileCallRecorderPtr;
+    WebRtc_UWord32 _filePlayerId;
+    WebRtc_UWord32 _fileRecorderId;
+    WebRtc_UWord32 _fileCallRecorderId;
+    bool _filePlaying;
+    bool _fileRecording;
+    bool _fileCallRecording;
+    voe::AudioLevel _audioLevel;
+    // protect file instances and their variables in MixedParticipants()
+    CriticalSectionWrapper& _critSect;
+    CriticalSectionWrapper& _callbackCritSect;
+
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+    WebRtc_Word32 _timeActive;
+    WebRtc_Word32 _penaltyCounter;
+    WebRtc_UWord32 _typingNoiseWarning;
+#endif
+    WebRtc_UWord32 _saturationWarning;
+    WebRtc_UWord32 _noiseWarning;
+
+private:
+    WebRtc_UWord32 _instanceId;
+    bool _mixFileWithMicrophone;
+    WebRtc_UWord32 _captureLevel;
+    bool _externalMedia;
+    VoEMediaProcess* _externalMediaCallbackPtr;
+    bool _mute;
+    WebRtc_Word32 _remainingMuteMicTimeMs;
+    WebRtc_UWord32 _mixingFrequency;
+    bool _includeAudioLevelIndication;
+    WebRtc_UWord8 _audioLevel_dBov;
+};
+
+}  //  namespace voe
+
+}  // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_TRANSMIT_MIXER_H
diff --git a/voice_engine/main/source/utility.cc b/voice_engine/main/source/utility.cc
new file mode 100644
index 0000000..0513af3
--- /dev/null
+++ b/voice_engine/main/source/utility.cc
@@ -0,0 +1,120 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "utility.h"
+
+#include "module.h"
+#include "trace.h"
+
+namespace webrtc
+{
+
+namespace voe
+{
+
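+// Adds source[] to target[] sample by sample, saturating the result to the
+// 16-bit range [-32768, 32767].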
+void Utility::MixWithSat(WebRtc_Word16 target[],
+                         const WebRtc_Word16 source[],
+                         WebRtc_UWord16 len)
+{
+    WebRtc_Word32 temp(0);
+    for (int i = 0; i < len; i++)
+    {
+        temp = source[i] + target[i];
+        if (temp > 32767)
+            target[i] = 32767;
+        else if (temp < -32768)
+            target[i] = -32768;
+        else
+            target[i] = (WebRtc_Word16) temp;
+    }
+}
+
+void Utility::MixSubtractWithSat(WebRtc_Word16 target[],
+                                 const WebRtc_Word16 source[],
+                                 WebRtc_UWord16 len)
+{
+    WebRtc_Word32 temp(0);
+    for (int i = 0; i < len; i++)
+    {
+        temp = target[i] - source[i];
+        if (temp > 32767)
+            target[i] = 32767;
+        else if (temp < -32768)
+            target[i] = -32768;
+        else
+            target[i] = (WebRtc_Word16) temp;
+    }
+}
+
+void Utility::MixAndScaleWithSat(WebRtc_Word16 target[],
+                                 const WebRtc_Word16 source[], float scale,
+                                 WebRtc_UWord16 len)
+{
+    WebRtc_Word32 temp(0);
+    for (int i = 0; i < len; i++)
+    {
+        temp = (WebRtc_Word32) (target[i] + scale * source[i]);
+        if (temp > 32767)
+            target[i] = 32767;
+        else if (temp < -32768)
+            target[i] = -32768;
+        else
+            target[i] = (WebRtc_Word16) temp;
+    }
+}
+
+void Utility::Scale(WebRtc_Word16 vector[], float scale, WebRtc_UWord16 len)
+{
+    for (int i = 0; i < len; i++)
+    {
+        vector[i] = (WebRtc_Word16) (scale * vector[i]);
+    }
+}
+
+void Utility::ScaleWithSat(WebRtc_Word16 vector[], float scale,
+                           WebRtc_UWord16 len)
+{
+    WebRtc_Word32 temp(0);
+    for (int i = 0; i < len; i++)
+    {
+        temp = (WebRtc_Word32) (scale * vector[i]);
+        if (temp > 32767)
+            vector[i] = 32767;
+        else if (temp < -32768)
+            vector[i] = -32768;
+        else
+            vector[i] = (WebRtc_Word16) temp;
+    }
+}
+
+void Utility::TraceModuleVersion(const WebRtc_Word32 id,
+                                 const Module& module)
+{
+    WebRtc_Word8 version[Utility::kMaxVersionSize] = { 0 };
+    WebRtc_UWord32 remainingBufferInBytes = Utility::kMaxVersionSize;
+    WebRtc_UWord32 position = 0;
+    if (module.Version(version, remainingBufferInBytes, position) == 0)
+    {
+        WebRtc_Word8* ptr(NULL);
+        while ((ptr = strchr(version, '\t')) != NULL)
+        {
+            *ptr = ' ';
+        }
+        while ((ptr = strchr(version, '\n')) != NULL)
+        {
+            *ptr = ' ';
+        }
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice, id, "%s", version);
+    }
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/voice_engine/main/source/utility.h b/voice_engine/main/source/utility.h
new file mode 100644
index 0000000..bb6f6d5
--- /dev/null
+++ b/voice_engine/main/source/utility.h
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ *  Contains functions often used by different parts of VoiceEngine.
+ */
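+//
+// Illustrative usage only (not part of the API contract): mix 10 ms of file
+// audio into a captured buffer at 16 kHz with saturation:
+//
+//   WebRtc_Word16 mic[160];   // 10 ms @ 16 kHz
+//   WebRtc_Word16 file[160];
+//   // ... fill both buffers ...
+//   webrtc::voe::Utility::MixWithSat(mic, file, 160);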
+
+#ifndef WEBRTC_VOICE_ENGINE_UTILITY_H
+#define WEBRTC_VOICE_ENGINE_UTILITY_H
+
+#include "typedefs.h"
+#include "voice_engine_defines.h"
+
+namespace webrtc
+{
+
+class Module;
+
+namespace voe
+{
+
+class Utility
+{
+public:
+    static void MixWithSat(WebRtc_Word16 target[],
+                           const WebRtc_Word16 source[],
+                           WebRtc_UWord16 len);
+
+    static void MixSubtractWithSat(WebRtc_Word16 target[],
+                                   const WebRtc_Word16 source[],
+                                   WebRtc_UWord16 len);
+
+    static void MixAndScaleWithSat(WebRtc_Word16 target[],
+                                   const WebRtc_Word16 source[],
+                                   float scale,
+                                   WebRtc_UWord16 len);
+
+    static void Scale(WebRtc_Word16 vector[], float scale, WebRtc_UWord16 len);
+
+    static void ScaleWithSat(WebRtc_Word16 vector[],
+                             float scale,
+                             WebRtc_UWord16 len);
+
+    static void TraceModuleVersion(const WebRtc_Word32 id,
+                                   const Module& module);
+
+private:
+    enum {kMaxVersionSize = 640};
+};
+
+} // namespace voe
+
+} // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_UTILITY_H
diff --git a/voice_engine/main/source/voe_audio_processing_impl.cc b/voice_engine/main/source/voe_audio_processing_impl.cc
new file mode 100644
index 0000000..9094e0f
--- /dev/null
+++ b/voice_engine/main/source/voe_audio_processing_impl.cc
@@ -0,0 +1,1245 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_audio_processing_impl.h"
+
+#include "audio_processing.h"
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc {
+
+VoEAudioProcessing* VoEAudioProcessing::GetInterface(
+    VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+    return NULL;
+#else
+    if (NULL == voiceEngine)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* s = reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoEAudioProcessingImpl* d = s;
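+    // Register the caller as a user of this sub-API; the matching Release()
+    // call decreases the internal reference counter again.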
+    (*d)++;
+    return (d);
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+VoEAudioProcessingImpl::VoEAudioProcessingImpl() :
+    _isAecMode(WEBRTC_VOICE_ENGINE_EC_DEFAULT_MODE == EcAec)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEAudioProcessingImpl::VoEAudioProcessingImpl() - ctor");
+}
+
+VoEAudioProcessingImpl::~VoEAudioProcessingImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEAudioProcessingImpl::~VoEAudioProcessingImpl() - dtor");
+}
+
+int VoEAudioProcessingImpl::Release()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEAudioProcessing::Release()");
+    (*this)--;
+    int refCount = GetCount();
+    if (refCount < 0)
+    {
+        Reset();  // reset reference counter to zero => OK to delete VE
+        _engineStatistics.SetLastError(
+            VE_INTERFACE_NOT_FOUND, kTraceWarning);
+        return (-1);
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEAudioProcessing reference counter = %d", refCount);
+    return (refCount);
+}
+
+int VoEAudioProcessingImpl::SetNsStatus(bool enable, NsModes mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetNsStatus(enable=%d, mode=%d)", enable, mode);
+#ifdef WEBRTC_VOICE_ENGINE_NR
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    NoiseSuppression::Level nsLevel(
+        (NoiseSuppression::Level)WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE);
+    switch (mode)
+    {
+    case kNsDefault:
+        nsLevel = (NoiseSuppression::Level)WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE;
+        break; 
+    case kNsUnchanged:
+        nsLevel = _audioProcessingModulePtr->noise_suppression()->level();
+        break;
+    case kNsConference:
+        nsLevel = NoiseSuppression::kHigh;
+        break;
+    case kNsLowSuppression:
+        nsLevel = NoiseSuppression::kLow;
+        break;
+    case kNsModerateSuppression:
+        nsLevel = NoiseSuppression::kModerate;
+        break;
+    case kNsHighSuppression:
+        nsLevel = NoiseSuppression::kHigh;
+        break;
+    case kNsVeryHighSuppression:
+        nsLevel = NoiseSuppression::kVeryHigh;
+        break;
+    default:
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetNsStatus() invalid Ns mode");
+        return -1;
+    }
+
+    if (_audioProcessingModulePtr->noise_suppression()->set_level(nsLevel) != 0)
+    {
+        _engineStatistics.SetLastError(VE_APM_ERROR, kTraceError,
+                                       "SetNsStatus() failed to set Ns mode");
+        return -1;
+    }
+    if (_audioProcessingModulePtr->noise_suppression()->Enable(enable) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetNsStatus() failed to set Ns state");
+        return -1;
+    }
+
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetNsStatus() Ns is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::GetNsStatus(bool& enabled, NsModes& mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetNsStatus(enabled=?, mode=?)");
+#ifdef WEBRTC_VOICE_ENGINE_NR
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    bool enable(false);
+    NoiseSuppression::Level nsLevel(
+        (NoiseSuppression::Level)WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE);
+
+    enable = _audioProcessingModulePtr->noise_suppression()->is_enabled();
+    nsLevel = _audioProcessingModulePtr->noise_suppression()->level();
+
+    enabled = enable;
+
+    switch (nsLevel)
+    {
+        case NoiseSuppression::kLow:
+            mode = kNsLowSuppression;
+            break;
+        case NoiseSuppression::kModerate:
+            mode = kNsModerateSuppression;
+            break;
+        case NoiseSuppression::kHigh:
+            mode = kNsHighSuppression;
+            break;
+        case NoiseSuppression::kVeryHigh:
+            mode = kNsVeryHighSuppression;
+            break;
+        default:
+            _engineStatistics.SetLastError(
+                VE_APM_ERROR, kTraceError,
+                "GetNsStatus() invalid Ns mode");
+            return -1;
+    }
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetNsStatus() => enabled=%d, mode=%d", enabled, mode);
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "GetNsStatus() Ns is not supported");
+    return -1;
+#endif	
+}
+
+int VoEAudioProcessingImpl::SetAgcStatus(bool enable, AgcModes mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetAgcStatus(enable=%d, mode=%d)", enable, mode);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+#if defined(MAC_IPHONE) || defined(ATA) || defined(ANDROID)
+    if (mode == kAgcAdaptiveAnalog)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetAgcStatus() invalid Agc mode for mobile device");
+        return -1;
+    }
+#endif
+
+    GainControl::Mode agcMode(
+        (GainControl::Mode)WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE);
+    switch (mode)
+    {
+    case kAgcDefault:
+        agcMode = (GainControl::Mode)WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE;
+        break; 
+    case kAgcUnchanged:
+        agcMode = _audioProcessingModulePtr->gain_control()->mode();
+        break;
+    case kAgcFixedDigital:
+        agcMode = GainControl::kFixedDigital;
+        break;
+    case kAgcAdaptiveAnalog:
+        agcMode = GainControl::kAdaptiveAnalog;
+        break;
+    case kAgcAdaptiveDigital:
+        agcMode = GainControl::kAdaptiveDigital;
+        break;
+    default:
+        _engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                                       "SetAgcStatus() invalid Agc mode");
+        return -1;
+    }
+
+    if (_audioProcessingModulePtr->gain_control()->set_mode(agcMode) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetAgcStatus() failed to set Agc mode");
+        return -1;
+    }
+    if (_audioProcessingModulePtr->gain_control()->Enable(enable) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetAgcStatus() failed to set Agc state");
+        return -1;
+    }
+
+    if (agcMode != GainControl::kFixedDigital)
+    {
+        // Set Agc state in the ADM when adaptive Agc mode has been selected.
+        // Note that we also enable the ADM Agc when Adaptive Digital mode is
+        // used since we want to be able to provide the APM with updated mic
+        // levels when the user modifies the mic level manually.
+        if (_audioDevicePtr->SetAGC(enable) != 0)
+        {
+            _engineStatistics.SetLastError(
+                VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
+                "SetAgcStatus() failed to set Agc mode");
+        }
+    }
+
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetAgcStatus() Agc is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::GetAgcStatus(bool& enabled, AgcModes& mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetAgcStatus(enabled=?, mode=?)");
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    bool enable(false);
+    GainControl::Mode agcMode(
+        (GainControl::Mode)WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE);
+
+    enable = _audioProcessingModulePtr->gain_control()->is_enabled();
+    agcMode = _audioProcessingModulePtr->gain_control()->mode();
+
+    enabled = enable;
+
+    switch (agcMode)
+    {
+        case GainControl::kFixedDigital:
+            mode = kAgcFixedDigital;
+            break;
+        case GainControl::kAdaptiveAnalog:
+            mode = kAgcAdaptiveAnalog;
+            break;
+        case GainControl::kAdaptiveDigital:
+            mode = kAgcAdaptiveDigital;
+            break;
+        default:
+            _engineStatistics.SetLastError(VE_APM_ERROR, kTraceError,
+                                           "GetAgcStatus() invalid Agc mode");
+            return -1;
+    }
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetAgcStatus() => enabled=%d, mode=%d", enabled, mode);
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "GetAgcStatus() Agc is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::SetAgcConfig(const AgcConfig config)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetAgcConfig()");
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    if (_audioProcessingModulePtr->gain_control()->set_target_level_dbfs(
+                    config.targetLeveldBOv) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetAgcConfig() failed to set target peak |level|"
+            " (or envelope) of the Agc");
+        return -1;
+    }
+    if (_audioProcessingModulePtr->gain_control()->set_compression_gain_db(
+        config.digitalCompressionGaindB) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetAgcConfig() failed to set the range in |gain|"
+            "the digital compression stage may apply");
+        return -1;
+    }
+    if (_audioProcessingModulePtr->gain_control()->enable_limiter(
+        config.limiterEnable) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetAgcConfig() failed to set hard limiter to the signal");
+        return -1;
+    }
+
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetAgcConfig() EC is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::GetAgcConfig(AgcConfig &config)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetAgcConfig(config=?)");
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    config.targetLeveldBOv =
+        _audioProcessingModulePtr->gain_control()->target_level_dbfs();
+    config.digitalCompressionGaindB =
+        _audioProcessingModulePtr->gain_control()->compression_gain_db();
+    config.limiterEnable =
+        _audioProcessingModulePtr->gain_control()->is_limiter_enabled();
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetAgcConfig() => targetLeveldBOv=%u, "
+                 "digitalCompressionGaindB=%u, limiterEnable=%d",
+                 config.targetLeveldBOv,
+                 config.digitalCompressionGaindB,
+                 config.limiterEnable);
+
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "GetAgcConfig() EC is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::SetRxNsStatus(int channel,
+                                          bool enable,
+                                          NsModes mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetRxNsStatus(channel=%d, enable=%d, mode=%d)",
+                 channel, (int)enable, (int)mode);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetRxNsStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetRxNsStatus(enable, mode);
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetRxNsStatus() AGC is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::GetRxNsStatus(int channel,
+                                          bool& enabled,
+                                          NsModes& mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetRxNsStatus(channel=%d, enable=?, mode=?)", channel);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRxNsStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRxNsStatus(enabled, mode);
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "GetRxNsStatus() Agc is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::SetRxAgcStatus(int channel,
+                                           bool enable,
+                                           AgcModes mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetRxAgcStatus(channel=%d, enable=%d, mode=%d)",
+                 channel, (int)enable, (int)mode);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetRxAgcStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetRxAgcStatus(enable, mode);
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetRxAgcStatus() Agc is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::GetRxAgcStatus(int channel,
+                                           bool& enabled,
+                                           AgcModes& mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetRxAgcStatus(channel=%d, enable=?, mode=?)", channel);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRxAgcStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRxAgcStatus(enabled, mode);
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "GetRxAgcStatus() Agc is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::SetRxAgcConfig(int channel, const AgcConfig config)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetRxAgcConfig(channel=%d)", channel);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetRxAgcConfig() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetRxAgcConfig(config);
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetRxAgcConfig() Agc is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::GetRxAgcConfig(int channel, AgcConfig& config)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetRxAgcConfig(channel=%d)", channel);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRxAgcConfig() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRxAgcConfig(config);
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "GetRxAgcConfig() Agc is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::SetEcStatus(bool enable, EcModes mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetEcStatus(enable=%d, mode=%d)", enable, mode);
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    // AEC mode
+    if ((mode == kEcDefault) ||
+        (mode == kEcConference) ||
+        (mode == kEcAec) ||
+        ((mode == kEcUnchanged) &&
+        (_isAecMode == true)))
+    {
+        if (enable)
+        {
+            // Disable the AECM before enabling the AEC
+            if (_audioProcessingModulePtr->echo_control_mobile()->is_enabled())
+            {
+                _engineStatistics.SetLastError(
+                    VE_APM_ERROR, kTraceWarning,
+                    "SetEcStatus() disable AECM before enabling AEC");
+                if (_audioProcessingModulePtr->echo_control_mobile()->
+                    Enable(false) != 0)
+                {
+                    _engineStatistics.SetLastError(
+                        VE_APM_ERROR, kTraceError,
+                        "SetEcStatus() failed to disable AECM");
+                    return -1;
+                }
+            }
+        }
+        if (_audioProcessingModulePtr->echo_cancellation()->Enable(enable) != 0)
+        {
+            _engineStatistics.SetLastError(
+                VE_APM_ERROR, kTraceError,
+                "SetEcStatus() failed to set AEC state");
+            return -1;
+        }
+#ifdef CLOCK_SKEW_COMP
+        if (_audioProcessingModulePtr->echo_cancellation()->
+            enable_drift_compensation(true) != 0)
+        {
+            _engineStatistics.SetLastError(
+                VE_APM_ERROR, kTraceError,
+                "SetEcStatus() failed to enable drift compensation");
+            return -1;
+        }
+#else
+        if (_audioProcessingModulePtr->echo_cancellation()->
+            enable_drift_compensation(false) != 0)
+        {
+            _engineStatistics.SetLastError(
+                VE_APM_ERROR, kTraceError,
+                "SetEcStatus() failed to disable drift compensation");
+            return -1;
+        }
+#endif
+        if (mode == kEcConference)
+        {
+            if (_audioProcessingModulePtr->echo_cancellation()->
+                set_suppression_level(EchoCancellation::kHighSuppression) != 0)
+           {
+                _engineStatistics.SetLastError(
+                    VE_APM_ERROR, kTraceError,
+                    "SetEcStatus() failed to set aggressiveness to high");
+                return -1;
+            }
+        }
+        else
+        {
+           if (_audioProcessingModulePtr->echo_cancellation()->
+               set_suppression_level(
+                   EchoCancellation::kModerateSuppression) != 0)
+           {
+                _engineStatistics.SetLastError(
+                    VE_APM_ERROR, kTraceError,
+                    "SetEcStatus() failed to set aggressiveness to moderate");
+                return -1;
+           }
+        }
+
+        _isAecMode = true;
+    }
+    else if ((mode == kEcAecm) ||
+            ((mode == kEcUnchanged) &&
+            (_isAecMode == false)))
+    {
+        if (enable)
+        {
+            // Disable the AEC before enabling the AECM
+            if (_audioProcessingModulePtr->echo_cancellation()->is_enabled())
+            {
+                _engineStatistics.SetLastError(
+                    VE_APM_ERROR, kTraceWarning,
+                    "SetEcStatus() disable AEC before enabling AECM");
+                if (_audioProcessingModulePtr->echo_cancellation()->
+                    Enable(false) != 0)
+                {
+                    _engineStatistics.SetLastError(
+                        VE_APM_ERROR, kTraceError,
+                        "SetEcStatus() failed to disable AEC");
+                    return -1;
+                }
+            }
+        }
+        if (_audioProcessingModulePtr->echo_control_mobile()->
+            Enable(enable) != 0)
+        {
+            _engineStatistics.SetLastError(
+                VE_APM_ERROR, kTraceError,
+                "SetEcStatus() failed to set AECM state");
+            return -1;
+        }
+        _isAecMode = false;
+    }
+    else
+    {
+        _engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                                       "SetEcStatus() invalid EC mode");
+        return -1;
+    }
+
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetEcStatus() EC is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::GetEcStatus(bool& enabled, EcModes& mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetEcStatus()");
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    if (_isAecMode == true)
+    {
+        mode = kEcAec;
+        enabled = _audioProcessingModulePtr->echo_cancellation()->is_enabled();
+    }
+    else
+    {
+        mode = kEcAecm;
+        enabled = _audioProcessingModulePtr->echo_control_mobile()->
+            is_enabled();
+    }
+    
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetEcStatus() => enabled=%i, mode=%i",
+                 enabled, (int)mode);
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "GetEcStatus() EC is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::SetAecmMode(AecmModes mode, bool enableCNG)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetAECMMode(mode = %d)", mode);
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+	
+    EchoControlMobile::RoutingMode aecmMode(
+        EchoControlMobile::kQuietEarpieceOrHeadset);
+
+    switch (mode)
+    {
+        case kAecmQuietEarpieceOrHeadset:
+            aecmMode = EchoControlMobile::kQuietEarpieceOrHeadset;
+            break;
+        case kAecmEarpiece:
+            aecmMode = EchoControlMobile::kEarpiece;
+            break;
+        case kAecmLoudEarpiece:
+            aecmMode = EchoControlMobile::kLoudEarpiece;
+            break;
+        case kAecmSpeakerphone:
+            aecmMode = EchoControlMobile::kSpeakerphone;
+            break;
+        case kAecmLoudSpeakerphone:
+            aecmMode = EchoControlMobile::kLoudSpeakerphone;
+            break;
+        default:
+            _engineStatistics.SetLastError(
+                VE_APM_ERROR, kTraceError,
+                "SetAECMMode() invalid AECM mode");
+            return -1;
+    }
+
+
+    if (_audioProcessingModulePtr->echo_control_mobile()->
+        set_routing_mode(aecmMode) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetAECMMode() failed to set AECM routing mode");
+        return -1;
+    }
+    if (_audioProcessingModulePtr->echo_control_mobile()->
+        enable_comfort_noise(enableCNG) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetAECMMode() failed to set comfort noise state for AECM");
+        return -1;
+    }
+
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetAECMMode() EC is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::GetAecmMode(AecmModes& mode, bool& enabledCNG)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetAECMMode(mode=?)");
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+	
+    enabledCNG = false;
+
+    EchoControlMobile::RoutingMode aecmMode =
+        _audioProcessingModulePtr->echo_control_mobile()->routing_mode();
+    enabledCNG = _audioProcessingModulePtr->echo_control_mobile()->
+        is_comfort_noise_enabled();
+
+    switch (aecmMode)
+    {
+        case EchoControlMobile::kQuietEarpieceOrHeadset:
+            mode = kAecmQuietEarpieceOrHeadset;
+            break;
+        case EchoControlMobile::kEarpiece:
+            mode = kAecmEarpiece;
+            break;
+        case EchoControlMobile::kLoudEarpiece:
+            mode = kAecmLoudEarpiece;
+            break;
+        case EchoControlMobile::kSpeakerphone:
+            mode = kAecmSpeakerphone;
+            break;
+        case EchoControlMobile::kLoudSpeakerphone:
+            mode = kAecmLoudSpeakerphone;
+            break;
+        default:
+            _engineStatistics.SetLastError(
+                VE_APM_ERROR, kTraceError,
+                "GetAECMMode() invalid AECM mode");
+            return -1;
+    }
+
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "GetAECMMode() EC is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::RegisterRxVadObserver(
+    int channel,
+    VoERxVadCallback &observer)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "RegisterRxVadObserver()");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "RegisterRxVadObserver() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->RegisterRxVadObserver(observer);
+}
+
+int VoEAudioProcessingImpl::DeRegisterRxVadObserver(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "DeRegisterRxVadObserver()");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "DeRegisterRxVadObserver() failed to locate channel");
+        return -1;
+    }
+
+    return channelPtr->DeRegisterRxVadObserver();
+}
+
+int VoEAudioProcessingImpl::VoiceActivityIndicator(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoiceActivityIndicator(channel=%d)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "DeRegisterRxVadObserver() failed to locate channel");
+        return -1;
+    }
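+    // Query the channel for its most recent VAD decision; |activity| starts
+    // at -1 and is assumed to be left unchanged if the channel has none.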
+    int activity(-1);
+    channelPtr->VoiceActivityIndicator(activity);
+
+    return activity;
+}
+
+int VoEAudioProcessingImpl::SetMetricsStatus(bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetMetricsStatus(enable=%d)", enable);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    if ((_audioProcessingModulePtr->level_estimator()->Enable(enable)!= 0) ||
+        (_audioProcessingModulePtr->echo_cancellation()->enable_metrics(enable)
+            != 0))
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetMetricsStatus() unable to set metrics mode");
+        return -1;
+    }
+    return 0;
+}
+
+int VoEAudioProcessingImpl::GetMetricsStatus(bool& enabled)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetMetricsStatus(enabled=?)");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    bool levelMode =
+        _audioProcessingModulePtr->level_estimator()->is_enabled();
+    bool echoMode =
+        _audioProcessingModulePtr->echo_cancellation()->are_metrics_enabled();
+
+    if (levelMode != echoMode)
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "GetMetricsStatus() level mode and echo mode are not the same");
+        return -1;
+    }
+
+    enabled = levelMode;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetMetricsStatus() => enabled=%d", enabled);
+    return 0;
+}
+
+int VoEAudioProcessingImpl::GetSpeechMetrics(int& levelTx, int& levelRx)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetSpeechMetrics(levelTx=?, levelRx=?)");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    LevelEstimator::Metrics levelMetrics;
+    LevelEstimator::Metrics reverseLevelMetrics;
+    bool levelMode =
+        _audioProcessingModulePtr->level_estimator()->is_enabled();
+
+    if (levelMode == false)
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "GetSpeechMetrics() AudioProcessingModule level metrics is "
+            "not enabled");
+        return -1;
+    }
+    if (_audioProcessingModulePtr->level_estimator()->GetMetrics(
+        &levelMetrics, &reverseLevelMetrics))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                   "GetSpeechMetrics(), AudioProcessingModule level metrics"
+                   " error");
+        return -1;
+    }
+
+    levelTx = levelMetrics.speech.instant;
+    levelRx = reverseLevelMetrics.speech.instant;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetSpeechMetrics() => levelTx=%d, levelRx=%d",
+                 levelTx, levelRx);
+    return 0;
+}
+
+int VoEAudioProcessingImpl::GetNoiseMetrics(int& levelTx, int& levelRx)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetNoiseMetrics(levelTx=?, levelRx=?)");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    bool levelMode =
+        _audioProcessingModulePtr->level_estimator()->is_enabled();
+    LevelEstimator::Metrics levelMetrics;
+    LevelEstimator::Metrics reverseLevelMetrics;
+
+    if (levelMode == false)
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "GetNoiseMetrics() AudioProcessingModule level metrics is not"
+            "enabled");
+	    return -1;
+    }
+    if (_audioProcessingModulePtr->level_estimator()->GetMetrics(
+        &levelMetrics, &reverseLevelMetrics))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                   "GetNoiseMetrics(), AudioProcessingModule level metrics"
+                   " error");
+        return -1;
+    }
+
+    levelTx = levelMetrics.noise.instant;
+    levelRx = reverseLevelMetrics.noise.instant;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetNoiseMetrics() => levelTx=%d, levelRx=%d", levelTx, levelRx);
+    return 0;
+}
+
+int VoEAudioProcessingImpl::GetEchoMetrics(int& ERL,
+                                           int& ERLE,
+                                           int& RERL,
+                                           int& A_NLP)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetEchoMetrics(ERL=?, ERLE=?, RERL=?, A_NLP=?)");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    bool echoMode =
+        _audioProcessingModulePtr->echo_cancellation()->is_enabled();
+    EchoCancellation::Metrics echoMetrics;
+
+    if (echoMode == false)
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "GetEchoMetrics() AudioProcessingModule echo metrics is not"
+            "enabled");
+        return -1;
+    }
+    if (_audioProcessingModulePtr->echo_cancellation()->GetMetrics(
+        &echoMetrics))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                     "GetEchoMetrics(), AudioProcessingModule echo metrics"
+                     " error");
+        return -1;
+    }
+
+    ERL = echoMetrics.echo_return_loss.instant;
+    ERLE = echoMetrics.echo_return_loss_enhancement.instant;
+    RERL = echoMetrics.residual_echo_return_loss.instant;
+    A_NLP = echoMetrics.a_nlp.instant;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+               "GetEchoMetrics() => ERL=%d, ERLE=%d, RERL=%d, A_NLP=%d",
+               ERL, ERLE, RERL, A_NLP);
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetEcStatus() EC is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::StartDebugRecording(const char* fileNameUTF8)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "StartDebugRecording()");
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    return _audioProcessingModulePtr->StartDebugRecording(fileNameUTF8);
+}
+
+int VoEAudioProcessingImpl::StopDebugRecording()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StopDebugRecording()");
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    return _audioProcessingModulePtr->StopDebugRecording();
+}
+
+int VoEAudioProcessingImpl::SetTypingDetectionStatus(bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetTypingDetectionStatus()");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    // Just use the VAD state to determine if we should enable typing detection
+    // or not
+
+    if (_audioProcessingModulePtr->voice_detection()->Enable(enable))
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "SetTypingDetectionStatus() failed to set VAD state");
+        return -1;
+    }
+    if (_audioProcessingModulePtr->voice_detection()->set_likelihood(
+        VoiceDetection::kHighLikelihood))
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "SetTypingDetectionStatus() failed to set VAD likelihood to high");
+        return -1;
+    }
+
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetTypingDetectionStatus is not supported");
+    return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::GetTypingDetectionStatus(bool& enabled)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetTypingDetectionStatus()");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    // Just use the VAD state to determine if we should enable typing
+    // detection or not
+
+    enabled = _audioProcessingModulePtr->voice_detection()->is_enabled();
+
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetTypingDetectionStatus is not supported");
+    return(-1);
+#endif
+}
+
+#endif  // #ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+
+}   // namespace webrtc
diff --git a/voice_engine/main/source/voe_audio_processing_impl.h b/voice_engine/main/source/voe_audio_processing_impl.h
new file mode 100644
index 0000000..f57778e
--- /dev/null
+++ b/voice_engine/main/source/voe_audio_processing_impl.h
@@ -0,0 +1,102 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_IMPL_H
+
+#include "voe_audio_processing.h"
+
+#include "ref_count.h"
+#include "shared_data.h"
+
+
+namespace webrtc {
+
+class VoEAudioProcessingImpl : public virtual voe::SharedData,
+                               public VoEAudioProcessing,
+                               public voe::RefCount
+{
+public:
+    virtual int Release();
+
+    virtual int SetNsStatus(bool enable, NsModes mode = kNsUnchanged);
+
+    virtual int GetNsStatus(bool& enabled, NsModes& mode);
+
+    virtual int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged);
+
+    virtual int GetAgcStatus(bool& enabled, AgcModes& mode);
+
+    virtual int SetAgcConfig(const AgcConfig config);
+
+    virtual int GetAgcConfig(AgcConfig& config);
+
+    virtual int SetRxNsStatus(int channel,
+                              bool enable,
+                              NsModes mode = kNsUnchanged);
+
+    virtual int GetRxNsStatus(int channel, bool& enabled, NsModes& mode);
+
+    virtual int SetRxAgcStatus(int channel,
+                               bool enable,
+                               AgcModes mode = kAgcUnchanged);
+
+    virtual int GetRxAgcStatus(int channel, bool& enabled, AgcModes& mode);
+
+    virtual int SetRxAgcConfig(int channel, const AgcConfig config);
+
+    virtual int GetRxAgcConfig(int channel, AgcConfig& config);
+
+    virtual int SetEcStatus(bool enable, EcModes mode = kEcUnchanged);
+
+    virtual int GetEcStatus(bool& enabled, EcModes& mode);
+
+    virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
+                            bool enableCNG = true);
+
+    virtual int GetAecmMode(AecmModes& mode, bool& enabledCNG);
+
+    virtual int RegisterRxVadObserver(int channel,
+                                      VoERxVadCallback& observer);
+
+    virtual int DeRegisterRxVadObserver(int channel);
+
+    virtual int VoiceActivityIndicator(int channel);
+
+    virtual int SetMetricsStatus(bool enable);
+
+    virtual int GetMetricsStatus(bool& enabled);
+
+    virtual int GetSpeechMetrics(int& levelTx, int& levelRx);
+
+    virtual int GetNoiseMetrics(int& levelTx, int& levelRx);
+
+    virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP);
+
+    virtual int StartDebugRecording(const char* fileNameUTF8);
+
+    virtual int StopDebugRecording();
+
+    virtual int SetTypingDetectionStatus(bool enable);
+
+    virtual int GetTypingDetectionStatus(bool& enabled);
+
+protected:
+    VoEAudioProcessingImpl();
+    virtual ~VoEAudioProcessingImpl();
+
+private:
+    bool _isAecMode;
+};
+
+}   //  namespace webrtc
+
+#endif    // WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_IMPL_H
+
diff --git a/voice_engine/main/source/voe_base_impl.cc b/voice_engine/main/source/voe_base_impl.cc
new file mode 100644
index 0000000..478f0b7
--- /dev/null
+++ b/voice_engine/main/source/voe_base_impl.cc
@@ -0,0 +1,1894 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_base_impl.h"
+
+#include "voice_engine_impl.h"
+#include "voe_errors.h"
+#include "trace.h"
+#include "critical_section_wrapper.h"
+#include "file_wrapper.h"
+#include "audio_processing.h"
+
+#include "channel.h"
+#include "output_mixer.h"
+#include "transmit_mixer.h"
+
+#include "audio_coding_module.h"
+#include "signal_processing_library.h"
+#include "utility.h"
+
+#if (defined(_WIN32) && defined(_DLL) && (_MSC_VER == 1400))
+// Fix for VS 2005 MD/MDd link problem
+#include <stdio.h>
+extern "C"
+    { FILE _iob[3] = {   __iob_func()[0], __iob_func()[1], __iob_func()[2]}; }
+#endif
+
+namespace webrtc
+{
+
+VoEBase* VoEBase::GetInterface(VoiceEngine* voiceEngine)
+{
+    if (NULL == voiceEngine)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* s = reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoEBaseImpl* d = s;
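+    // The sub-API is reference counted; increase the count here and expect a
+    // matching Release() call to decrease it again.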
+    (*d)++;
+    return (d);
+}
+
+VoEBaseImpl::VoEBaseImpl() :
+    _voiceEngineObserverPtr(NULL),
+    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _voiceEngineObserver(false), _oldVoEMicLevel(0), _oldMicLevel(0)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoEBaseImpl() - ctor");
+}
+
+VoEBaseImpl::~VoEBaseImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+                 "~VoEBaseImpl() - dtor");
+
+    TerminateInternal();
+
+    delete &_callbackCritSect;
+}
+
+int VoEBaseImpl::Release()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoEBaseImpl::Release()");
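+    // Decrease the reference count that GetInterface() increased; a negative
+    // count indicates an unbalanced Release() call.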
+    (*this)--;
+    int refCount = GetCount();
+    if (refCount < 0)
+    {
+        Reset();
+        _engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND, kTraceWarning);
+        return (-1);
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoEBaseImpl reference counter = %d", refCount);
+    return (refCount);
+}
+
+void VoEBaseImpl::OnErrorIsReported(const ErrorCode error)
+{
+    CriticalSectionScoped cs(_callbackCritSect);
+    if (_voiceEngineObserver)
+    {
+        if (_voiceEngineObserverPtr)
+        {
+            int errCode(0);
+            if (error == AudioDeviceObserver::kRecordingError)
+            {
+                errCode = VE_RUNTIME_REC_ERROR;
+                WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                             "VoEBaseImpl::OnErrorIsReported() => "
+                                 "VE_RUNTIME_REC_ERROR");
+            }
+            else if (error == AudioDeviceObserver::kPlayoutError)
+            {
+                errCode = VE_RUNTIME_PLAY_ERROR;
+                WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                             "VoEBaseImpl::OnErrorIsReported() => "
+                                 "VE_RUNTIME_PLAY_ERROR");
+            }
+            // Deliver callback (-1 <=> no channel dependency)
+            _voiceEngineObserverPtr->CallbackOnError(-1, errCode);
+        }
+    }
+}
+
+void VoEBaseImpl::OnWarningIsReported(const WarningCode warning)
+{
+    CriticalSectionScoped cs(_callbackCritSect);
+    if (_voiceEngineObserver)
+    {
+        if (_voiceEngineObserverPtr)
+        {
+            int warningCode(0);
+            if (warning == AudioDeviceObserver::kRecordingWarning)
+            {
+                warningCode = VE_RUNTIME_REC_WARNING;
+                WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                             "VoEBaseImpl::OnWarningIsReported() => "
+                                 "VE_RUNTIME_REC_WARNING");
+            }
+            else if (warning == AudioDeviceObserver::kPlayoutWarning)
+            {
+                warningCode = VE_RUNTIME_PLAY_WARNING;
+                WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                             "VoEBaseImpl::OnWarningIsReported() => "
+                                 "VE_RUNTIME_PLAY_WARNING");
+            }
+            // Deliver callback (-1 <=> no channel dependency)
+            _voiceEngineObserverPtr->CallbackOnError(-1, warningCode);
+        }
+    }
+}
+
+WebRtc_Word32 VoEBaseImpl::RecordedDataIsAvailable(
+        const WebRtc_Word8* audioSamples,
+        const WebRtc_UWord32 nSamples,
+        const WebRtc_UWord8 nBytesPerSample,
+        const WebRtc_UWord8 nChannels,
+        const WebRtc_UWord32 samplesPerSec,
+        const WebRtc_UWord32 totalDelayMS,
+        const WebRtc_Word32 clockDrift,
+        const WebRtc_UWord32 currentMicLevel,
+        WebRtc_UWord32& newMicLevel)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoEBaseImpl::RecordedDataIsAvailable(nSamples=%u, "
+                     "nBytesPerSample=%u, nChannels=%u, samplesPerSec=%u, "
+                     "totalDelayMS=%u, clockDrift=%d, currentMicLevel=%u)",
+                 nSamples, nBytesPerSample, nChannels, samplesPerSec,
+                 totalDelayMS, clockDrift, currentMicLevel);
+
+    assert(_transmitMixerPtr != NULL);
+    assert(_audioDevicePtr != NULL);
+
+    // Always use mono representation within VoE
+    if (nChannels == 2)
+    {
+        WebRtc_Word16* audio16ptr = (WebRtc_Word16*) audioSamples;
+        WebRtc_Word32 audio32;
+        for (WebRtc_UWord32 i = 0; i < nSamples; i++)
+        {
+            // y(i) = (1/2)*(x(2i) + x(2i+1)) => (1/2)*(left(i) + right(i))
+            audio32 = audio16ptr[2 * i];
+            audio32 += audio16ptr[2 * i + 1];
+            audio32 >>= 1;
+            audio16ptr[i] = static_cast<WebRtc_Word16> (audio32);
+        }
+    }
+
+    bool isAnalogAGC(false);
+    WebRtc_UWord32 maxVolume(0);
+    WebRtc_UWord16 currentVoEMicLevel(0);
+    WebRtc_UWord32 newVoEMicLevel(0);
+
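+    // The microphone level only needs to be tracked and rescaled when the
+    // APM gain control runs in adaptive analog mode.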
+    if (_audioProcessingModulePtr
+            && (_audioProcessingModulePtr->gain_control()->mode()
+                    == GainControl::kAdaptiveAnalog))
+    {
+        isAnalogAGC = true;
+    }
+
+    // Will only deal with the volume in adaptive analog mode
+    if (isAnalogAGC)
+    {
+        // Scale from ADM to VoE level range
+        if (_audioDevicePtr->MaxMicrophoneVolume(&maxVolume) == 0)
+        {
+            if (0 != maxVolume)
+            {
+                currentVoEMicLevel = (WebRtc_UWord16) ((currentMicLevel
+                        * kMaxVolumeLevel + (int) (maxVolume / 2))
+                        / (maxVolume));
+            }
+        }
+        assert(currentVoEMicLevel <= kMaxVolumeLevel);
+    }
+
+    // Keep track if the MicLevel has been changed by the AGC, if not,
+    // use the old value AGC returns to let AGC continue its trend,
+    // so eventually the AGC is able to change the mic level. This handles
+    // issues with truncation introduced by the scaling.
+    if (_oldMicLevel == currentMicLevel)
+    {
+        currentVoEMicLevel = (WebRtc_UWord16) _oldVoEMicLevel;
+    }
+
+    // Sending side only supports mono
+    const WebRtc_UWord8 nAudioChannels(1);
+
+    // Perform channel-independent operations
+    // (APM, mix with file, record to file, mute, etc.)
+    _transmitMixerPtr->PrepareDemux(audioSamples, nSamples, nAudioChannels,
+                                    samplesPerSec,
+                                    (WebRtc_UWord16) totalDelayMS, clockDrift,
+                                    currentVoEMicLevel);
+
+    // Copy the audio frame to each sending channel and perform
+    // channel-dependent operations (file mixing, mute, etc.) to prepare
+    // for encoding.
+    _transmitMixerPtr->DemuxAndMix();
+    // Do the encoding and packetize+transmit the RTP packet when encoding
+    // is done.
+    _transmitMixerPtr->EncodeAndSend();
+
+    // Will only deal with the volume in adaptive analog mode
+    if (isAnalogAGC)
+    {
+        // Scale from VoE to ADM level range
+        newVoEMicLevel = _transmitMixerPtr->CaptureLevel();
+        if (newVoEMicLevel != currentVoEMicLevel)
+        {
+            // Add (kMaxVolumeLevel/2) to round the value
+            newMicLevel = (WebRtc_UWord32) ((newVoEMicLevel * maxVolume
+                    + (int) (kMaxVolumeLevel / 2)) / (kMaxVolumeLevel));
+        }
+        else
+        {
+            // Pass zero if the level is unchanged
+            newMicLevel = 0;
+        }
+
+        // Keep track of the value AGC returns
+        _oldVoEMicLevel = newVoEMicLevel;
+        _oldMicLevel = currentMicLevel;
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 VoEBaseImpl::NeedMorePlayData(
+        const WebRtc_UWord32 nSamples,
+        const WebRtc_UWord8 nBytesPerSample,
+        const WebRtc_UWord8 nChannels,
+        const WebRtc_UWord32 samplesPerSec,
+        WebRtc_Word8* audioSamples,
+        WebRtc_UWord32& nSamplesOut)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoEBaseImpl::NeedMorePlayData(nSamples=%u, "
+                     "nBytesPerSample=%d, nChannels=%d, samplesPerSec=%u)",
+                 nSamples, nBytesPerSample, nChannels, samplesPerSec);
+
+    assert(_outputMixerPtr != NULL);
+
+    AudioFrame audioFrame;
+
+    // Perform mixing of all active participants (channel-based mixing)
+    _outputMixerPtr->MixActiveChannels();
+
+    // Additional operations on the combined signal
+    _outputMixerPtr->DoOperationsOnCombinedSignal();
+
+    // Retrieve the final output mix (resampled to match the ADM)
+    _outputMixerPtr->GetMixedAudio(samplesPerSec, nChannels, audioFrame);
+
+    assert(nSamples == audioFrame._payloadDataLengthInSamples);
+    assert(samplesPerSec == audioFrame._frequencyInHz);
+
+    // Deliver audio (PCM) samples to the ADM
+    memcpy(
+           (WebRtc_Word16*) audioSamples,
+           (const WebRtc_Word16*) audioFrame._payloadData,
+           sizeof(WebRtc_Word16) * (audioFrame._payloadDataLengthInSamples
+                   * audioFrame._audioChannel));
+
+    nSamplesOut = audioFrame._payloadDataLengthInSamples;
+
+    return 0;
+}
+
+int VoEBaseImpl::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "RegisterVoiceEngineObserver(observer=%p)", &observer);
+    CriticalSectionScoped cs(_callbackCritSect);
+    if (_voiceEngineObserverPtr)
+    {
+        _engineStatistics.SetLastError(VE_INVALID_OPERATION, kTraceError,
+                                       "RegisterVoiceEngineObserver() observer"
+                                       " already enabled");
+        return -1;
+    }
+
+    // Register the observer in all active channels
+    voe::ScopedChannel sc(_channelManager);
+    void* iterator(NULL);
+    voe::Channel* channelPtr = sc.GetFirstChannel(iterator);
+    while (channelPtr != NULL)
+    {
+        channelPtr->RegisterVoiceEngineObserver(observer);
+        channelPtr = sc.GetNextChannel(iterator);
+    }
+    _transmitMixerPtr->RegisterVoiceEngineObserver(observer);
+
+    _voiceEngineObserverPtr = &observer;
+    _voiceEngineObserver = true;
+
+    return 0;
+}
+
+int VoEBaseImpl::DeRegisterVoiceEngineObserver()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "DeRegisterVoiceEngineObserver()");
+    CriticalSectionScoped cs(_callbackCritSect);
+    if (!_voiceEngineObserverPtr)
+    {
+        _engineStatistics.SetLastError(VE_INVALID_OPERATION, kTraceError,
+                                       "DeRegisterVoiceEngineObserver() "
+                                       "observer already disabled");
+        return 0;
+    }
+
+    _voiceEngineObserver = false;
+    _voiceEngineObserverPtr = NULL;
+
+    // Deregister the observer in all active channels
+    voe::ScopedChannel sc(_channelManager);
+    void* iterator(NULL);
+    voe::Channel* channelPtr = sc.GetFirstChannel(iterator);
+    while (channelPtr != NULL)
+    {
+        channelPtr->DeRegisterVoiceEngineObserver();
+        channelPtr = sc.GetNextChannel(iterator);
+    }
+
+    return 0;
+}
+
+int VoEBaseImpl::RegisterAudioDeviceModule(AudioDeviceModule& adm)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "RegisterAudioDeviceModule(adm=%p)", &adm);
+    CriticalSectionScoped cs(*_apiCritPtr);
+
+    if (_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_INVALID_OPERATION, kTraceError,
+                                       "Cannot register ADM when initialized");
+        return -1;
+    }
+
+    _audioDevicePtr = &adm;
+    _usingExternalAudioDevice = true;
+
+    return 0;
+}
+
+int VoEBaseImpl::DeRegisterAudioDeviceModule()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "DeRegisterAudioDeviceModule()");
+    CriticalSectionScoped cs(*_apiCritPtr);
+
+    if (_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_INVALID_OPERATION, kTraceError,
+                                       "Cannot de-register ADM when "
+                                       "initialized");
+        return -1;
+    }
+
+    _audioDevicePtr = NULL;
+    _usingExternalAudioDevice = false;
+
+    return 0;
+}
+
+int VoEBaseImpl::Init()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1), "Init()");
+    CriticalSectionScoped cs(*_apiCritPtr);
+
+    if (_engineStatistics.Initialized())
+    {
+        return 0;
+    }
+
+    if (_moduleProcessThreadPtr)
+    {
+        if (_moduleProcessThreadPtr->Start() != 0)
+        {
+            _engineStatistics.SetLastError(VE_THREAD_ERROR, kTraceError,
+                                           "Init() failed to start module "
+                                           "process thread");
+            return -1;
+        }
+    }
+
+    // Create the AudioProcessing Module if it does not exist.
+
+    if (_audioProcessingModulePtr == NULL)
+    {
+        _audioProcessingModulePtr = AudioProcessing::Create(
+                VoEId(_instanceId, -1));
+        if (_audioProcessingModulePtr == NULL)
+        {
+            _engineStatistics.SetLastError(VE_NO_MEMORY, kTraceCritical,
+                                           "Init() failed to create the AP "
+                                           "module");
+            return -1;
+        }
+        voe::Utility::TraceModuleVersion(VoEId(_instanceId, -1),
+                                         *_audioProcessingModulePtr);
+
+        // Ensure that mixers in both directions has access to the created APM
+        _transmitMixerPtr->SetAudioProcessingModule(_audioProcessingModulePtr);
+        _outputMixerPtr->SetAudioProcessingModule(_audioProcessingModulePtr);
+
+        if (_audioProcessingModulePtr->echo_cancellation()->
+                set_device_sample_rate_hz(
+                        kVoiceEngineAudioProcessingDeviceSampleRateHz))
+        {
+            _engineStatistics.SetLastError(VE_APM_ERROR, kTraceWarning,
+                                           "Init() failed to set the device "
+                                           "sample rate to "
+                                           "48K for AP module");
+        }
+        // Using 8 kHz as initial Fs. Might be changed already at first call.
+        if (_audioProcessingModulePtr->set_sample_rate_hz(8000))
+        {
+            _engineStatistics.SetLastError(VE_APM_ERROR, kTraceWarning,
+                                           "Init() failed to set the sample "
+                                           "rate to 8K for AP "
+                                           "module");
+        }
+
+        if (_audioProcessingModulePtr->set_num_channels(1, 1) != 0)
+        {
+            _engineStatistics.SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
+                                           "Init() failed to set channels for "
+                                           "the primary audio "
+                                           "stream");
+        }
+
+        if (_audioProcessingModulePtr->set_num_reverse_channels(1) != 0)
+        {
+            _engineStatistics.SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
+                                           "Init() failed to set channels for "
+                                           "the reverse audio "
+                                           "stream");
+        }
+        // high-pass filter
+        if (_audioProcessingModulePtr->high_pass_filter()->Enable(
+                WEBRTC_VOICE_ENGINE_HP_DEFAULT_STATE) != 0)
+        {
+            _engineStatistics.SetLastError(VE_APM_ERROR, kTraceWarning,
+                                           "Init() failed to set the high-pass "
+                                           "filter for AP"
+                                           " module");
+        }
+        // Echo Cancellation
+        if (_audioProcessingModulePtr->echo_cancellation()->
+                enable_drift_compensation(false) != 0)
+        {
+            _engineStatistics.SetLastError(VE_APM_ERROR, kTraceWarning,
+                                           "Init() failed to set drift "
+                                           "compensation for AP module");
+        }
+        if (_audioProcessingModulePtr->echo_cancellation()->Enable(
+                WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE))
+        {
+            _engineStatistics.SetLastError(VE_APM_ERROR, kTraceWarning,
+                                           "Init() failed to set echo "
+                                           "cancellation state for AP"
+                                           " module");
+        }
+        // Noise Reduction
+        if (_audioProcessingModulePtr->noise_suppression()->set_level(
+                (NoiseSuppression::Level) WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE)
+                != 0)
+        {
+            _engineStatistics.SetLastError(VE_APM_ERROR, kTraceWarning,
+                                           "Init() failed to set noise "
+                                           "reduction level for AP "
+                                           "module");
+        }
+        if (_audioProcessingModulePtr->noise_suppression()->Enable(
+                WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE) != 0)
+        {
+            _engineStatistics.SetLastError(VE_APM_ERROR, kTraceWarning,
+                                           "Init() failed to set noise "
+                                           "reduction state for AP "
+                                           "module");
+        }
+        // Automatic Gain control
+        if (_audioProcessingModulePtr->gain_control()->set_analog_level_limits(
+                kMinVolumeLevel,kMaxVolumeLevel) != 0)
+        {
+            _engineStatistics.SetLastError(VE_APM_ERROR, kTraceWarning,
+                                           "Init() failed to set AGC analog "
+                                           "level for AP module");
+        }
+        if (_audioProcessingModulePtr->gain_control()->set_mode(
+                (GainControl::Mode) WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE)
+                != 0)
+        {
+            _engineStatistics.SetLastError(VE_APM_ERROR, kTraceWarning,
+                                           "Init() failed to set AGC mode for "
+                                           "AP module");
+        }
+        if (_audioProcessingModulePtr->gain_control()->Enable(
+                WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE)
+                != 0)
+        {
+            _engineStatistics.SetLastError(VE_APM_ERROR, kTraceWarning,
+                                           "Init() failed to set AGC state for "
+                                           "AP module");
+        }
+        // Level Metrics
+        if (_audioProcessingModulePtr->level_estimator()->Enable(
+                WEBRTC_VOICE_ENGINE_LEVEL_ESTIMATOR_DEFAULT_STATE)
+                != 0)
+        {
+            _engineStatistics.SetLastError(VE_APM_ERROR, kTraceWarning,
+                                           "Init() failed to set Level "
+                                           "Estimator state for AP "
+                                           "module");
+        }
+        // VAD
+        if (_audioProcessingModulePtr->voice_detection()->Enable(
+                WEBRTC_VOICE_ENGINE_VAD_DEFAULT_STATE)
+                != 0)
+        {
+            _engineStatistics.SetLastError(VE_APM_ERROR, kTraceWarning,
+                                           "Init() failed to set VAD state "
+                                           "for AP module");
+        }
+    }
+
+    // Create the Audio Device Module (ADM) if it does not already exist
+
+    if (_audioDevicePtr == NULL)
+    {
+        // Create the ADM
+        // _audioDeviceLayer is set by
+        // VoEHardwareImpl::SetAudioDeviceLayer
+        _audioDevicePtr = AudioDeviceModule::Create(VoEId(_instanceId, -1),
+                                                    _audioDeviceLayer);
+        if (_audioDevicePtr == NULL)
+        {
+            _engineStatistics.SetLastError(VE_NO_MEMORY, kTraceCritical,
+                                           "Init() failed to create the ADM");
+            return -1;
+        }
+    }
+
+    // Register the ADM to the process thread, which will drive the error
+    // callback mechanism
+    if (_moduleProcessThreadPtr->RegisterModule(_audioDevicePtr) != 0)
+    {
+        _engineStatistics.SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR,
+                                       kTraceError,
+                                       "Init() failed to register the ADM");
+        return -1;
+    }
+
+    bool available(false);
+    WebRtc_Word32 ret(0);
+
+    // --------------------
+    // Reinitialize the ADM
+
+    // Register the AudioObserver implementation
+    _audioDevicePtr->RegisterEventObserver(this);
+
+    // Register the AudioTransport implementation
+    _audioDevicePtr->RegisterAudioCallback(this);
+
+    // ADM initialization
+    if (_audioDevicePtr->Init() != 0)
+    {
+        _engineStatistics.SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR,
+                                       kTraceError,
+                                       "Init() failed to initialize the ADM");
+        return -1;
+    }
+
+    // Initialize the default speaker
+    if (_audioDevicePtr->SetPlayoutDevice(WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE)
+            != 0)
+    {
+        _engineStatistics.SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR,
+                                       kTraceInfo,
+                                       "Init() failed to set the default "
+                                       "output device");
+    }
+    if (_audioDevicePtr->SpeakerIsAvailable(&available) != 0)
+    {
+        _engineStatistics.SetLastError(VE_CANNOT_ACCESS_SPEAKER_VOL,
+                                       kTraceInfo,
+                                       "Init() failed to check speaker "
+                                       "availability, trying"
+                                       " to initialize speaker anyway");
+    }
+    else if (!available)
+    {
+        _engineStatistics.SetLastError(VE_CANNOT_ACCESS_SPEAKER_VOL,
+                                       kTraceInfo,
+                                       "Init() speaker not available, "
+                                       "trying to initialize "
+                                       "speaker anyway");
+    }
+    if (_audioDevicePtr->InitSpeaker() != 0)
+    {
+        _engineStatistics.SetLastError(VE_CANNOT_ACCESS_SPEAKER_VOL,
+                                       kTraceInfo,
+                                       "Init() failed to initialize the "
+                                       "speaker");
+    }
+
+    // Initialize the default microphone
+    if (_audioDevicePtr->SetRecordingDevice(WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE)
+            != 0)
+    {
+        _engineStatistics.SetLastError(VE_SOUNDCARD_ERROR, kTraceInfo,
+                                       "Init() failed to set the default "
+                                       "input device");
+    }
+    if (_audioDevicePtr->MicrophoneIsAvailable(&available) != 0)
+    {
+        _engineStatistics.SetLastError(VE_CANNOT_ACCESS_MIC_VOL, kTraceInfo,
+                                       "Init() failed to check microphone "
+                                       "availability, trying "
+                                       "to initialize microphone anyway");
+    }
+    else if (!available)
+    {
+        _engineStatistics.SetLastError(VE_CANNOT_ACCESS_MIC_VOL, kTraceInfo,
+                                       "Init() microphone not available, "
+                                       "trying to initialize "
+                                       "microphone anyway");
+    }
+    if (_audioDevicePtr->InitMicrophone() != 0)
+    {
+        _engineStatistics.SetLastError(VE_CANNOT_ACCESS_MIC_VOL, kTraceInfo,
+                                       "Init() failed to initialize the "
+                                       "microphone");
+    }
+
+    // Set default AGC mode for the ADM
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    bool enable(false);
+    if (_audioProcessingModulePtr->gain_control()->mode()
+            != GainControl::kFixedDigital)
+    {
+        enable = _audioProcessingModulePtr->gain_control()->is_enabled();
+        // Only set the AGC mode for the ADM when Adaptive AGC mode is selected
+        if (_audioDevicePtr->SetAGC(enable) != 0)
+        {
+            _engineStatistics.SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR,
+                                           kTraceWarning,
+                                           "Init() failed to set default AGC "
+                                           "mode in ADM");
+        }
+    }
+#endif
+    // Set number of channels
+    _audioDevicePtr->StereoPlayoutIsAvailable(&available);
+    if (_audioDevicePtr->SetStereoPlayout(available ? true : false) != 0)
+    {
+        _engineStatistics.SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
+                                       "Init() failed to set stereo playout "
+                                       "mode");
+    }
+    _audioDevicePtr->StereoRecordingIsAvailable(&available);
+    if (_audioDevicePtr->SetStereoRecording(available ? true : false) != 0)
+    {
+        _engineStatistics.SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
+                                       "Init() failed to set stereo recording "
+                                       "mode");
+    }
+
+    return _engineStatistics.SetInitialized();
+}
+
+int VoEBaseImpl::Terminate()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "Terminate()");
+    CriticalSectionScoped cs(*_apiCritPtr);
+    return TerminateInternal();
+}
+
+int VoEBaseImpl::MaxNumOfChannels()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "MaxNumOfChannels()");
+    WebRtc_Word32 maxNumOfChannels = _channelManager.MaxNumOfChannels();
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "MaxNumOfChannels() => %d", maxNumOfChannels);
+    return (maxNumOfChannels);
+}
+
+int VoEBaseImpl::CreateChannel()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "CreateChannel()");
+    CriticalSectionScoped cs(*_apiCritPtr);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    WebRtc_Word32 channelId = -1;
+
+    if (!_channelManager.CreateChannel(channelId))
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_CREATED, kTraceError,
+                                       "CreateChannel() failed to allocate "
+                                           "memory for channel");
+        return -1;
+    }
+
+    bool destroyChannel(false);
+    {
+        voe::ScopedChannel sc(_channelManager, channelId);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(VE_CHANNEL_NOT_CREATED, kTraceError,
+                                           "CreateChannel() failed to allocate"
+                                           " memory for channel");
+            return -1;
+        }
+        else if (channelPtr->SetEngineInformation(_engineStatistics,
+                                                  *_outputMixerPtr,
+                                                  *_transmitMixerPtr,
+                                                  *_moduleProcessThreadPtr,
+                                                  *_audioDevicePtr,
+                                                  _voiceEngineObserverPtr,
+                                                  &_callbackCritSect) != 0)
+        {
+            destroyChannel = true;
+            _engineStatistics.SetLastError(VE_CHANNEL_NOT_CREATED, kTraceError,
+                                           "CreateChannel() failed to "
+                                           "associate engine and channel."
+                                           " Destroying channel.");
+        }
+        else if (channelPtr->Init() != 0)
+        {
+            destroyChannel = true;
+            _engineStatistics.SetLastError(VE_CHANNEL_NOT_CREATED, kTraceError,
+                                           "CreateChannel() failed to "
+                                           "initialize channel. Destroying"
+                                           " channel.");
+        }
+    }
+    if (destroyChannel)
+    {
+        _channelManager.DestroyChannel(channelId);
+        return -1;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "CreateChannel() => %d", channelId);
+    return channelId;
+}
+
+int VoEBaseImpl::DeleteChannel(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "DeleteChannel(channel=%d)", channel);
+    CriticalSectionScoped cs(*_apiCritPtr);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    {
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                           "DeleteChannel() failed to locate "
+                                           "channel");
+            return -1;
+        }
+    }
+
+    if (_channelManager.DestroyChannel(channel) != 0)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "DeleteChannel() failed to destroy "
+                                       "channel");
+        return -1;
+    }
+
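+    // Also stop the shared sending and playout paths; these calls are assumed
+    // to be no-ops while other channels are still sending or playing.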
+    if (StopSend() != 0)
+    {
+        return -1;
+    }
+
+    if (StopPlayout() != 0)
+    {
+        return -1;
+    }
+
+    return 0;
+}
+
+int VoEBaseImpl::SetLocalReceiver(int channel, int port, int RTCPport,
+                                  const char ipAddr[64],
+                                  const char multiCastAddr[64])
+{
+    //  Initialize local receive sockets (RTP and RTCP).
+    //
+    //  The sockets are always first closed and then created again by this
+    //  function call. The created sockets are by default also used for
+    //  transmission (unless a source port is set in SetSendDestination).
+    //
+    //  Note that sockets can also be created automatically if a user calls
+    //  SetSendDestination and StartSend without having called SetLocalReceiver
+    //  first. The sockets are then created at the first packet transmission.
+
+    CriticalSectionScoped cs(*_apiCritPtr);
+    if (ipAddr == NULL && multiCastAddr == NULL)
+    {
+        WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                     "SetLocalReceiver(channel=%d, port=%d, RTCPport=%d)",
+                     channel, port, RTCPport);
+    }
+    else if (ipAddr != NULL && multiCastAddr == NULL)
+    {
+        WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                     "SetLocalReceiver(channel=%d, port=%d, RTCPport=%d, "
+                         "ipAddr=%s)", channel, port, RTCPport, ipAddr);
+    }
+    else if (ipAddr == NULL && multiCastAddr != NULL)
+    {
+        WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                     "SetLocalReceiver(channel=%d, port=%d, RTCPport=%d, "
+                         "multiCastAddr=%s)", channel, port, RTCPport,
+                     multiCastAddr);
+    }
+    else
+    {
+        WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                     "SetLocalReceiver(channel=%d, port=%d, RTCPport=%d, "
+                         "ipAddr=%s, multiCastAddr=%s)", channel, port,
+                     RTCPport, ipAddr, multiCastAddr);
+    }
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if ((port < 0) || (port > 65535))
+    {
+        _engineStatistics.SetLastError(VE_INVALID_PORT_NMBR, kTraceError,
+                                       "SetLocalReceiver() invalid RTP port");
+        return -1;
+    }
+    if (((RTCPport != kVoEDefault) && (RTCPport < 0)) || ((RTCPport
+            != kVoEDefault) && (RTCPport > 65535)))
+    {
+        _engineStatistics.SetLastError(VE_INVALID_PORT_NMBR, kTraceError,
+                                       "SetLocalReceiver() invalid RTCP port");
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetLocalReceiver() failed to locate "
+                                           "channel");
+        return -1;
+    }
+
+    // Cast RTCP port. In the RTP module 0 corresponds to RTP port + 1 in
+    // the module, which is the default.
+    WebRtc_UWord16 rtcpPortUW16(0);
+    if (RTCPport != kVoEDefault)
+    {
+        rtcpPortUW16 = static_cast<WebRtc_UWord16> (RTCPport);
+    }
+
+    return channelPtr->SetLocalReceiver(port, rtcpPortUW16, ipAddr,
+                                        multiCastAddr);
+#else
+    _engineStatistics.SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED,
+            kTraceWarning,
+            "SetLocalReceiver() VoE is built for "
+            "external transport");
+    return -1;
+#endif
+}
+
+int VoEBaseImpl::GetLocalReceiver(int channel, int& port, int& RTCPport,
+                                  char ipAddr[64])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetLocalReceiver(channel=%d, ipAddr[]=?)", channel);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "GetLocalReceiver() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    WebRtc_Word32 ret = channelPtr->GetLocalReceiver(port, RTCPport, ipAddr);
+    if (ipAddr != NULL)
+    {
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                     "GetLocalReceiver() => port=%d, RTCPport=%d, ipAddr=%s",
+                     port, RTCPport, ipAddr);
+    }
+    else
+    {
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                     "GetLocalReceiver() => port=%d, RTCPport=%d", port,
+                     RTCPport);
+    }
+    return ret;
+#else
+    _engineStatistics.SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED,
+                                   kTraceWarning,
+                                   "GetLocalReceiver() VoE is built for "
+                                   "external transport");
+    return -1;
+#endif
+}
+
+int VoEBaseImpl::SetSendDestination(int channel, int port, const char* ipaddr,
+                                    int sourcePort, int RTCPport)
+{
+    WEBRTC_TRACE(
+                 kTraceApiCall,
+                 kTraceVoice,
+                 VoEId(_instanceId, -1),
+                 "SetSendDestination(channel=%d, port=%d, ipaddr=%s,"
+                 "sourcePort=%d, RTCPport=%d)",
+                 channel, port, ipaddr, sourcePort, RTCPport);
+    CriticalSectionScoped cs(*_apiCritPtr);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetSendDestination() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    if ((port < 0) || (port > 65535))
+    {
+        _engineStatistics.SetLastError(VE_INVALID_PORT_NMBR, kTraceError,
+                                       "SetSendDestination() invalid RTP port");
+        return -1;
+    }
+    if (((RTCPport != kVoEDefault) && (RTCPport < 0)) || ((RTCPport
+            != kVoEDefault) && (RTCPport > 65535)))
+    {
+        _engineStatistics.SetLastError(VE_INVALID_PORT_NMBR, kTraceError,
+                                       "SetSendDestination() invalid RTCP "
+                                       "port");
+        return -1;
+    }
+    if (((sourcePort != kVoEDefault) && (sourcePort < 0)) || ((sourcePort
+            != kVoEDefault) && (sourcePort > 65535)))
+    {
+        _engineStatistics.SetLastError(VE_INVALID_PORT_NMBR, kTraceError,
+                                       "SetSendDestination() invalid source "
+                                       "port");
+        return -1;
+    }
+
+    // Cast RTCP port. In the RTP module 0 corresponds to RTP port + 1 in the
+    // module, which is the default.
+    WebRtc_UWord16 rtcpPortUW16(0);
+    if (RTCPport != kVoEDefault)
+    {
+        rtcpPortUW16 = static_cast<WebRtc_UWord16> (RTCPport);
+        WEBRTC_TRACE(
+                     kTraceInfo,
+                     kTraceVoice,
+                     VoEId(_instanceId, channel),
+                     "SetSendDestination() non default RTCP port %u will be "
+                     "utilized",
+                     rtcpPortUW16);
+    }
+
+    return channelPtr->SetSendDestination(port, ipaddr, sourcePort,
+                                          rtcpPortUW16);
+#else
+    _engineStatistics.SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED,
+                                   kTraceWarning,
+                                   "SetSendDestination() VoE is built for "
+                                   "external transport");
+    return -1;
+#endif
+}
+
+int VoEBaseImpl::GetSendDestination(int channel, int& port, char ipAddr[64],
+                                    int& sourcePort, int& RTCPport)
+{
+    WEBRTC_TRACE(
+                 kTraceApiCall,
+                 kTraceVoice,
+                 VoEId(_instanceId, -1),
+                 "GetSendDestination(channel=%d, ipAddr[]=?, sourcePort=?,"
+                 "RTCPport=?)",
+                 channel);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "GetSendDestination() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    WebRtc_Word32 ret = channelPtr->GetSendDestination(port, ipAddr,
+                                                       sourcePort, RTCPport);
+    if (ipAddr != NULL)
+    {
+        WEBRTC_TRACE(
+                     kTraceStateInfo,
+                     kTraceVoice,
+                     VoEId(_instanceId, -1),
+                     "GetSendDestination() => port=%d, RTCPport=%d, ipAddr=%s, "
+                     "sourcePort=%d",
+                     port, RTCPport, ipAddr, sourcePort);
+    }
+    else
+    {
+        WEBRTC_TRACE(
+                     kTraceStateInfo,
+                     kTraceVoice,
+                     VoEId(_instanceId, -1),
+                     "GetSendDestination() => port=%d, RTCPport=%d, "
+                     "sourcePort=%d, RTCPport=%d",
+                     port, RTCPport, sourcePort, RTCPport);
+    }
+    return ret;
+#else
+    _engineStatistics.SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED,
+                                   kTraceWarning,
+                                   "GetSendDestination() VoE is built for "
+                                   "external transport");
+    return -1;
+#endif
+}
+
+int VoEBaseImpl::StartReceive(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "StartReceive(channel=%d)", channel);
+    CriticalSectionScoped cs(*_apiCritPtr);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "StartReceive() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    return channelPtr->StartReceiving();
+}
+
+int VoEBaseImpl::StopReceive(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "StopReceive(channel=%d)", channel);
+    CriticalSectionScoped cs(*_apiCritPtr);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "StopReceive() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    return channelPtr->StopReceiving();
+}
+
+int VoEBaseImpl::StartPlayout(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "StartPlayout(channel=%d)", channel);
+    CriticalSectionScoped cs(*_apiCritPtr);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "StartPlayout() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    if (channelPtr->Playing())
+    {
+        return 0;
+    }
+    if (StartPlayout() != 0)
+    {
+        _engineStatistics.SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR,
+                                       kTraceError,
+                                       "StartPlayout() failed to start "
+                                       "playout");
+        return -1;
+    }
+    return channelPtr->StartPlayout();
+}
+
+int VoEBaseImpl::StopPlayout(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "StopPlayout(channel=%d)", channel);
+    CriticalSectionScoped cs(*_apiCritPtr);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "StopPlayout() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    if (channelPtr->StopPlayout() != 0)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                     "StopPlayout() failed to stop playout for channel %d",
+                     channel);
+    }
+    return StopPlayout();
+}
+
+int VoEBaseImpl::StartSend(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "StartSend(channel=%d)", channel);
+    CriticalSectionScoped cs(*_apiCritPtr);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "StartSend() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->Sending())
+    {
+        return 0;
+    }
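+    // When the built-in socket transport is used, a send destination must
+    // have been set before transmission can start.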
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!channelPtr->ExternalTransport()
+            && !channelPtr->SendSocketsInitialized())
+    {
+        _engineStatistics.SetLastError(VE_DESTINATION_NOT_INITED, kTraceError,
+                                       "StartSend() must set send destination "
+                                       "first");
+        return -1;
+    }
+#endif
+    if (StartSend() != 0)
+    {
+        _engineStatistics.SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR,
+                                       kTraceError,
+                                       "StartSend() failed to start recording");
+        return -1;
+    }
+    return channelPtr->StartSend();
+}
+
+int VoEBaseImpl::StopSend(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "StopSend(channel=%d)", channel);
+    CriticalSectionScoped cs(*_apiCritPtr);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "StopSend() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->StopSend() != 0)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                     "StopSend() failed to stop sending for channel %d",
+                     channel);
+    }
+    return StopSend();
+}
+
+int VoEBaseImpl::GetVersion(char version[1024])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetVersion(version=?)");
+    assert(kVoiceEngineVersionMaxMessageSize == 1024);
+
+    if (version == NULL)
+    {
+        _engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError);
+        return (-1);
+    }
+
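+    // Assemble the version string by appending one line per component into a
+    // local buffer; accLen tracks the accumulated length.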
+    char versionBuf[kVoiceEngineVersionMaxMessageSize];
+    char* versionPtr = versionBuf;
+
+    WebRtc_Word32 len = 0;
+    WebRtc_Word32 accLen = 0;
+
+    len = AddVoEVersion(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+
+    len = AddBuildInfo(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+    len = AddExternalTransportBuild(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+#endif
+
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    len = AddExternalRecAndPlayoutBuild(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+#endif
+
+    len = AddADMVersion(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    len = AddSocketModuleVersion(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+#endif
+
+#ifdef WEBRTC_SRTP
+    len = AddSRTPModuleVersion(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+#endif
+
+    len = AddRtpRtcpModuleVersion(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+
+    len = AddConferenceMixerVersion(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+
+    len = AddAudioProcessingModuleVersion(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+
+    len = AddACMVersion(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+
+    len = AddSPLIBVersion(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+
+    memcpy(version, versionBuf, accLen);
+    version[accLen] = '\0';
+
+    // To avoid truncation in the trace, split the string into parts.
+    char partOfVersion[256];
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetVersion() =>");
+    for (int partStart = 0; partStart < accLen;)
+    {
+        memset(partOfVersion, 0, sizeof(partOfVersion));
+        int partEnd = partStart + 180;
+        // Clamp to the written length to avoid scanning past the terminator.
+        if (partEnd > accLen)
+        {
+            partEnd = accLen;
+        }
+        while (version[partEnd] != '\n' && version[partEnd] != '\0')
+        {
+            partEnd--;
+        }
+        if (partEnd < accLen)
+        {
+            memcpy(partOfVersion, &version[partStart], partEnd - partStart);
+        }
+        else
+        {
+            memcpy(partOfVersion, &version[partStart], accLen - partStart);
+        }
+        partStart = partEnd;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                     "%s", partOfVersion);
+    }
+
+    return 0;
+}
+
+WebRtc_Word32 VoEBaseImpl::AddBuildInfo(char* str) const
+{
+    return sprintf(str, "Build: %s\n", BUILDINFO);
+}
+
+WebRtc_Word32 VoEBaseImpl::AddVoEVersion(char* str) const
+{
+    return sprintf(str, "VoiceEngine 4.1.0\n");
+}
+
+WebRtc_Word32 VoEBaseImpl::AddSPLIBVersion(char* str) const
+{
+    char version[16];
+    unsigned int len(16);
+    WebRtcSpl_get_version(version, len);
+    return sprintf(str, "SPLIB\t%s\n", version);
+}
+
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+WebRtc_Word32 VoEBaseImpl::AddExternalTransportBuild(char* str) const
+{
+    return sprintf(str, "External transport build\n");
+}
+#endif
+
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+WebRtc_Word32 VoEBaseImpl::AddExternalRecAndPlayoutBuild(char* str) const
+{
+    return sprintf(str, "External recording and playout build\n");
+}
+#endif
+
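+// Appends the version string reported by a module, terminated by a newline.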
+WebRtc_Word32 VoEBaseImpl::AddModuleVersion(Module* module, char* str) const
+{
+    WebRtc_Word8 version[kVoiceEngineMaxModuleVersionSize];
+    WebRtc_UWord32 remainingBufferInBytes(kVoiceEngineMaxModuleVersionSize);
+    WebRtc_UWord32 position(0);
+    if (module->Version(version, remainingBufferInBytes, position) == 0)
+    {
+        return sprintf(str, "%s\n", version);
+    }
+    return -1;
+}
+
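+// Uses the registered ADM if one exists; otherwise a temporary instance is
+// created only to read out its version and is destroyed again afterwards.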
+WebRtc_Word32 VoEBaseImpl::AddADMVersion(char* str) const
+{
+    AudioDeviceModule* admPtr(_audioDevicePtr);
+    if (_audioDevicePtr == NULL)
+    {
+        admPtr = AudioDeviceModule::Create(-1);
+    }
+    int len = AddModuleVersion(admPtr, str);
+    if (_audioDevicePtr == NULL)
+    {
+        AudioDeviceModule::Destroy(admPtr);
+    }
+    return len;
+}
+
+int VoEBaseImpl::AddAudioProcessingModuleVersion(char* str) const
+{
+    AudioProcessing* vpmPtr(_audioProcessingModulePtr);
+    if (_audioProcessingModulePtr == NULL)
+    {
+        vpmPtr = AudioProcessing::Create(-1);
+    }
+    int len = AddModuleVersion(vpmPtr, str);
+    if (_audioProcessingModulePtr == NULL)
+    {
+        AudioProcessing::Destroy(vpmPtr);
+    }
+    return len;
+}
+
+WebRtc_Word32 VoEBaseImpl::AddACMVersion(char* str) const
+{
+    AudioCodingModule* acmPtr = AudioCodingModule::Create(-1);
+    int len = AddModuleVersion(acmPtr, str);
+    AudioCodingModule::Destroy(acmPtr);
+    return len;
+}
+
+WebRtc_Word32 VoEBaseImpl::AddConferenceMixerVersion(char* str) const
+{
+    AudioConferenceMixer* mixerPtr =
+            AudioConferenceMixer::CreateAudioConferenceMixer(-1);
+    int len = AddModuleVersion(mixerPtr, str);
+    delete mixerPtr;
+    return len;
+}
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+WebRtc_Word32 VoEBaseImpl::AddSocketModuleVersion(char* str) const
+{
+    WebRtc_UWord8 numSockThreads(1);
+    UdpTransport* socketPtr = UdpTransport::Create(-1, numSockThreads);
+    int len = AddModuleVersion(socketPtr, str);
+    UdpTransport::Destroy(socketPtr);
+    return len;
+}
+#endif
+
+#ifdef WEBRTC_SRTP
+WebRtc_Word32 VoEBaseImpl::AddSRTPModuleVersion(char* str) const
+{
+    SrtpModule* srtpPtr = SrtpModule::CreateSrtpModule(-1);
+    int len = AddModuleVersion(srtpPtr, str);
+    SrtpModule::DestroySrtpModule(srtpPtr);
+    return len;
+}
+#endif
+
+WebRtc_Word32 VoEBaseImpl::AddRtpRtcpModuleVersion(char* str) const
+{
+    RtpRtcp* rtpRtcpPtr = RtpRtcp::CreateRtpRtcp(-1, true);
+    int len = AddModuleVersion(rtpRtcpPtr, str);
+    RtpRtcp::DestroyRtpRtcp(rtpRtcpPtr);
+    return len;
+}
+
+int VoEBaseImpl::LastError()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "LastError()");
+    return (_engineStatistics.LastError());
+}
+
+
+int VoEBaseImpl::SetNetEQPlayoutMode(int channel, NetEqModes mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetNetEQPlayoutMode(channel=%i, mode=%i)", channel, mode);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetNetEQPlayoutMode() failed to locate"
+                                       " channel");
+        return -1;
+    }
+    return channelPtr->SetNetEQPlayoutMode(mode);
+}
+
+int VoEBaseImpl::GetNetEQPlayoutMode(int channel, NetEqModes& mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetNetEQPlayoutMode(channel=%i, mode=?)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "GetNetEQPlayoutMode() failed to locate"
+                                       " channel");
+        return -1;
+    }
+    return channelPtr->GetNetEQPlayoutMode(mode);
+}
+
+int VoEBaseImpl::SetNetEQBGNMode(int channel, NetEqBgnModes mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetNetEQBGNMode(channel=%i, mode=%i)", channel, mode);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetNetEQBGNMode() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    return channelPtr->SetNetEQBGNMode(mode);
+}
+
+int VoEBaseImpl::GetNetEQBGNMode(int channel, NetEqBgnModes& mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetNetEQBGNMode(channel=%i, mode=?)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "GetNetEQBGNMode() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    return channelPtr->GetNetEQBGNMode(mode);
+}
+
+int VoEBaseImpl::SetOnHoldStatus(int channel, bool enable, OnHoldModes mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetOnHoldStatus(channel=%d, enable=%d, mode=%d)", channel,
+                 enable, mode);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetOnHoldStatus() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    return channelPtr->SetOnHoldStatus(enable, mode);
+}
+
+int VoEBaseImpl::GetOnHoldStatus(int channel, bool& enabled, OnHoldModes& mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetOnHoldStatus(channel=%d, enabled=?, mode=?)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "GetOnHoldStatus() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    return channelPtr->GetOnHoldStatus(enabled, mode);
+}
+
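+// Initializes and starts playout on the audio device, unless external playout
+// is used or the device is already playing.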
+WebRtc_Word32 VoEBaseImpl::StartPlayout()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoEBaseImpl::StartPlayout()");
+    if (_audioDevicePtr->Playing())
+    {
+        return 0;
+    }
+    if (!_externalPlayout)
+    {
+        if (_audioDevicePtr->InitPlayout() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
+                         "StartPlayout() failed to initialize playout");
+            return -1;
+        }
+        if (_audioDevicePtr->StartPlayout() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
+                         "StartPlayout() failed to start playout");
+            return -1;
+        }
+    }
+    return 0;
+}
+
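+// Stops playout on the audio device, but only when no channel is playing out
+// any longer.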
+WebRtc_Word32 VoEBaseImpl::StopPlayout()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoEBaseImpl::StopPlayout()");
+
+    WebRtc_Word32 numOfChannels = _channelManager.NumOfChannels();
+    if (numOfChannels <= 0)
+    {
+        return 0;
+    }
+
+    WebRtc_UWord16 nChannelsPlaying(0);
+    WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels];
+
+    // Get number of playing channels
+    _channelManager.GetChannelIds(channelsArray, numOfChannels);
+    for (int i = 0; i < numOfChannels; i++)
+    {
+        voe::ScopedChannel sc(_channelManager, channelsArray[i]);
+        voe::Channel* chPtr = sc.ChannelPtr();
+        if (chPtr)
+        {
+            if (chPtr->Playing())
+            {
+                nChannelsPlaying++;
+            }
+        }
+    }
+    delete[] channelsArray;
+
+    // Stop audio-device playing if no channel is playing out
+    if (nChannelsPlaying == 0)
+    {
+        if (_audioDevicePtr->StopPlayout() != 0)
+        {
+            _engineStatistics.SetLastError(VE_CANNOT_STOP_PLAYOUT, kTraceError,
+                                           "StopPlayout() failed to stop "
+                                           "playout");
+            return -1;
+        }
+    }
+    return 0;
+}
+
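+// Initializes and starts recording on the audio device, unless external
+// recording is used or the device is already recording.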
+WebRtc_Word32 VoEBaseImpl::StartSend()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoEBaseImpl::StartSend()");
+    if (_audioDevicePtr->Recording())
+    {
+        return 0;
+    }
+    if (!_externalRecording)
+    {
+        if (_audioDevicePtr->InitRecording() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
+                         "StartSend() failed to initialize recording");
+            return -1;
+        }
+        if (_audioDevicePtr->StartRecording() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
+                         "StartSend() failed to start recording");
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
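+// Stops recording on the audio device when no channel is sending and the
+// transmit mixer is not recording the microphone.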
+WebRtc_Word32 VoEBaseImpl::StopSend()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoEBaseImpl::StopSend()");
+
+    if ((NumOfSendingChannels() == 0) && !_transmitMixerPtr->IsRecordingMic())
+    {
+        // Stop audio-device recording if no channel is recording
+        if (_audioDevicePtr->StopRecording() != 0)
+        {
+            _engineStatistics.SetLastError(VE_CANNOT_STOP_RECORDING,
+                                           kTraceError,
+                                           "StopSend() failed to stop "
+                                           "recording");
+            return -1;
+        }
+        _transmitMixerPtr->StopSend();
+    }
+
+    return 0;
+}
+
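+// Deletes all remaining channels, stops the module process thread and shuts
+// down the audio device and audio processing modules.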
+WebRtc_Word32 VoEBaseImpl::TerminateInternal()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoEBaseImpl::TerminateInternal()");
+
+    // Delete any remaining channel objects
+    WebRtc_Word32 numOfChannels = _channelManager.NumOfChannels();
+    if (numOfChannels > 0)
+    {
+        WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels];
+        _channelManager.GetChannelIds(channelsArray, numOfChannels);
+        for (int i = 0; i < numOfChannels; i++)
+        {
+            DeleteChannel(channelsArray[i]);
+        }
+        delete[] channelsArray;
+    }
+
+    if (_moduleProcessThreadPtr)
+    {
+        if (_audioDevicePtr)
+        {
+            if (_moduleProcessThreadPtr->DeRegisterModule(_audioDevicePtr) != 0)
+            {
+                _engineStatistics.SetLastError(VE_THREAD_ERROR, kTraceError,
+                                               "TerminateInternal() failed to "
+                                               "deregister ADM");
+            }
+        }
+        if (_moduleProcessThreadPtr->Stop() != 0)
+        {
+            _engineStatistics.SetLastError(VE_THREAD_ERROR, kTraceError,
+                                           "TerminateInternal() failed to stop "
+                                           "module process thread");
+        }
+    }
+
+    // Audio Device Module
+
+    if (_audioDevicePtr != NULL)
+    {
+        if (_audioDevicePtr->StopPlayout() != 0)
+        {
+            _engineStatistics.SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
+                                           "TerminateInternal() failed to stop "
+                                           "playout");
+        }
+        if (_audioDevicePtr->StopRecording() != 0)
+        {
+            _engineStatistics.SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
+                                           "TerminateInternal() failed to stop "
+                                           "recording");
+        }
+        _audioDevicePtr->RegisterEventObserver(NULL);
+        _audioDevicePtr->RegisterAudioCallback(NULL);
+        if (_audioDevicePtr->Terminate() != 0)
+        {
+            _engineStatistics.SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR,
+                                           kTraceError,
+                                           "TerminateInternal() failed to "
+                                           "terminate the ADM");
+        }
+        if (!_usingExternalAudioDevice)
+        {
+            AudioDeviceModule::Destroy(_audioDevicePtr);
+            _audioDevicePtr = NULL;
+        }
+    }
+
+    // AP module
+
+    if (_audioProcessingModulePtr != NULL)
+    {
+        _transmitMixerPtr->SetAudioProcessingModule(NULL);
+        AudioProcessing::Destroy(_audioProcessingModulePtr);
+        _audioProcessingModulePtr = NULL;
+    }
+
+    return _engineStatistics.SetUnInitialized();
+}
+
+} // namespace webrtc
diff --git a/voice_engine/main/source/voe_base_impl.h b/voice_engine/main/source/voe_base_impl.h
new file mode 100644
index 0000000..a7c9faf
--- /dev/null
+++ b/voice_engine/main/source/voe_base_impl.h
@@ -0,0 +1,166 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_BASE_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_BASE_IMPL_H
+
+#include "voe_base.h"
+
+#include "ref_count.h"
+#include "shared_data.h"
+
+namespace webrtc
+{
+
+class ProcessThread;
+
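+// Implements the VoEBase sub-API. Also acts as AudioTransport and
+// AudioDeviceObserver in order to receive audio data and error/warning
+// notifications from the audio device module.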
+class VoEBaseImpl: public virtual voe::SharedData,
+                   public VoEBase,
+                   public voe::RefCount,
+                   public AudioTransport,
+                   public AudioDeviceObserver
+{
+public:
+    virtual int Release();
+
+    virtual int RegisterVoiceEngineObserver(VoiceEngineObserver& observer);
+
+    virtual int DeRegisterVoiceEngineObserver();
+
+    virtual int RegisterAudioDeviceModule(AudioDeviceModule& adm);
+
+    virtual int DeRegisterAudioDeviceModule();
+
+    virtual int Init();
+
+    virtual int Terminate();
+
+    virtual int MaxNumOfChannels();
+
+    virtual int CreateChannel();
+
+    virtual int DeleteChannel(int channel);
+
+    virtual int SetLocalReceiver(int channel, int port,
+                                 int RTCPport = kVoEDefault,
+                                 const char ipAddr[64] = NULL,
+                                 const char multiCastAddr[64] = NULL);
+
+    virtual int GetLocalReceiver(int channel, int& port, int& RTCPport,
+                                 char ipAddr[64]);
+
+    virtual int SetSendDestination(int channel, int port,
+                                   const char ipAddr[64],
+                                   int sourcePort = kVoEDefault,
+                                   int RTCPport = kVoEDefault);
+
+    virtual int GetSendDestination(int channel,
+                                   int& port,
+                                   char ipAddr[64],
+                                   int& sourcePort,
+                                   int& RTCPport);
+
+    virtual int StartReceive(int channel);
+
+    virtual int StartPlayout(int channel);
+
+    virtual int StartSend(int channel);
+
+    virtual int StopReceive(int channel);
+
+    virtual int StopPlayout(int channel);
+
+    virtual int StopSend(int channel);
+
+    virtual int SetNetEQPlayoutMode(int channel, NetEqModes mode);
+
+    virtual int GetNetEQPlayoutMode(int channel, NetEqModes& mode);
+
+    virtual int SetNetEQBGNMode(int channel, NetEqBgnModes mode);
+
+    virtual int GetNetEQBGNMode(int channel, NetEqBgnModes& mode);
+
+
+    virtual int SetOnHoldStatus(int channel,
+                                bool enable,
+                                OnHoldModes mode = kHoldSendAndPlay);
+
+    virtual int GetOnHoldStatus(int channel, bool& enabled, OnHoldModes& mode);
+
+    virtual int GetVersion(char version[1024]);
+
+    virtual int LastError();
+
+    // AudioTransport
+    virtual WebRtc_Word32
+        RecordedDataIsAvailable(const WebRtc_Word8* audioSamples,
+                                const WebRtc_UWord32 nSamples,
+                                const WebRtc_UWord8 nBytesPerSample,
+                                const WebRtc_UWord8 nChannels,
+                                const WebRtc_UWord32 samplesPerSec,
+                                const WebRtc_UWord32 totalDelayMS,
+                                const WebRtc_Word32 clockDrift,
+                                const WebRtc_UWord32 currentMicLevel,
+                                WebRtc_UWord32& newMicLevel);
+
+    virtual WebRtc_Word32 NeedMorePlayData(const WebRtc_UWord32 nSamples,
+                                           const WebRtc_UWord8 nBytesPerSample,
+                                           const WebRtc_UWord8 nChannels,
+                                           const WebRtc_UWord32 samplesPerSec,
+                                           WebRtc_Word8* audioSamples,
+                                           WebRtc_UWord32& nSamplesOut);
+
+    // AudioDeviceObserver
+    virtual void OnErrorIsReported(const ErrorCode error);
+    virtual void OnWarningIsReported(const WarningCode warning);
+
+protected:
+    VoEBaseImpl();
+    virtual ~VoEBaseImpl();
+
+private:
+    WebRtc_Word32 StartPlayout();
+    WebRtc_Word32 StopPlayout();
+    WebRtc_Word32 StartSend();
+    WebRtc_Word32 StopSend();
+    WebRtc_Word32 TerminateInternal();
+
+    WebRtc_Word32 AddBuildInfo(char* str) const;
+    WebRtc_Word32 AddVoEVersion(char* str) const;
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+    WebRtc_Word32 AddExternalTransportBuild(char* str) const;
+#else
+    WebRtc_Word32 AddSocketModuleVersion(char* str) const;
+#endif
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    WebRtc_Word32 AddExternalRecAndPlayoutBuild(char* str) const;
+#endif
+    WebRtc_Word32 AddModuleVersion(Module* module, char* str) const;
+    WebRtc_Word32 AddADMVersion(char* str) const;
+    int AddAudioProcessingModuleVersion(char* str) const;
+    WebRtc_Word32 AddACMVersion(char* str) const;
+    WebRtc_Word32 AddConferenceMixerVersion(char* str) const;
+#ifdef WEBRTC_SRTP
+    WebRtc_Word32 AddSRTPModuleVersion(char* str) const;
+#endif
+    WebRtc_Word32 AddRtpRtcpModuleVersion(char* str) const;
+    WebRtc_Word32 AddSPLIBVersion(char* str) const;
+
+    VoiceEngineObserver* _voiceEngineObserverPtr;
+    CriticalSectionWrapper& _callbackCritSect;
+
+    bool _voiceEngineObserver;
+    WebRtc_UWord32 _oldVoEMicLevel;
+    WebRtc_UWord32 _oldMicLevel;
+};
+
+} // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_VOE_BASE_IMPL_H
diff --git a/voice_engine/main/source/voe_call_report_impl.cc b/voice_engine/main/source/voe_call_report_impl.cc
new file mode 100644
index 0000000..20c32e5
--- /dev/null
+++ b/voice_engine/main/source/voe_call_report_impl.cc
@@ -0,0 +1,564 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_call_report_impl.h"
+
+#include "audio_processing.h"
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "file_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc
+{
+
+VoECallReport* VoECallReport::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+    return NULL;
+#else
+    if (NULL == voiceEngine)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* s =
+            reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoECallReportImpl* d = s;
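+    // Increase the internal reference count (decreased again in Release()).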
+    (*d)++;
+    return (d);
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+
+VoECallReportImpl::VoECallReportImpl() :
+    _file(*FileWrapper::Create())
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoECallReportImpl() - ctor");
+}
+
+VoECallReportImpl::~VoECallReportImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+                 "~VoECallReportImpl() - dtor");
+    delete &_file;
+}
+
+int VoECallReportImpl::Release()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoECallReportImpl::Release()");
+    (*this)--;
+    int refCount = GetCount();
+    if (refCount < 0)
+    {
+        Reset();
+        _engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
+                                       kTraceWarning);
+        return (-1);
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoECallReportImpl reference counter = %d", refCount);
+    return (refCount);
+}
+
+int VoECallReportImpl::ResetCallReportStatistics(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "ResetCallReportStatistics(channel=%d)", channel);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    assert(_audioProcessingModulePtr != NULL);
+
+    int res1(0);
+    int res2(0);
+    bool levelMode =
+        _audioProcessingModulePtr->level_estimator()->is_enabled();
+    bool echoMode =
+        _audioProcessingModulePtr->echo_cancellation()->are_metrics_enabled();
+
+    // The level and echo metrics are always set to the same mode.
+    if (levelMode != echoMode)
+    {
+        _engineStatistics.SetLastError(VE_APM_ERROR, kTraceError,
+                                       "ResetCallReportStatistics() level mode "
+                                       "and echo mode are not the same");
+        return -1;
+    }
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "  current AudioProcessingModule metric currentState %d",
+                 levelMode);
+    // Reset the APM statistics
+    if ((_audioProcessingModulePtr->level_estimator()->Enable(true) != 0)
+      || (_audioProcessingModulePtr->echo_cancellation()->enable_metrics(true)
+      != 0))
+    {
+        _engineStatistics.SetLastError(VE_APM_ERROR, kTraceError,
+                                       "ResetCallReportStatistics() unable to "
+                                       "set the AudioProcessingModule metrics "
+                                       "state");
+        return -1;
+    }
+    // Restore metric states
+    _audioProcessingModulePtr->level_estimator()->Enable(levelMode);
+    _audioProcessingModulePtr->echo_cancellation()->enable_metrics(echoMode);
+
+    // Reset channel dependent statistics
+    if (channel != -1)
+    {
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                           "ResetCallReportStatistics() failed "
+                                           "to locate channel");
+            return -1;
+        }
+        channelPtr->ResetDeadOrAliveCounters();
+        channelPtr->ResetRTCPStatistics();
+    }
+    else
+    {
+        WebRtc_Word32 numOfChannels = _channelManager.NumOfChannels();
+        if (numOfChannels <= 0)
+        {
+            return 0;
+        }
+        WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels];
+        _channelManager.GetChannelIds(channelsArray, numOfChannels);
+        for (int i = 0; i < numOfChannels; i++)
+        {
+            voe::ScopedChannel sc(_channelManager, channelsArray[i]);
+            voe::Channel* channelPtr = sc.ChannelPtr();
+            if (channelPtr)
+            {
+                channelPtr->ResetDeadOrAliveCounters();
+                channelPtr->ResetRTCPStatistics();
+            }
+        }
+        delete[] channelsArray;
+    }
+
+    return 0;
+}
+
+int VoECallReportImpl::GetSpeechAndNoiseSummary(LevelStatistics& stats)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetSpeechAndNoiseSummary()");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    assert(_audioProcessingModulePtr != NULL);
+
+    return (GetSpeechAndNoiseSummaryInternal(stats));
+}
+
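+// Copies the level-estimator metrics from the APM into the caller's struct.
+// If the metrics cannot be retrieved, all fields are marked as invalid
+// (-100 dBm0).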
+int VoECallReportImpl::GetSpeechAndNoiseSummaryInternal(LevelStatistics& stats)
+{
+    int ret(0);
+    bool mode(false);
+    LevelEstimator::Metrics metrics;
+    LevelEstimator::Metrics reverseMetrics;
+
+    // Check whether level metrics are enabled.
+    mode = _audioProcessingModulePtr->level_estimator()->is_enabled();
+    if (mode != false)
+    {
+        ret = _audioProcessingModulePtr->level_estimator()->GetMetrics(
+            &metrics, &reverseMetrics);
+        if (ret != 0)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                       "  GetSpeechAndNoiseSummary(), AudioProcessingModule "
+                       "level metrics error");
+        }
+    }
+    else
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                   "  GetSpeechAndNoiseSummary(), AudioProcessingModule level "
+                   "metrics is not enabled");
+    }
+
+    if ((ret != 0) || (mode == false))
+    {
+        // Mark complete struct as invalid (-100 dBm0)
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                   "  unable to retrieve level metrics from the "
+                   "AudioProcessingModule");
+        stats.noise_rx.min = -100;
+        stats.noise_rx.max = -100;
+        stats.noise_rx.average = -100;
+        stats.speech_rx.min = -100;
+        stats.speech_rx.max = -100;
+        stats.speech_rx.average = -100;
+        stats.noise_tx.min = -100;
+        stats.noise_tx.max = -100;
+        stats.noise_tx.average = -100;
+        stats.speech_tx.min = -100;
+        stats.speech_tx.max = -100;
+        stats.speech_tx.average = -100;
+    }
+    else
+    {
+        // Deliver output results to user
+        stats.noise_rx.min = reverseMetrics.noise.minimum;
+        stats.noise_rx.max = reverseMetrics.noise.maximum;
+        stats.noise_rx.average = reverseMetrics.noise.average;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                   "  noise_rx: min=%d, max=%d, avg=%d", stats.noise_rx.min,
+                   stats.noise_rx.max, stats.noise_rx.average);
+
+        stats.noise_tx.min = metrics.noise.minimum;
+        stats.noise_tx.max = metrics.noise.maximum;
+        stats.noise_tx.average = metrics.noise.average;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                   "  noise_tx: min=%d, max=%d, avg=%d", stats.noise_tx.min,
+                   stats.noise_tx.max, stats.noise_tx.average);
+
+        stats.speech_rx.min = reverseMetrics.speech.minimum;
+        stats.speech_rx.max = reverseMetrics.speech.maximum;
+        stats.speech_rx.average = reverseMetrics.speech.average;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                   "  speech_rx: min=%d, max=%d, avg=%d", stats.speech_rx.min,
+                   stats.speech_rx.max, stats.speech_rx.average);
+
+        stats.speech_tx.min = metrics.speech.minimum;
+        stats.speech_tx.max = metrics.speech.maximum;
+        stats.speech_tx.average = metrics.speech.average;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                   "  speech_tx: min=%d, max=%d, avg=%d", stats.speech_tx.min,
+                   stats.speech_tx.max, stats.speech_tx.average);
+    }
+    return 0;
+}
+
+int VoECallReportImpl::GetEchoMetricSummary(EchoStatistics& stats)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetEchoMetricSummary()");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    assert(_audioProcessingModulePtr != NULL);
+
+    return (GetEchoMetricSummaryInternal(stats));
+}
+
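+// Copies the echo-cancellation metrics (ERL, ERLE, RERL, A-NLP) from the APM
+// into the caller's struct. If the metrics cannot be retrieved, all fields
+// are marked as invalid (-100 dB).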
+int VoECallReportImpl::GetEchoMetricSummaryInternal(EchoStatistics& stats)
+{
+    // Retrieve echo metrics from the AudioProcessingModule
+    int ret(0);
+    bool mode(false);
+    EchoCancellation::Metrics metrics;
+
+    // Check whether echo metrics are enabled.
+
+    mode =
+        _audioProcessingModulePtr->echo_cancellation()->are_metrics_enabled();
+    if (mode != false)
+    {
+        ret =
+          _audioProcessingModulePtr->echo_cancellation()->GetMetrics(&metrics);
+        if (ret != 0)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                       "  AudioProcessingModule GetMetrics() => error");
+        }
+    }
+    else
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                   "  AudioProcessingModule echo metrics is not enabled");
+    }
+
+    if ((ret != 0) || (mode == false))
+    {
+        // Mark complete struct as invalid (-100 dB)
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                   "  unable to retrieve echo metrics from the "
+                   "AudioProcessingModule");
+        stats.erl.min = -100;
+        stats.erl.max = -100;
+        stats.erl.average = -100;
+        stats.erle.min = -100;
+        stats.erle.max = -100;
+        stats.erle.average = -100;
+        stats.rerl.min = -100;
+        stats.rerl.max = -100;
+        stats.rerl.average = -100;
+        stats.a_nlp.min = -100;
+        stats.a_nlp.max = -100;
+        stats.a_nlp.average = -100;
+    }
+    else
+    {
+
+        // Deliver output results to user
+        stats.erl.min = metrics.echo_return_loss.minimum;
+        stats.erl.max = metrics.echo_return_loss.maximum;
+        stats.erl.average = metrics.echo_return_loss.average;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                   "  erl: min=%d, max=%d, avg=%d", stats.erl.min,
+                   stats.erl.max, stats.erl.average);
+
+        stats.erle.min = metrics.echo_return_loss_enhancement.minimum;
+        stats.erle.max = metrics.echo_return_loss_enhancement.maximum;
+        stats.erle.average = metrics.echo_return_loss_enhancement.average;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                   "  erle: min=%d, max=%d, avg=%d", stats.erle.min,
+                   stats.erle.max, stats.erle.average);
+
+        stats.rerl.min = metrics.residual_echo_return_loss.minimum;
+        stats.rerl.max = metrics.residual_echo_return_loss.maximum;
+        stats.rerl.average = metrics.residual_echo_return_loss.average;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                   "  rerl: min=%d, max=%d, avg=%d", stats.rerl.min,
+                   stats.rerl.max, stats.rerl.average);
+
+        stats.a_nlp.min = metrics.a_nlp.minimum;
+        stats.a_nlp.max = metrics.a_nlp.maximum;
+        stats.a_nlp.average = metrics.a_nlp.average;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                   "  a_nlp: min=%d, max=%d, avg=%d", stats.a_nlp.min,
+                   stats.a_nlp.max, stats.a_nlp.average);
+    }
+    return 0;
+}
+
+int VoECallReportImpl::GetRoundTripTimeSummary(int channel, StatVal& delaysMs)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetRoundTripTimeSummary()");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "GetRoundTripTimeSummary() failed to "
+                                       "locate channel");
+        return -1;
+    }
+
+    return channelPtr->GetRoundTripTimeSummary(delaysMs);
+}
+
+int VoECallReportImpl::GetDeadOrAliveSummary(int channel,
+                                             int& numOfDeadDetections,
+                                             int& numOfAliveDetections)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetDeadOrAliveSummary(channel=%d)", channel);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    return (GetDeadOrAliveSummaryInternal(channel, numOfDeadDetections,
+                                          numOfAliveDetections));
+}
+
+int VoECallReportImpl::GetDeadOrAliveSummaryInternal(int channel,
+                                                     int& numOfDeadDetections,
+                                                     int& numOfAliveDetections)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetDeadOrAliveSummary(channel=%d)", channel);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "GetDeadOrAliveSummary() failed to "
+                                       "locate channel");
+        return -1;
+    }
+
+    return channelPtr->GetDeadOrAliveCounters(numOfDeadDetections,
+                                              numOfAliveDetections);
+}
+
+int VoECallReportImpl::WriteReportToFile(const char* fileNameUTF8)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "WriteReportToFile(fileNameUTF8=%s)", fileNameUTF8);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    if (NULL == fileNameUTF8)
+    {
+        _engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                                       "WriteReportToFile() invalid filename");
+        return -1;
+    }
+
+    if (_file.Open())
+    {
+        _file.CloseFile();
+    }
+
+    // Open text file in write mode
+    if (_file.OpenFile(fileNameUTF8, false, false, true) != 0)
+    {
+        _engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
+                                       "WriteReportToFile() unable to open the "
+                                       "file");
+        return -1;
+    }
+
+    // Summarize information and add it to the open file
+    //
+    _file.WriteText("WebRtc VoiceEngine Call Report\n");
+    _file.WriteText("==============================\n");
+    _file.WriteText("\nNetwork Packet Round Trip Time (RTT)\n");
+    _file.WriteText("------------------------------------\n\n");
+
+    WebRtc_Word32 numOfChannels = _channelManager.NumOfChannels();
+    if (numOfChannels <= 0)
+    {
+        return 0;
+    }
+    WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels];
+    _channelManager.GetChannelIds(channelsArray, numOfChannels);
+    for (int ch = 0; ch < numOfChannels; ch++)
+    {
+        voe::ScopedChannel sc(_channelManager, channelsArray[ch]);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr)
+        {
+            StatVal delaysMs;
+            _file.WriteText("channel %d:\n", ch);
+            channelPtr->GetRoundTripTimeSummary(delaysMs);
+            _file.WriteText("  min:%5d [ms]\n", delaysMs.min);
+            _file.WriteText("  max:%5d [ms]\n", delaysMs.max);
+            _file.WriteText("  avg:%5d [ms]\n", delaysMs.average);
+        }
+    }
+
+    _file.WriteText("\nDead-or-Alive Connection Detections\n");
+    _file.WriteText("------------------------------------\n\n");
+
+    for (int ch = 0; ch < numOfChannels; ch++)
+    {
+        voe::ScopedChannel sc(_channelManager, channelsArray[ch]);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr)
+        {
+            int nDead(0);
+            int nAlive(0);
+            _file.WriteText("channel %d:\n", ch);
+            GetDeadOrAliveSummary(ch, nDead, nAlive);
+            _file.WriteText("  #dead :%6d\n", nDead);
+            _file.WriteText("  #alive:%6d\n", nAlive);
+        }
+    }
+
+    delete[] channelsArray;
+
+    LevelStatistics stats;
+    GetSpeechAndNoiseSummary(stats);
+
+    _file.WriteText("\nLong-term Speech Levels\n");
+    _file.WriteText("-----------------------\n\n");
+
+    _file.WriteText("Transmitting side:\n");
+    _file.WriteText("  min:%5d [dBm0]\n", stats.speech_tx.min);
+    _file.WriteText("  max:%5d [dBm0]\n", stats.speech_tx.max);
+    _file.WriteText("  avg:%5d [dBm0]\n", stats.speech_tx.average);
+    _file.WriteText("\nReceiving side:\n");
+    _file.WriteText("  min:%5d [dBm0]\n", stats.speech_rx.min);
+    _file.WriteText("  max:%5d [dBm0]\n", stats.speech_rx.max);
+    _file.WriteText("  avg:%5d [dBm0]\n", stats.speech_rx.average);
+
+    _file.WriteText("\nLong-term Noise Levels\n");
+    _file.WriteText("----------------------\n\n");
+
+    _file.WriteText("Transmitting side:\n");
+    _file.WriteText("  min:%5d [dBm0]\n", stats.noise_tx.min);
+    _file.WriteText("  max:%5d [dBm0]\n", stats.noise_tx.max);
+    _file.WriteText("  avg:%5d [dBm0]\n", stats.noise_tx.average);
+    _file.WriteText("\nReceiving side:\n");
+    _file.WriteText("  min:%5d [dBm0]\n", stats.noise_rx.min);
+    _file.WriteText("  max:%5d [dBm0]\n", stats.noise_rx.max);
+    _file.WriteText("  avg:%5d [dBm0]\n", stats.noise_rx.average);
+
+    EchoStatistics echo;
+    GetEchoMetricSummary(echo);
+
+    _file.WriteText("\nEcho Metrics\n");
+    _file.WriteText("------------\n\n");
+
+    _file.WriteText("erl:\n");
+    _file.WriteText("  min:%5d [dB]\n", echo.erl.min);
+    _file.WriteText("  max:%5d [dB]\n", echo.erl.max);
+    _file.WriteText("  avg:%5d [dB]\n", echo.erl.average);
+    _file.WriteText("\nerle:\n");
+    _file.WriteText("  min:%5d [dB]\n", echo.erle.min);
+    _file.WriteText("  max:%5d [dB]\n", echo.erle.max);
+    _file.WriteText("  avg:%5d [dB]\n", echo.erle.average);
+    _file.WriteText("rerl:\n");
+    _file.WriteText("  min:%5d [dB]\n", echo.rerl.min);
+    _file.WriteText("  max:%5d [dB]\n", echo.rerl.max);
+    _file.WriteText("  avg:%5d [dB]\n", echo.rerl.average);
+    _file.WriteText("a_nlp:\n");
+    _file.WriteText("  min:%5d [dB]\n", echo.a_nlp.min);
+    _file.WriteText("  max:%5d [dB]\n", echo.a_nlp.max);
+    _file.WriteText("  avg:%5d [dB]\n", echo.a_nlp.average);
+
+    _file.WriteText("\n<END>");
+
+    _file.Flush();
+    _file.CloseFile();
+
+    return 0;
+}
+
+#endif  // WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+
+} // namespace webrtc
diff --git a/voice_engine/main/source/voe_call_report_impl.h b/voice_engine/main/source/voe_call_report_impl.h
new file mode 100644
index 0000000..a955609
--- /dev/null
+++ b/voice_engine/main/source/voe_call_report_impl.h
@@ -0,0 +1,63 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_IMPL_H
+
+#include "voe_call_report.h"
+
+#include "ref_count.h"
+#include "shared_data.h"
+
+
+namespace webrtc
+{
+class FileWrapper;
+
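+// Implements the VoECallReport sub-API. Summarizes APM level and echo metrics
+// as well as per-channel round-trip time and dead-or-alive statistics, and
+// can write the summary to a text file.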
+class VoECallReportImpl: public virtual voe::SharedData,
+                         public VoECallReport,
+                         public voe::RefCount
+{
+public:
+    virtual int Release();
+
+    virtual int ResetCallReportStatistics(int channel);
+
+    virtual int GetSpeechAndNoiseSummary(LevelStatistics& stats);
+
+    virtual int GetEchoMetricSummary(EchoStatistics& stats);
+
+    virtual int GetRoundTripTimeSummary(int channel,
+                                        StatVal& delaysMs);
+
+    virtual int GetDeadOrAliveSummary(int channel, int& numOfDeadDetections,
+                                      int& numOfAliveDetections);
+
+    virtual int WriteReportToFile(const char* fileNameUTF8);
+
+protected:
+    VoECallReportImpl();
+    virtual ~VoECallReportImpl();
+
+private:
+    int GetDeadOrAliveSummaryInternal(int channel,
+                                      int& numOfDeadDetections,
+                                      int& numOfAliveDetections);
+
+    int GetEchoMetricSummaryInternal(EchoStatistics& stats);
+
+    int GetSpeechAndNoiseSummaryInternal(LevelStatistics& stats);
+
+    FileWrapper& _file;
+};
+
+} // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_IMPL_H
diff --git a/voice_engine/main/source/voe_codec_impl.cc b/voice_engine/main/source/voe_codec_impl.cc
new file mode 100644
index 0000000..cf0d2d0
--- /dev/null
+++ b/voice_engine/main/source/voe_codec_impl.cc
@@ -0,0 +1,717 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_codec_impl.h"
+
+#include "audio_coding_module.h"
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc
+{
+
+VoECodec* VoECodec::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_CODEC_API
+    return NULL;
+#else
+    if (NULL == voiceEngine)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* s =
+            reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoECodecImpl* d = s;
+    (*d)++;
+    return (d);
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
+
+VoECodecImpl::VoECodecImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoECodecImpl() - ctor");
+}
+
+VoECodecImpl::~VoECodecImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+                 "~VoECodecImpl() - dtor");
+}
+
+int VoECodecImpl::Release()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoECodecImpl::Release()");
+    (*this)--;
+    int refCount = GetCount();
+    if (refCount < 0)
+    {
+        Reset();
+        _engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
+                                       kTraceWarning);
+        return (-1);
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoECodecImpl reference counter = %d", refCount);
+    return (refCount);
+}
+
+int VoECodecImpl::NumOfCodecs()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "NumOfCodecs()");
+
+    // Number of supported codecs in the ACM
+    WebRtc_UWord8 nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "NumOfCodecs() => %u", nSupportedCodecs);
+    return (nSupportedCodecs);
+}
+
+int VoECodecImpl::GetCodec(int index, CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetCodec(index=%d, codec=?)", index);
+    CodecInst acmCodec;
+    if (AudioCodingModule::Codec(index, (CodecInst&) acmCodec)
+            == -1)
+    {
+        _engineStatistics.SetLastError(VE_INVALID_LISTNR, kTraceError,
+                                       "GetCodec() invalid index");
+        return -1;
+    }
+    ACMToExternalCodecRepresentation(codec, acmCodec);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetCodec() => plname=%s, pacsize=%d, plfreq=%d, pltype=%d, "
+                 "channels=%d, rate=%d", codec.plname, codec.pacsize,
+                 codec.plfreq, codec.pltype, codec.channels, codec.rate);
+    return 0;
+}
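+
+// Usage sketch, omitting error checking (assumes an already created and
+// initialized VoiceEngine instance |voe|): enumerate the codecs supported
+// by the ACM.
+//
+//  VoECodec* codec = VoECodec::GetInterface(voe);
+//  CodecInst inst;
+//  for (int i = 0; i < codec->NumOfCodecs(); i++)
+//  {
+//      codec->GetCodec(i, inst);
+//      // inst.plname, inst.plfreq, inst.pltype etc. describe codec |i|.
+//  }
+//  codec->Release();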
+
+int VoECodecImpl::SetSendCodec(int channel, const CodecInst& codec)
+{
+    CodecInst copyCodec;
+    ExternalToACMCodecRepresentation(copyCodec, codec);
+
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetSendCodec(channel=%d, codec)", channel);
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "codec: plname=%s, pacsize=%d, plfreq=%d, pltype=%d, "
+                 "channels=%d, rate=%d", codec.plname, codec.pacsize,
+                 codec.plfreq, codec.pltype, codec.channels, codec.rate);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    // External sanity checks performed outside the ACM
+    if ((STR_CASE_CMP(copyCodec.plname, "L16") == 0) &&
+            (copyCodec.pacsize >= 960))
+    {
+        _engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                                       "SetSendCodec() invalid L16 packet "
+                                       "size");
+        return -1;
+    }
+    if (!STR_CASE_CMP(copyCodec.plname, "CN")
+            || !STR_CASE_CMP(copyCodec.plname, "TELEPHONE-EVENT")
+            || !STR_CASE_CMP(copyCodec.plname, "RED"))
+    {
+        _engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                                       "SetSendCodec() invalid codec name");
+        return -1;
+    }
+    if (copyCodec.channels != 1)
+    {
+        _engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                                       "SetSendCodec() invalid number of "
+                                       "channels");
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetSendCodec() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    if (!AudioCodingModule::IsCodecValid(
+            (CodecInst&) copyCodec))
+    {
+        _engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                                       "SetSendCodec() invalid codec");
+        return -1;
+    }
+    if (channelPtr->SetSendCodec(copyCodec) != 0)
+    {
+        _engineStatistics.SetLastError(VE_CANNOT_SET_SEND_CODEC,
+                                       kTraceError,
+                                       "SetSendCodec() failed to set send "
+                                       "codec");
+        return -1;
+    }
+
+    return 0;
+}
+
+int VoECodecImpl::GetSendCodec(int channel, CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetSendCodec(channel=%d, codec=?)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "GetSendCodec() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    CodecInst acmCodec;
+    if (channelPtr->GetSendCodec(acmCodec) != 0)
+    {
+        _engineStatistics.SetLastError(VE_CANNOT_GET_SEND_CODEC, kTraceError,
+                                       "GetSendCodec() failed to get send "
+                                       "codec");
+        return -1;
+    }
+    ACMToExternalCodecRepresentation(codec, acmCodec);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetSendCodec() => plname=%s, pacsize=%d, plfreq=%d, "
+                 "channels=%d, rate=%d", codec.plname, codec.pacsize,
+                 codec.plfreq, codec.channels, codec.rate);
+    return 0;
+}
+
+int VoECodecImpl::GetRecCodec(int channel, CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetRecCodec(channel=%d, codec=?)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "GetRecCodec() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    CodecInst acmCodec;
+    if (channelPtr->GetRecCodec(acmCodec) != 0)
+    {
+        _engineStatistics.SetLastError(VE_CANNOT_GET_REC_CODEC, kTraceError,
+                                       "GetRecCodec() failed to get received "
+                                       "codec");
+        return -1;
+    }
+    ACMToExternalCodecRepresentation(codec, acmCodec);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetRecCodec() => plname=%s, pacsize=%d, plfreq=%d, "
+                 "channels=%d, rate=%d", codec.plname, codec.pacsize,
+                 codec.plfreq, codec.channels, codec.rate);
+    return 0;
+}
+
+int VoECodecImpl::SetAMREncFormat(int channel, AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetAMREncFormat(channel=%d, mode=%d)", channel, mode);
+#ifdef WEBRTC_CODEC_GSMAMR
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetAMREncFormat() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    return channelPtr->SetAMREncFormat(mode);
+#else
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                                   "SetAMREncFormat() AMR codec is not "
+                                   "supported");
+    return -1;
+#endif
+}
+
+int VoECodecImpl::SetAMRDecFormat(int channel, AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetAMRDecFormat(channel=%i, mode=%i)", channel, mode);
+#ifdef WEBRTC_CODEC_GSMAMR
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetAMRDecFormat() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    return channelPtr->SetAMRDecFormat(mode);
+#else
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                                   "SetAMRDecFormat() AMR codec is not "
+                                   "supported");
+    return -1;
+#endif
+}
+
+int VoECodecImpl::SetAMRWbEncFormat(int channel, AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetAMRWbEncFormat(channel=%d, mode=%d)", channel, mode);
+    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
+#ifdef WEBRTC_CODEC_GSMAMRWB
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetAMRWbEncFormat() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    return channelPtr->SetAMRWbEncFormat(mode);
+#else
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                                   "SetAMRWbEncFormat() AMR-wb codec is not "
+                                   "supported");
+    return -1;
+#endif
+}
+
+int VoECodecImpl::SetAMRWbDecFormat(int channel, AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetAMRWbDecFormat(channel=%i, mode=%i)", channel, mode);
+    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
+#ifdef WEBRTC_CODEC_GSMAMRWB
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetAMRWbDecFormat() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    return channelPtr->SetAMRWbDecFormat(mode);
+#else
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                                   "SetAMRWbDecFormat() AMR-wb codec is not "
+                                   "supported");
+    return -1;
+#endif
+}
+
+int VoECodecImpl::SetRecPayloadType(int channel, const CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetRecPayloadType(channel=%d, codec)", channel);
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "codec: plname=%s, plfreq=%d, pltype=%d, channels=%u, "
+               "pacsize=%d, rate=%d", codec.plname, codec.plfreq, codec.pltype,
+               codec.channels, codec.pacsize, codec.rate);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetRecPayloadType() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    return channelPtr->SetRecPayloadType(codec);
+}
+
+int VoECodecImpl::GetRecPayloadType(int channel, CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetRecPayloadType(channel=%d, codec)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "GetRecPayloadType() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    return channelPtr->GetRecPayloadType(codec);
+}
+
+int VoECodecImpl::SetSendCNPayloadType(int channel, int type,
+                                       PayloadFrequencies frequency)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetSendCNPayloadType(channel=%d, type=%d, frequency=%d)",
+                 channel, type, frequency);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (type < 96 || type > 127)
+    {
+        // Only allow dynamic range: 96 to 127
+        _engineStatistics.SetLastError(VE_INVALID_PLTYPE, kTraceError,
+                                       "SetSendCNPayloadType() invalid payload "
+                                       "type");
+        return -1;
+    }
+    if ((frequency != kFreq16000Hz) && (frequency != kFreq32000Hz))
+    {
+        // It is not possible to modify the payload type for CN/8000.
+        // We only allow modification of the CN payload type for CN/16000
+        // and CN/32000.
+        _engineStatistics.SetLastError(VE_INVALID_PLFREQ, kTraceError,
+                                       "SetSendCNPayloadType() invalid payload"
+                                       " frequency");
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetSendCNPayloadType() failed to "
+                                       "locate channel");
+        return -1;
+    }
+    if (channelPtr->Sending())
+    {
+        _engineStatistics.SetLastError(VE_SENDING, kTraceError,
+                                       "SetSendCNPayloadType() unable to set "
+                                       "payload type while sending");
+        return -1;
+    }
+    return channelPtr->SetSendCNPayloadType(type, frequency);
+}
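+
+// Usage sketch, omitting error checking: the CN payload type can only be
+// remapped for CN/16000 and CN/32000, the new type must lie in the dynamic
+// range 96-127, and the channel must not be sending. Assumes |codec| is the
+// VoECodec interface, |ch| an existing channel and 98 an arbitrarily chosen
+// dynamic payload type.
+//
+//  codec->SetSendCNPayloadType(ch, 98, kFreq16000Hz);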
+
+int VoECodecImpl::SetISACInitTargetRate(int channel, int rateBps,
+                                        bool useFixedFrameSize)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetISACInitTargetRate(channel=%d, rateBps=%d, "
+                 "useFixedFrameSize=%d)", channel, rateBps, useFixedFrameSize);
+    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
+#ifdef WEBRTC_CODEC_ISAC
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetISACInitTargetRate() failed to "
+                                       "locate channel");
+        return -1;
+    }
+    return channelPtr->SetISACInitTargetRate(rateBps, useFixedFrameSize);
+#else
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                                   "SetISACInitTargetRate() iSAC codec is not "
+                                   "supported");
+    return -1;
+#endif
+}
+
+int VoECodecImpl::SetISACMaxRate(int channel, int rateBps)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetISACMaxRate(channel=%d, rateBps=%d)", channel, rateBps);
+    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
+#ifdef WEBRTC_CODEC_ISAC
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetISACMaxRate() failed to locate "
+                                       "channel");
+        return -1;
+    }
+    return channelPtr->SetISACMaxRate(rateBps);
+#else
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                                   "SetISACMaxRate() iSAC codec is not "
+                                   "supported");
+    return -1;
+#endif
+}
+
+int VoECodecImpl::SetISACMaxPayloadSize(int channel, int sizeBytes)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetISACMaxPayloadSize(channel=%d, sizeBytes=%d)", channel,
+                 sizeBytes);
+    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
+#ifdef WEBRTC_CODEC_ISAC
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetISACMaxPayloadSize() failed to "
+                                       "locate channel");
+        return -1;
+    }
+    return channelPtr->SetISACMaxPayloadSize(sizeBytes);
+#else
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                                   "SetISACMaxPayloadSize() iSAC codec is not "
+                                   "supported");
+    return -1;
+#endif
+}
+
+int VoECodecImpl::SetVADStatus(int channel, bool enable, VadModes mode,
+                               bool disableDTX)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetVADStatus(channel=%i, enable=%i, mode=%i, disableDTX=%i)",
+                 channel, enable, mode, disableDTX);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetVADStatus failed to locate channel");
+        return -1;
+    }
+
+    ACMVADMode vadMode(VADNormal);
+    switch (mode)
+    {
+        case kVadConventional:
+            vadMode = VADNormal;
+            break;
+        case kVadAggressiveLow:
+            vadMode = VADLowBitrate;
+            break;
+        case kVadAggressiveMid:
+            vadMode = VADAggr;
+            break;
+        case kVadAggressiveHigh:
+            vadMode = VADVeryAggr;
+            break;
+        default:
+            _engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                                           "SetVADStatus() invalid VAD mode");
+            return -1;
+    }
+    return channelPtr->SetVADStatus(enable, vadMode, disableDTX);
+}
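+
+// Usage sketch, omitting error checking: enable VAD/DTX on a channel with the
+// most aggressive VAD mode while leaving DTX active (disableDTX defaults to
+// false). Assumes |codec| is the VoECodec interface and |ch| a valid channel.
+//
+//  codec->SetVADStatus(ch, true, kVadAggressiveHigh);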
+
+int VoECodecImpl::GetVADStatus(int channel, bool& enabled, VadModes& mode,
+                               bool& disabledDTX)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetVADStatus(channel=%i)", channel);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "GetVADStatus failed to locate channel");
+        return -1;
+    }
+
+    ACMVADMode vadMode;
+    int ret = channelPtr->GetVADStatus(enabled, vadMode, disabledDTX);
+
+    if (ret != 0)
+    {
+        _engineStatistics.SetLastError(VE_INVALID_OPERATION, kTraceError,
+                                       "GetVADStatus failed to get VAD mode");
+        return -1;
+    }
+    switch (vadMode)
+    {
+        case VADNormal:
+            mode = kVadConventional;
+            break;
+        case VADLowBitrate:
+            mode = kVadAggressiveLow;
+            break;
+        case VADAggr:
+            mode = kVadAggressiveMid;
+            break;
+        case VADVeryAggr:
+            mode = kVadAggressiveHigh;
+            break;
+        default:
+            _engineStatistics.SetLastError(VE_AUDIO_CODING_MODULE_ERROR,
+                                           kTraceError,
+                                           "GetVADStatus() invalid VAD mode");
+            return -1;
+    }
+
+    return 0;
+}
+
+void VoECodecImpl::ACMToExternalCodecRepresentation(CodecInst& toInst,
+                                                    const CodecInst& fromInst)
+{
+    toInst = fromInst;
+    if (STR_CASE_CMP(fromInst.plname,"SILK") == 0)
+    {
+        if (fromInst.plfreq == 12000)
+        {
+            if (fromInst.pacsize == 320)
+            {
+                toInst.pacsize = 240;
+            }
+            else if (fromInst.pacsize == 640)
+            {
+                toInst.pacsize = 480;
+            }
+            else if (fromInst.pacsize == 960)
+            {
+                toInst.pacsize = 720;
+            }
+        }
+        else if (fromInst.plfreq == 24000)
+        {
+            if (fromInst.pacsize == 640)
+            {
+                toInst.pacsize = 480;
+            }
+            else if (fromInst.pacsize == 1280)
+            {
+                toInst.pacsize = 960;
+            }
+            else if (fromInst.pacsize == 1920)
+            {
+                toInst.pacsize = 1440;
+            }
+        }
+    }
+}
+
+void VoECodecImpl::ExternalToACMCodecRepresentation(CodecInst& toInst,
+                                                    const CodecInst& fromInst)
+{
+    toInst = fromInst;
+    if (STR_CASE_CMP(fromInst.plname,"SILK") == 0)
+    {
+        if (fromInst.plfreq == 12000)
+        {
+            if (fromInst.pacsize == 240)
+            {
+                toInst.pacsize = 320;
+            }
+            else if (fromInst.pacsize == 480)
+            {
+                toInst.pacsize = 640;
+            }
+            else if (fromInst.pacsize == 720)
+            {
+                toInst.pacsize = 960;
+            }
+        }
+        else if (fromInst.plfreq == 24000)
+        {
+            if (fromInst.pacsize == 480)
+            {
+                toInst.pacsize = 640;
+            }
+            else if (fromInst.pacsize == 960)
+            {
+                toInst.pacsize = 1280;
+            }
+            else if (fromInst.pacsize == 1440)
+            {
+                toInst.pacsize = 1920;
+            }
+        }
+    }
+}
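+
+// Note on the two SILK conversions above: each pacsize pair corresponds to the
+// same frame duration when the external value is read at the 12/24 kHz SILK
+// clock and the ACM value at a 16/32 kHz clock. For plfreq 12000: 240<->320
+// (20 ms), 480<->640 (40 ms), 720<->960 (60 ms); for plfreq 24000: 480<->640
+// (20 ms), 960<->1280 (40 ms), 1440<->1920 (60 ms).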
+
+#endif  // WEBRTC_VOICE_ENGINE_CODEC_API
+
+} // namespace webrtc
diff --git a/voice_engine/main/source/voe_codec_impl.h b/voice_engine/main/source/voe_codec_impl.h
new file mode 100644
index 0000000..fde1d41
--- /dev/null
+++ b/voice_engine/main/source/voe_codec_impl.h
@@ -0,0 +1,92 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_CODEC_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_CODEC_IMPL_H
+
+#include "voe_codec.h"
+
+#include "ref_count.h"
+#include "shared_data.h"
+
+namespace webrtc
+{
+
+class VoECodecImpl: public virtual voe::SharedData,
+                    public VoECodec,
+                    public voe::RefCount
+{
+public:
+    virtual int Release();
+
+    virtual int NumOfCodecs();
+
+    virtual int GetCodec(int index, CodecInst& codec);
+
+    virtual int SetSendCodec(int channel, const CodecInst& codec);
+
+    virtual int GetSendCodec(int channel, CodecInst& codec);
+
+    virtual int GetRecCodec(int channel, CodecInst& codec);
+
+    virtual int SetAMREncFormat(int channel,
+                                AmrMode mode = kRfc3267BwEfficient);
+
+    virtual int SetAMRDecFormat(int channel,
+                                AmrMode mode = kRfc3267BwEfficient);
+
+    virtual int SetAMRWbEncFormat(int channel,
+                                  AmrMode mode = kRfc3267BwEfficient);
+
+    virtual int SetAMRWbDecFormat(int channel,
+                                  AmrMode mode = kRfc3267BwEfficient);
+
+    virtual int SetSendCNPayloadType(
+        int channel, int type,
+        PayloadFrequencies frequency = kFreq16000Hz);
+
+    virtual int SetRecPayloadType(int channel,
+                                  const CodecInst& codec);
+
+    virtual int GetRecPayloadType(int channel, CodecInst& codec);
+
+    virtual int SetISACInitTargetRate(int channel,
+                                      int rateBps,
+                                      bool useFixedFrameSize = false);
+
+    virtual int SetISACMaxRate(int channel, int rateBps);
+
+    virtual int SetISACMaxPayloadSize(int channel, int sizeBytes);
+
+    virtual int SetVADStatus(int channel,
+                             bool enable,
+                             VadModes mode = kVadConventional,
+                             bool disableDTX = false);
+
+    virtual int GetVADStatus(int channel,
+                             bool& enabled,
+                             VadModes& mode,
+                             bool& disabledDTX);
+
+protected:
+    VoECodecImpl();
+    virtual ~VoECodecImpl();
+
+private:
+    void ACMToExternalCodecRepresentation(CodecInst& toInst,
+                                          const CodecInst& fromInst);
+
+    void ExternalToACMCodecRepresentation(CodecInst& toInst,
+                                          const CodecInst& fromInst);
+};
+
+} // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_VOE_CODEC_IMPL_H
diff --git a/voice_engine/main/source/voe_dtmf_impl.cc b/voice_engine/main/source/voe_dtmf_impl.cc
new file mode 100644
index 0000000..177f08f
--- /dev/null
+++ b/voice_engine/main/source/voe_dtmf_impl.cc
@@ -0,0 +1,473 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_dtmf_impl.h"
+
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "output_mixer.h"
+#include "trace.h"
+#include "transmit_mixer.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc {
+
+VoEDtmf* VoEDtmf::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_DTMF_API
+    return NULL;
+#else
+    if (NULL == voiceEngine)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* s =
+        reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoEDtmfImpl* d = s;
+    (*d)++;
+    return (d);
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
+
+VoEDtmfImpl::VoEDtmfImpl() :
+    _dtmfFeedback(true),
+    _dtmfDirectFeedback(false)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEDtmfImpl::VoEDtmfImpl() - ctor");
+}
+
+VoEDtmfImpl::~VoEDtmfImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEDtmfImpl::~VoEDtmfImpl() - dtor");
+}
+
+int VoEDtmfImpl::Release()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEDtmf::Release()");
+    (*this)--;
+    int refCount = GetCount();
+    if (refCount < 0)
+    {
+        Reset(); // reset reference counter to zero => OK to delete VE
+        _engineStatistics.SetLastError(
+            VE_INTERFACE_NOT_FOUND, kTraceWarning);
+        return (-1);
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEDtmf reference counter = %d", refCount);
+    return (refCount);
+}
+
+int VoEDtmfImpl::SendTelephoneEvent(int channel,
+                                    unsigned char eventCode,
+                                    bool outOfBand,
+                                    int lengthMs,
+                                    int attenuationDb)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SendTelephoneEvent(channel=%d, eventCode=%d, outOfBand=%d, "
+                 "length=%d, attenuationDb=%d)",
+                 channel, eventCode, (int)outOfBand, lengthMs, attenuationDb);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SendTelephoneEvent() failed to locate channel");
+        return -1;
+    }
+    if (!channelPtr->Sending())
+    {
+        _engineStatistics.SetLastError(
+            VE_NOT_SENDING, kTraceError,
+            "SendTelephoneEvent() sending is not active");
+        return -1;
+    }
+
+    // Sanity check
+    const int maxEventCode = outOfBand ?
+        static_cast<int>(kMaxTelephoneEventCode) :
+        static_cast<int>(kMaxDtmfEventCode);
+    const bool testFailed = ((eventCode < 0) ||
+        (eventCode > maxEventCode) ||
+        (lengthMs < kMinTelephoneEventDuration) ||
+        (lengthMs > kMaxTelephoneEventDuration) ||
+        (attenuationDb < kMinTelephoneEventAttenuation) ||
+        (attenuationDb > kMaxTelephoneEventAttenuation));
+    if (testFailed)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SendTelephoneEvent() invalid parameter(s)");
+        return -1;
+    }
+
+    const bool isDtmf =
+        (eventCode >= 0) && (eventCode <= kMaxDtmfEventCode);
+    const bool playDtmfToneDirect =
+        isDtmf && (_dtmfFeedback && _dtmfDirectFeedback);
+
+    if (playDtmfToneDirect)
+    {
+        // Mute the microphone signal while playing back the tone directly.
+        // This is to reduce the risk of introducing echo from the added output.
+        _transmitMixerPtr->UpdateMuteMicrophoneTime(lengthMs);
+
+        // Play out local feedback tone directly (same approach for both inband
+        // and outband).
+        // Reduce the length of the tone by 80 ms to reduce the risk of echo.
+        // For non-direct feedback, outband and inband cases are handled
+        // differently.
+        _outputMixerPtr->PlayDtmfTone(eventCode, lengthMs-80, attenuationDb);
+    }
+
+    if (outOfBand)
+    {
+        // The RTP/RTCP module will always deliver OnPlayTelephoneEvent when
+        // an event is transmitted. It is up to the VoE to utilize it or not.
+        // This flag ensures that feedback/playout is enabled; however, the
+        // channel object must still parse out the Dtmf events (0-15) from
+        // all possible events (0-255).
+        const bool playDtmfEvent = (_dtmfFeedback && !_dtmfDirectFeedback);
+
+        return channelPtr->SendTelephoneEventOutband(eventCode,
+                                                     lengthMs,
+                                                     attenuationDb,
+                                                     playDtmfEvent);
+    }
+    else
+    {
+        // For Dtmf tones, we want to ensure that inband tones are played out
+        // in sync with the transmitted audio. This flag is utilized by the
+        // channel object to determine if the queued Dtmf event shall also
+        // be fed to the output mixer in the same step as input audio is
+        // replaced by inband Dtmf tones.
+        const bool playDtmfEvent =
+            (isDtmf && _dtmfFeedback && !_dtmfDirectFeedback);
+
+        return channelPtr->SendTelephoneEventInband(eventCode,
+                                                    lengthMs,
+                                                    attenuationDb,
+                                                    playDtmfEvent);
+    }
+}
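+
+// Usage sketch, omitting error checking: give local feedback through the
+// regular (non-direct) event path and transmit DTMF digit 5 out-of-band as an
+// RTP telephone-event with the default duration and attenuation. Assumes
+// |dtmf| is the VoEDtmf interface and |ch| a channel that is sending.
+//
+//  dtmf->SetDtmfFeedbackStatus(true, false);
+//  dtmf->SendTelephoneEvent(ch, 5, true);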
+
+int VoEDtmfImpl::SetSendTelephoneEventPayloadType(int channel,
+                                                  unsigned char type)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetSendTelephoneEventPayloadType(channel=%d, type=%u)",
+                 channel, type);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetSendTelephoneEventPayloadType() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetSendTelephoneEventPayloadType(type);
+}
+
+int VoEDtmfImpl::GetSendTelephoneEventPayloadType(int channel,
+                                                  unsigned char& type)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetSendTelephoneEventPayloadType(channel=%d)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetSendTelephoneEventPayloadType() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetSendTelephoneEventPayloadType(type);
+}
+
+int VoEDtmfImpl::PlayDtmfTone(unsigned char eventCode,
+                              int lengthMs,
+                              int attenuationDb)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "PlayDtmfTone(eventCode=%d, lengthMs=%d, attenuationDb=%d)",
+                 eventCode, lengthMs, attenuationDb);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (!_audioDevicePtr->Playing())
+    {
+        _engineStatistics.SetLastError(
+            VE_NOT_PLAYING, kTraceError,
+            "PlayDtmfTone() no channel is playing out");
+        return -1;
+    }
+    if ((eventCode < kMinDtmfEventCode) ||
+        (eventCode > kMaxDtmfEventCode) ||
+        (lengthMs < kMinTelephoneEventDuration) ||
+        (lengthMs > kMaxTelephoneEventDuration) ||
+        (attenuationDb < kMinTelephoneEventAttenuation) ||
+        (attenuationDb > kMaxTelephoneEventAttenuation))
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "PlayDtmfTone() invalid tone parameter(s)");
+        return -1;
+    }
+    return _outputMixerPtr->PlayDtmfTone(eventCode, lengthMs, attenuationDb);
+}
+
+int VoEDtmfImpl::StartPlayingDtmfTone(unsigned char eventCode,
+                                      int attenuationDb)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StartPlayingDtmfTone(eventCode=%d, attenuationDb=%d)",
+                 eventCode, attenuationDb);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (!_audioDevicePtr->Playing())
+    {
+        _engineStatistics.SetLastError(
+            VE_NOT_PLAYING, kTraceError,
+            "StartPlayingDtmfTone() no channel is playing out");
+        return -1;
+    }
+    if ((eventCode < kMinDtmfEventCode) ||
+        (eventCode > kMaxDtmfEventCode) ||
+        (attenuationDb < kMinTelephoneEventAttenuation) ||
+        (attenuationDb > kMaxTelephoneEventAttenuation))
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartPlayingDtmfTone() invalid tone parameter(s)");
+        return -1;
+    }
+    return _outputMixerPtr->StartPlayingDtmfTone(eventCode, attenuationDb);
+}
+
+int VoEDtmfImpl::StopPlayingDtmfTone()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StopPlayingDtmfTone()");
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    return _outputMixerPtr->StopPlayingDtmfTone();
+}
+
+int VoEDtmfImpl::RegisterTelephoneEventDetection(
+    int channel,
+    TelephoneEventDetectionMethods detectionMethod,
+    VoETelephoneEventObserver& observer)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "RegisterTelephoneEventDetection(channel=%d, detectionMethod=%d,"
+                 " observer=0x%x)", channel, detectionMethod, &observer);
+#ifdef WEBRTC_DTMF_DETECTION
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "RegisterTelephoneEventDetection() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->RegisterTelephoneEventDetection(detectionMethod,
+                                                       observer);
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "RegisterTelephoneEventDetection() Dtmf detection is not supported");
+    return -1;
+#endif
+}
+
+int VoEDtmfImpl::DeRegisterTelephoneEventDetection(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+            "DeRegisterTelephoneEventDetection(channel=%d)", channel);
+#ifdef WEBRTC_DTMF_DETECTION
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "DeRegisterTelephoneEventDetection() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->DeRegisterTelephoneEventDetection();
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "DeRegisterTelephoneEventDetection() Dtmf detection is not supported");
+    return -1;
+#endif
+}
+
+
+int VoEDtmfImpl::GetTelephoneEventDetectionStatus(
+    int channel,
+    bool& enabled,
+    TelephoneEventDetectionMethods& detectionMethod)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "GetTelephoneEventDetectionStatus(channel=%d)", channel);
+#ifdef WEBRTC_DTMF_DETECTION
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetTelephoneEventDetectionStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetTelephoneEventDetectionStatus(enabled,
+                                                        detectionMethod);
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "GetTelephoneEventDetectionStatus() Dtmf detection is not supported");
+    return -1;
+#endif
+}
+
+int VoEDtmfImpl::SetDtmfFeedbackStatus(bool enable, bool directFeedback)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetDtmfFeedbackStatus(enable=%d, directFeedback=%d)",
+                 (int)enable, (int)directFeedback);
+
+    CriticalSectionScoped sc(*_apiCritPtr);
+
+    _dtmfFeedback = enable;
+    _dtmfDirectFeedback = directFeedback;
+
+    return 0;
+}
+
+int VoEDtmfImpl::GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetDtmfFeedbackStatus()");
+
+    CriticalSectionScoped sc(*_apiCritPtr);
+
+    enabled = _dtmfFeedback;
+    directFeedback = _dtmfDirectFeedback;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetDtmfFeedbackStatus() => enabled=%d, directFeedback=%d",
+                 enabled, directFeedback);
+    return 0;
+}
+
+int VoEDtmfImpl::SetDtmfPlayoutStatus(int channel, bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetDtmfPlayoutStatus(channel=%d, enable=%d)",
+                 channel, enable);
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetDtmfPlayoutStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetDtmfPlayoutStatus(enable);
+}
+
+int VoEDtmfImpl::GetDtmfPlayoutStatus(int channel, bool& enabled)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetDtmfPlayoutStatus(channel=%d, enabled=?)", channel);
+    IPHONE_NOT_SUPPORTED();
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetDtmfPlayoutStatus() failed to locate channel");
+        return -1;
+    }
+    enabled = channelPtr->DtmfPlayoutStatus();
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetDtmfPlayoutStatus() => enabled=%d", enabled);
+    return 0;
+}
+
+#endif  // #ifdef WEBRTC_VOICE_ENGINE_DTMF_API
+
+}  // namespace webrtc
diff --git a/voice_engine/main/source/voe_dtmf_impl.h b/voice_engine/main/source/voe_dtmf_impl.h
new file mode 100644
index 0000000..2d7a2e5
--- /dev/null
+++ b/voice_engine/main/source/voe_dtmf_impl.h
@@ -0,0 +1,83 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_DTMF_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_DTMF_IMPL_H
+
+#include "voe_dtmf.h"
+
+#include "ref_count.h"
+#include "shared_data.h"
+
+namespace webrtc
+{
+
+class VoEDtmfImpl : public virtual voe::SharedData,
+                    public VoEDtmf,
+                    public voe::RefCount
+{
+public:
+    virtual int Release();
+
+    virtual int SendTelephoneEvent(
+        int channel,
+        unsigned char eventCode,
+        bool outOfBand = true,
+        int lengthMs = 160,
+        int attenuationDb = 10);
+
+    virtual int SetSendTelephoneEventPayloadType(int channel,
+                                                 unsigned char type);
+
+    virtual int GetSendTelephoneEventPayloadType(int channel,
+                                                 unsigned char& type);
+
+    virtual int SetDtmfFeedbackStatus(bool enable,
+                                      bool directFeedback = false);
+
+    virtual int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback);
+
+    virtual int PlayDtmfTone(unsigned char eventCode,
+                             int lengthMs = 200,
+                             int attenuationDb = 10);
+
+    virtual int StartPlayingDtmfTone(unsigned char eventCode,
+                                     int attenuationDb = 10);
+
+    virtual int StopPlayingDtmfTone();
+
+    virtual int RegisterTelephoneEventDetection(
+        int channel,
+        TelephoneEventDetectionMethods detectionMethod,
+        VoETelephoneEventObserver& observer);
+
+    virtual int DeRegisterTelephoneEventDetection(int channel);
+
+    virtual int GetTelephoneEventDetectionStatus(
+        int channel,
+        bool& enabled,
+        TelephoneEventDetectionMethods& detectionMethod);
+
+    virtual int SetDtmfPlayoutStatus(int channel, bool enable);
+
+    virtual int GetDtmfPlayoutStatus(int channel, bool& enabled);
+
+protected:
+    VoEDtmfImpl();
+    virtual ~VoEDtmfImpl();
+
+private:
+    bool _dtmfFeedback;
+    bool _dtmfDirectFeedback;
+};
+
+} // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_VOE_DTMF_IMPL_H
diff --git a/voice_engine/main/source/voe_encryption_impl.cc b/voice_engine/main/source/voe_encryption_impl.cc
new file mode 100644
index 0000000..5ba944b
--- /dev/null
+++ b/voice_engine/main/source/voe_encryption_impl.cc
@@ -0,0 +1,275 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_encryption_impl.h"
+
+
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc {
+
+VoEEncryption* VoEEncryption::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+    return NULL;
+#else
+    if (NULL == voiceEngine)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* s =
+        reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoEEncryptionImpl* d = s;
+    (*d)++;
+    return (d);
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+
+VoEEncryptionImpl::VoEEncryptionImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEEncryptionImpl::VoEEncryptionImpl() - ctor");
+}
+
+VoEEncryptionImpl::~VoEEncryptionImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEEncryptionImpl::~VoEEncryptionImpl() - dtor");
+}
+
+int VoEEncryptionImpl::Release()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEEncryption::Release()");
+    (*this)--;
+    int refCount = GetCount();
+    if (refCount < 0)
+    {
+        // reset reference counter to zero => OK to delete VE
+        Reset();
+        _engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
+                                       kTraceWarning);
+        return (-1);
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEEncryption reference counter = %d", refCount);
+    return (refCount);
+}
+
+int VoEEncryptionImpl::EnableSRTPSend(
+    int channel,
+    CipherTypes cipherType,
+    int cipherKeyLength,
+    AuthenticationTypes authType,
+    int authKeyLength,
+    int authTagLength,
+    SecurityLevels level,
+    const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
+    bool useForRTCP)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "EnableSRTPSend(channel=%i, cipherType=%i, cipherKeyLength=%i,"
+                 " authType=%i, authKeyLength=%i, authTagLength=%i, level=%i, "
+                 "key=?, useForRTCP=%d)",
+                 channel, cipherType, cipherKeyLength, authType,
+                 authKeyLength, authTagLength, level, useForRTCP);
+#ifdef WEBRTC_SRTP
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "EnableSRTPSend() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->EnableSRTPSend(cipherType,
+                                      cipherKeyLength,
+                                      authType,
+                                      authKeyLength,
+                                      authTagLength,
+                                      level,
+                                      key,
+                                      useForRTCP);
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "EnableSRTPSend() SRTP is not supported");
+    return -1;
+#endif
+}
+
+int VoEEncryptionImpl::DisableSRTPSend(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "DisableSRTPSend(channel=%i)",channel);
+#ifdef WEBRTC_SRTP
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "DisableSRTPSend() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->DisableSRTPSend();
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "DisableSRTPSend() SRTP is not supported");
+    return -1;
+#endif
+}
+
+int VoEEncryptionImpl::EnableSRTPReceive(
+    int channel,
+    CipherTypes cipherType,
+    int cipherKeyLength,
+    AuthenticationTypes authType,
+    int authKeyLength,
+    int authTagLength,
+    SecurityLevels level,
+    const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
+    bool useForRTCP)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "EnableSRTPReceive(channel=%i, cipherType=%i, "
+                 "cipherKeyLength=%i, authType=%i, authKeyLength=%i, "
+                 "authTagLength=%i, level=%i, key=?, useForRTCP=%d)",
+                 channel, cipherType, cipherKeyLength, authType,
+                 authKeyLength, authTagLength, level, useForRTCP);
+#ifdef WEBRTC_SRTP
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "EnableSRTPReceive() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->EnableSRTPReceive(cipherType,
+                                         cipherKeyLength,
+                                         authType,
+                                         authKeyLength,
+                                         authTagLength,
+                                         level,
+                                         key,
+                                         useForRTCP);
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "EnableSRTPReceive() SRTP is not supported");
+    return -1;
+#endif
+}
+
+int VoEEncryptionImpl::DisableSRTPReceive(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "DisableSRTPReceive(channel=%i)", channel);
+#ifdef WEBRTC_SRTP
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "DisableSRTPReceive() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->DisableSRTPReceive();
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "DisableSRTPReceive() SRTP is not supported");
+    return -1;
+#endif
+}
+
+int VoEEncryptionImpl::RegisterExternalEncryption(int channel,
+                                                  Encryption& encryption)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "RegisterExternalEncryption(channel=%d, encryption=0x%x)",
+                 channel, &encryption);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "RegisterExternalEncryption() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->RegisterExternalEncryption(encryption);
+}
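+
+// Usage sketch, omitting error checking: external encryption is driven by a
+// user-supplied object implementing the webrtc::Encryption callback interface
+// (declared with the other VoiceEngine types). |MyEncryption| is a
+// hypothetical user class deriving from it, |crypto| an instance of it,
+// |voe| the VoiceEngine and |ch| a valid channel.
+//
+//  VoEEncryption* encrypt = VoEEncryption::GetInterface(voe);
+//  MyEncryption crypto;  // user-defined, derives from webrtc::Encryption
+//  encrypt->RegisterExternalEncryption(ch, crypto);
+//  ...
+//  encrypt->DeRegisterExternalEncryption(ch);
+//  encrypt->Release();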
+
+int VoEEncryptionImpl::DeRegisterExternalEncryption(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "DeRegisterExternalEncryption(channel=%d)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "DeRegisterExternalEncryption() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->DeRegisterExternalEncryption();
+}
+
+#endif  // #ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+
+// EOF
+}  // namespace webrtc
diff --git a/voice_engine/main/source/voe_encryption_impl.h b/voice_engine/main/source/voe_encryption_impl.h
new file mode 100644
index 0000000..050dd88
--- /dev/null
+++ b/voice_engine/main/source/voe_encryption_impl.h
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_IMPL_H
+
+#include "voe_encryption.h"
+
+#include "ref_count.h"
+#include "shared_data.h"
+
+namespace webrtc {
+
+class VoEEncryptionImpl : public virtual voe::SharedData,
+                          public VoEEncryption,
+                          public voe::RefCount
+{
+public:
+
+    virtual int Release();
+
+    // SRTP
+    virtual int EnableSRTPSend(
+        int channel,
+        CipherTypes cipherType,
+        int cipherKeyLength,
+        AuthenticationTypes authType,
+        int authKeyLength,
+        int authTagLength,
+        SecurityLevels level,
+        const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
+        bool useForRTCP = false);
+
+    virtual int DisableSRTPSend(int channel);
+
+    virtual int EnableSRTPReceive(
+        int channel,
+        CipherTypes cipherType,
+        int cipherKeyLength,
+        AuthenticationTypes authType,
+        int authKeyLength,
+        int authTagLength,
+        SecurityLevels level,
+        const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
+        bool useForRTCP = false);
+
+    virtual int DisableSRTPReceive(int channel);
+
+    // External encryption
+    virtual int RegisterExternalEncryption(
+        int channel,
+        Encryption& encryption);
+
+    virtual int DeRegisterExternalEncryption(int channel);
+
+protected:
+    VoEEncryptionImpl();
+    virtual ~VoEEncryptionImpl();
+};
+
+}   // namespace webrtc
+
+#endif  // #ifndef WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_IMPL_H
diff --git a/voice_engine/main/source/voe_external_media_impl.cc b/voice_engine/main/source/voe_external_media_impl.cc
new file mode 100644
index 0000000..bbf5e44
--- /dev/null
+++ b/voice_engine/main/source/voe_external_media_impl.cc
@@ -0,0 +1,406 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_external_media_impl.h"
+
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "output_mixer.h"
+#include "trace.h"
+#include "transmit_mixer.h"
+#include "voice_engine_impl.h"
+#include "voe_errors.h"
+
+namespace webrtc {
+
+VoEExternalMedia* VoEExternalMedia::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+    return NULL;
+#else
+    if (NULL == voiceEngine)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* s = reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoEExternalMediaImpl* d = s;
+    (*d)++;
+    return (d);
+#endif
+}
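+
+// Note: GetInterface() above increments the sub-API reference count via
+// (*d)++; each successful call should be balanced by a call to Release()
+// (implemented below), which decrements the counter again.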
+
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+
+VoEExternalMediaImpl::VoEExternalMediaImpl()
+    : playout_delay_ms_(0)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEExternalMediaImpl() - ctor");
+}
+
+VoEExternalMediaImpl::~VoEExternalMediaImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "~VoEExternalMediaImpl() - dtor");
+}
+
+int VoEExternalMediaImpl::Release()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEExternalMedia::Release()");
+    (*this)--;
+    int refCount = GetCount();
+    if (refCount < 0)
+    {
+        Reset();
+        _engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
+                                       kTraceWarning);
+        return (-1);
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEExternalMedia reference counter = %d", refCount);
+    return (refCount);
+}
+
+int VoEExternalMediaImpl::RegisterExternalMediaProcessing(
+    int channel,
+    ProcessingTypes type,
+    VoEMediaProcess& processObject)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "RegisterExternalMediaProcessing(channel=%d, type=%d, "
+                 "processObject=0x%x)", channel, type, &processObject);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    switch (type)
+    {
+        case kPlaybackPerChannel:
+        case kRecordingPerChannel:
+        {
+            voe::ScopedChannel sc(_channelManager, channel);
+            voe::Channel* channelPtr = sc.ChannelPtr();
+            if (channelPtr == NULL)
+            {
+                _engineStatistics.SetLastError(
+                    VE_CHANNEL_NOT_VALID, kTraceError,
+                    "RegisterExternalMediaProcessing() "
+                    "failed to locate channel");
+                return -1;
+            }
+            return channelPtr->RegisterExternalMediaProcessing(type,
+                                                               processObject);
+        }
+        case kPlaybackAllChannelsMixed:
+        {
+            return _outputMixerPtr->RegisterExternalMediaProcessing(
+                processObject);
+        }
+        case kRecordingAllChannelsMixed:
+        {
+            return _transmitMixerPtr->RegisterExternalMediaProcessing(
+                processObject);
+        }
+        default:
+        {
+            _engineStatistics.SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "RegisterExternalMediaProcessing() invalid process type");
+            return -1;
+        }
+    }
+    return 0;
+}
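+
+// Usage sketch (illustrative only; 'voe', 'ch' and MyProcessor are
+// hypothetical names, with MyProcessor implementing the VoEMediaProcess
+// callback interface):
+//
+//   MyProcessor processor;
+//   VoEExternalMedia* xmedia = VoEExternalMedia::GetInterface(voe);
+//   xmedia->RegisterExternalMediaProcessing(ch, kRecordingPerChannel,
+//                                           processor);
+//   ...
+//   xmedia->DeRegisterExternalMediaProcessing(ch, kRecordingPerChannel);
+//   xmedia->Release();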
+
+int VoEExternalMediaImpl::DeRegisterExternalMediaProcessing(
+    int channel,
+    ProcessingTypes type)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "DeRegisterExternalMediaProcessing(channel=%d)", channel);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    switch (type)
+    {
+        case kPlaybackPerChannel:
+        case kRecordingPerChannel:
+        {
+            voe::ScopedChannel sc(_channelManager, channel);
+            voe::Channel* channelPtr = sc.ChannelPtr();
+            if (channelPtr == NULL)
+            {
+                _engineStatistics.SetLastError(
+                    VE_CHANNEL_NOT_VALID, kTraceError,
+                    "RegisterExternalMediaProcessing() "
+                    "failed to locate channel");
+                return -1;
+            }
+            return channelPtr->DeRegisterExternalMediaProcessing(type);
+        }
+        case kPlaybackAllChannelsMixed:
+        {
+            return _outputMixerPtr->DeRegisterExternalMediaProcessing();
+        }
+        case kRecordingAllChannelsMixed:
+        {
+            return _transmitMixerPtr->DeRegisterExternalMediaProcessing();
+        }
+        default:
+        {
+            _engineStatistics.SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "RegisterExternalMediaProcessing() invalid process type");
+            return -1;
+        }
+    }
+}
+
+int VoEExternalMediaImpl::SetExternalRecordingStatus(bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetExternalRecordingStatus(enable=%d)", enable);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    if (_audioDevicePtr->Recording())
+    {
+        _engineStatistics.SetLastError(
+            VE_ALREADY_SENDING,
+            kTraceError,
+            "SetExternalRecordingStatus() cannot set state while sending");
+        return -1;
+    }
+    _externalRecording = enable;
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED,
+        kTraceError,
+        "SetExternalRecordingStatus() external recording is not supported");
+    return -1;
+#endif
+}
+
+int VoEExternalMediaImpl::ExternalRecordingInsertData(
+        const WebRtc_Word16 speechData10ms[],
+        int lengthSamples,
+        int samplingFreqHz,
+        int current_delay_ms)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+                 "ExternalRecordingInsertData(speechData10ms=0x%x,"
+                 " lengthSamples=%u, samplingFreqHz=%d, current_delay_ms=%d)",
+                 &speechData10ms[0], lengthSamples, samplingFreqHz,
+                 current_delay_ms);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (!_externalRecording)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_OPERATION,
+            kTraceError,
+            "ExternalRecordingInsertData() external recording is not enabled");
+        return -1;
+    }
+    if (NumOfSendingChannels() == 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_ALREADY_SENDING,
+            kTraceError,
+            "SetExternalRecordingStatus() no channel is sending");
+        return -1;
+    }
+    if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
+        (48000 != samplingFreqHz) && (44000 != samplingFreqHz))
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT,
+            kTraceError,
+            "ExternalRecordingInsertData() invalid sample rate");
+        return -1;
+    }
+    if ((0 == lengthSamples) ||
+        ((lengthSamples % (samplingFreqHz / 100)) != 0))
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT,
+            kTraceError,
+            "ExternalRecordingInsertData() invalid buffer size");
+        return -1;
+    }
+    if (current_delay_ms < 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT,
+            kTraceError,
+            "SetExternalRecordingStatus() invalid delay)");
+        return -1;
+    }
+
+    WebRtc_UWord16 blockSize = samplingFreqHz / 100;
+    WebRtc_UWord32 nBlocks = lengthSamples / blockSize;
+    WebRtc_Word16 totalDelayMS = 0;
+    WebRtc_UWord16 playoutDelayMS = 0;
+
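+    // The input is processed in 10 ms blocks: blockSize = samplingFreqHz / 100
+    // samples per block (e.g. 160 samples at 16 kHz, 480 at 48 kHz), and
+    // lengthSamples has already been verified to be a multiple of that above.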
+    for (WebRtc_UWord32 i = 0; i < nBlocks; i++)
+    {
+        if (!_externalPlayout)
+        {
+            // Use real playout delay if external playout is not enabled.
+            _audioDevicePtr->PlayoutDelay(&playoutDelayMS);
+            totalDelayMS = current_delay_ms + playoutDelayMS;
+        }
+        else
+        {
+            // Use stored delay value given the last call
+            // to ExternalPlayoutGetData.
+            totalDelayMS = current_delay_ms + playout_delay_ms_;
+            // Compensate for block sizes larger than 10ms
+            totalDelayMS -= (WebRtc_Word16)(i*10);
+            if (totalDelayMS < 0)
+                totalDelayMS = 0;
+        }
+        _transmitMixerPtr->PrepareDemux(
+            (const WebRtc_Word8*)(&speechData10ms[i*blockSize]),
+            blockSize,
+            1,
+            samplingFreqHz,
+            totalDelayMS,
+            0,
+            0);
+
+        _transmitMixerPtr->DemuxAndMix();
+        _transmitMixerPtr->EncodeAndSend();
+    }
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED,
+        kTraceError,
+        "ExternalRecordingInsertData() external recording is not supported");
+    return -1;
+#endif
+}
+
+int VoEExternalMediaImpl::SetExternalPlayoutStatus(bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetExternalPlayoutStatus(enable=%d)", enable);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    if (_audioDevicePtr->Playing())
+    {
+        _engineStatistics.SetLastError(
+            VE_ALREADY_SENDING,
+            kTraceError,
+            "SetExternalPlayoutStatus() cannot set state while playing");
+        return -1;
+    }
+    _externalPlayout = enable;
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED,
+        kTraceError,
+        "SetExternalPlayoutStatus() external playout is not supported");
+    return -1;
+#endif
+}
+
+int VoEExternalMediaImpl::ExternalPlayoutGetData(
+    WebRtc_Word16 speechData10ms[],
+    int samplingFreqHz,
+    int current_delay_ms,
+    int& lengthSamples)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+                 "ExternalPlayoutGetData(speechData10ms=0x%x, samplingFreqHz=%d"
+                 ",  current_delay_ms=%d)", &speechData10ms[0], samplingFreqHz,
+                 current_delay_ms);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (!_externalPlayout)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_OPERATION,
+            kTraceError,
+            "ExternalPlayoutGetData() external playout is not enabled");
+        return -1;
+    }
+    if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
+        (48000 != samplingFreqHz) && (44000 != samplingFreqHz))
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT,
+            kTraceError,
+            "ExternalPlayoutGetData() invalid sample rate");
+        return -1;
+    }
+    if (current_delay_ms < 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT,
+            kTraceError,
+            "ExternalPlayoutGetData() invalid delay)");
+        return -1;
+    }
+
+    AudioFrame audioFrame;
+
+    // Retrieve mixed output at the specified rate
+    _outputMixerPtr->MixActiveChannels();
+    _outputMixerPtr->DoOperationsOnCombinedSignal();
+    _outputMixerPtr->GetMixedAudio(samplingFreqHz, 1, audioFrame);
+
+    // Deliver audio (PCM) samples to the external sink
+    memcpy(speechData10ms,
+           audioFrame._payloadData,
+           sizeof(WebRtc_Word16)*(audioFrame._payloadDataLengthInSamples));
+    lengthSamples = audioFrame._payloadDataLengthInSamples;
+
+    // Store current playout delay (to be used by ExternalRecordingInsertData).
+    playout_delay_ms_ = current_delay_ms;
+
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED,
+        kTraceError,
+        "ExternalPlayoutGetData() external playout is not supported");
+    return -1;
+#endif
+}
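+
+// Usage sketch (illustrative only; 'xmedia' and the buffer sizing are
+// application-side assumptions). After SetExternalPlayoutStatus(true), pull
+// 10 ms of mixed playout audio at 16 kHz:
+//
+//   WebRtc_Word16 buffer[160];  // 10 ms at 16 kHz
+//   int lengthSamples = 0;
+//   xmedia->ExternalPlayoutGetData(buffer, 16000, 0, lengthSamples);
+//   // forward 'lengthSamples' samples from 'buffer' to the external sink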
+
+#endif  // WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+
+}  // namespace webrtc
diff --git a/voice_engine/main/source/voe_external_media_impl.h b/voice_engine/main/source/voe_external_media_impl.h
new file mode 100644
index 0000000..fa1ff8a
--- /dev/null
+++ b/voice_engine/main/source/voe_external_media_impl.h
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_IMPL_H
+
+#include "voe_external_media.h"
+
+#include "ref_count.h"
+#include "shared_data.h"
+
+namespace webrtc {
+
+class VoEExternalMediaImpl : public virtual voe::SharedData,
+                             public VoEExternalMedia,
+                             public voe::RefCount
+{
+public:
+    virtual int Release();
+
+    virtual int RegisterExternalMediaProcessing(
+        int channel,
+        ProcessingTypes type,
+        VoEMediaProcess& processObject);
+
+    virtual int DeRegisterExternalMediaProcessing(
+        int channel,
+        ProcessingTypes type);
+
+    virtual int SetExternalRecordingStatus(bool enable);
+
+    virtual int SetExternalPlayoutStatus(bool enable);
+
+    virtual int ExternalRecordingInsertData(
+        const WebRtc_Word16 speechData10ms[],
+        int lengthSamples,
+        int samplingFreqHz,
+        int current_delay_ms);
+
+    virtual int ExternalPlayoutGetData(WebRtc_Word16 speechData10ms[],
+                                       int samplingFreqHz,
+                                       int current_delay_ms,
+                                       int& lengthSamples);
+
+protected:
+    VoEExternalMediaImpl();
+    virtual ~VoEExternalMediaImpl();
+
+private:
+    int playout_delay_ms_;
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_IMPL_H
diff --git a/voice_engine/main/source/voe_file_impl.cc b/voice_engine/main/source/voe_file_impl.cc
new file mode 100644
index 0000000..f782432
--- /dev/null
+++ b/voice_engine/main/source/voe_file_impl.cc
@@ -0,0 +1,1423 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_file_impl.h"
+
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "file_wrapper.h"
+#include "media_file.h"
+#include "output_mixer.h"
+#include "trace.h"
+#include "transmit_mixer.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc {
+
+VoEFile* VoEFile::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_FILE_API
+    return NULL;
+#else
+    if (NULL == voiceEngine)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* s =
+        reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoEFileImpl* d = s;
+    (*d)++;
+    return (d);
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_FILE_API
+
+VoEFileImpl::VoEFileImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEFileImpl::VoEFileImpl() - ctor");
+}
+
+VoEFileImpl::~VoEFileImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEFileImpl::~VoEFileImpl() - dtor");
+}
+
+int VoEFileImpl::Release()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEFile::Release()");
+    (*this)--;
+    int refCount = GetCount();
+    if (refCount < 0)
+    {
+        Reset();
+        _engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
+                                       kTraceWarning);
+        return (-1);
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEFile reference counter = %d", refCount);
+    return (refCount);
+}
+
+int VoEFileImpl::StartPlayingFileLocally(
+    int channel,
+    const char fileNameUTF8[1024],
+    bool loop, FileFormats format,
+    float volumeScaling,
+    int startPointMs,
+    int stopPointMs)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StartPlayingFileLocally(channel=%d, fileNameUTF8[]=%s, "
+                 "loop=%d, format=%d, volumeScaling=%5.3f, startPointMs=%d,"
+                 " stopPointMs=%d)",
+                 channel, fileNameUTF8, loop, format, volumeScaling,
+                 startPointMs, stopPointMs);
+    assert(1024 == FileWrapper::kMaxFileNameSize);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "StartPlayingFileLocally() failed to locate channel");
+        return -1;
+    }
+
+    return channelPtr->StartPlayingFileLocally(fileNameUTF8,
+                                               loop,
+                                               format,
+                                               startPointMs,
+                                               volumeScaling,
+                                               stopPointMs,
+                                               NULL);
+}
+
+int VoEFileImpl::StartPlayingFileLocally(int channel,
+                                         InStream* stream,
+                                         FileFormats format,
+                                         float volumeScaling,
+                                         int startPointMs,
+                                         int stopPointMs)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StartPlayingFileLocally(channel=%d, stream, format=%d, "
+                 "volumeScaling=%5.3f, startPointMs=%d, stopPointMs=%d)",
+                 channel, format, volumeScaling, startPointMs, stopPointMs);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "StartPlayingFileLocally() failed to locate channel");
+        return -1;
+    }
+
+    return channelPtr->StartPlayingFileLocally(stream,
+                                               format,
+                                               startPointMs,
+                                               volumeScaling,
+                                               stopPointMs,
+                                               NULL);
+}
+
+int VoEFileImpl::StopPlayingFileLocally(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StopPlayingFileLocally()");
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "StopPlayingFileLocally() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->StopPlayingFileLocally();
+}
+
+int VoEFileImpl::IsPlayingFileLocally(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "IsPlayingFileLocally(channel=%d)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "StopPlayingFileLocally() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->IsPlayingFileLocally();
+}
+
+int VoEFileImpl::ScaleLocalFilePlayout(int channel, float scale)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "ScaleLocalFilePlayout(channel=%d, scale=%5.3f)",
+                 channel, scale);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "StopPlayingFileLocally() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->ScaleLocalFilePlayout(scale);
+}
+
+int VoEFileImpl::StartPlayingFileAsMicrophone(int channel,
+                                              const char fileNameUTF8[1024],
+                                              bool loop,
+                                              bool mixWithMicrophone,
+                                              FileFormats format,
+                                              float volumeScaling)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StartPlayingFileAsMicrophone(channel=%d, fileNameUTF8=%s, "
+                 "loop=%d, mixWithMicrophone=%d, format=%d, "
+                 "volumeScaling=%5.3f)",
+                 channel, fileNameUTF8, loop, mixWithMicrophone, format,
+                 volumeScaling);
+    assert(1024 == FileWrapper::kMaxFileNameSize);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    const WebRtc_UWord32 startPointMs(0);
+    const WebRtc_UWord32 stopPointMs(0);
+
+    if (channel == -1)
+    {
+        int res = _transmitMixerPtr->StartPlayingFileAsMicrophone(
+            fileNameUTF8,
+            loop,
+            format,
+            startPointMs,
+            volumeScaling,
+            stopPointMs,
+            NULL);
+        if (res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "StartPlayingFileAsMicrophone() failed to start"
+                         " playing file");
+            return(-1);
+        }
+        else
+        {
+            _transmitMixerPtr->SetMixWithMicStatus(mixWithMicrophone);
+            return(0);
+        }
+    }
+    else
+    {
+        // Add file after demultiplexing <=> affects one channel only
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                "StartPlayingFileAsMicrophone() failed to locate channel");
+            return -1;
+        }
+
+        int res = channelPtr->StartPlayingFileAsMicrophone(fileNameUTF8,
+                                                           loop,
+                                                           format,
+                                                           startPointMs,
+                                                           volumeScaling,
+                                                           stopPointMs,
+                                                           NULL);
+        if (res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "StartPlayingFileAsMicrophone() failed to start "
+                         "playing file");
+            return -1;
+        }
+        else
+        {
+            channelPtr->SetMixWithMicStatus(mixWithMicrophone);
+            return 0;
+        }
+    }
+}
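+
+// Usage sketch (illustrative only; 'voefile' and the path are hypothetical):
+// mix a looping 16 kHz PCM file into the microphone signal of all sending
+// channels (channel == -1 inserts the file before demultiplexing):
+//
+//   voefile->StartPlayingFileAsMicrophone(-1, "/tmp/prompt.pcm", true, true,
+//                                         kFileFormatPcm16kHzFile, 1.0);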
+
+int VoEFileImpl::StartPlayingFileAsMicrophone(int channel,
+                                              InStream* stream,
+                                              bool mixWithMicrophone,
+                                              FileFormats format,
+                                              float volumeScaling)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StartPlayingFileAsMicrophone(channel=%d, stream,"
+                 " mixWithMicrophone=%d, format=%d, volumeScaling=%5.3f)",
+                 channel, mixWithMicrophone, format, volumeScaling);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    const WebRtc_UWord32 startPointMs(0);
+    const WebRtc_UWord32 stopPointMs(0);
+
+    if (channel == -1)
+    {
+        int res = _transmitMixerPtr->StartPlayingFileAsMicrophone(
+            stream,
+            format,
+            startPointMs,
+            volumeScaling,
+            stopPointMs,
+            NULL);
+        if (res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "StartPlayingFileAsMicrophone() failed to start"
+                         " playing stream");
+            return(-1);
+        }
+        else
+        {
+            _transmitMixerPtr->SetMixWithMicStatus(mixWithMicrophone);
+            return(0);
+        }
+    }
+    else
+    {
+        // Add file after demultiplexing <=> affects one channel only
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(
+                VE_CHANNEL_NOT_VALID, kTraceError,
+                "StartPlayingFileAsMicrophone() failed to locate channel");
+            return -1;
+        }
+
+        int res = channelPtr->StartPlayingFileAsMicrophone(
+            stream, format, startPointMs, volumeScaling, stopPointMs, NULL);
+        if (res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "StartPlayingFileAsMicrophone() failed to start"
+                         " playing stream");
+            return -1;
+        }
+        else
+        {
+            channelPtr->SetMixWithMicStatus(mixWithMicrophone);
+            return 0;
+        }
+    }
+}
+
+int VoEFileImpl::StopPlayingFileAsMicrophone(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StopPlayingFileAsMicrophone(channel=%d)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        // Stop adding file before demultiplexing <=> affects all channels
+        return _transmitMixerPtr->StopPlayingFileAsMicrophone();
+    }
+    else
+    {
+        // Stop adding file after demultiplexing <=> affects one channel only
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(
+                VE_CHANNEL_NOT_VALID, kTraceError,
+                "StopPlayingFileAsMicrophone() failed to locate channel");
+            return -1;
+        }
+        return channelPtr->StopPlayingFileAsMicrophone();
+    }
+}
+
+int VoEFileImpl::IsPlayingFileAsMicrophone(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "IsPlayingFileAsMicrophone(channel=%d)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        return _transmitMixerPtr->IsPlayingFileAsMicrophone();
+    }
+    else
+    {
+        // Check file status after demultiplexing <=> affects one channel only
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(
+                VE_CHANNEL_NOT_VALID, kTraceError,
+                "IsPlayingFileAsMicrophone() failed to locate channel");
+            return -1;
+        }
+        return channelPtr->IsPlayingFileAsMicrophone();
+    }
+}
+
+int VoEFileImpl::ScaleFileAsMicrophonePlayout(int channel, float scale)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "ScaleFileAsMicrophonePlayout(channel=%d, scale=%5.3f)",
+                 channel, scale);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        return _transmitMixerPtr->ScaleFileAsMicrophonePlayout(scale);
+    }
+    else
+    {
+        // Scale file after demultiplexing <=> affects one channel only
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(
+                VE_CHANNEL_NOT_VALID, kTraceError,
+                "IsPlayingFileAsMicrophone() failed to locate channel");
+            return -1;
+        }
+        return channelPtr->ScaleFileAsMicrophonePlayout(scale);
+    }
+}
+
+int VoEFileImpl::StartRecordingPlayout(
+    int channel, const char* fileNameUTF8, CodecInst* compression,
+    int maxSizeBytes)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StartRecordingPlayout(channel=%d, fileNameUTF8=%s, "
+                 "compression, maxSizeBytes=%d)",
+                 channel, fileNameUTF8, maxSizeBytes);
+    assert(1024 == FileWrapper::kMaxFileNameSize);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        _outputMixerPtr->StartRecordingPlayout(fileNameUTF8, compression);
+        return 0;
+    }
+    else
+    {
+        // Add file after demultiplexing <=> affects one channel only
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(
+                VE_CHANNEL_NOT_VALID, kTraceError,
+                "StartRecordingPlayout() failed to locate channel");
+            return -1;
+        }
+        return channelPtr->StartRecordingPlayout(fileNameUTF8, compression);
+    }
+}
+
+int VoEFileImpl::StartRecordingPlayout(
+    int channel, OutStream* stream, CodecInst* compression)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StartRecordingPlayout(channel=%d, stream, compression)",
+                 channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        return _outputMixerPtr->StartRecordingPlayout(stream, compression);
+    }
+    else
+    {
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(
+                VE_CHANNEL_NOT_VALID, kTraceError,
+                "StartRecordingPlayout() failed to locate channel");
+            return -1;
+        }
+        return channelPtr->StartRecordingPlayout(stream, compression);
+    }
+}
+
+int VoEFileImpl::StopRecordingPlayout(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StopRecordingPlayout(channel=%d)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        return _outputMixerPtr->StopRecordingPlayout();
+    }
+    else
+    {
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(
+                VE_CHANNEL_NOT_VALID, kTraceError,
+                "StopRecordingPlayout() failed to locate channel");
+            return -1;
+        }
+        return channelPtr->StopRecordingPlayout();
+    }
+}
+
+int VoEFileImpl::StartRecordingMicrophone(
+    const char* fileNameUTF8, CodecInst* compression, int maxSizeBytes)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StartRecordingMicrophone(fileNameUTF8=%s, compression, "
+                 "maxSizeBytes=%d)", fileNameUTF8, maxSizeBytes);
+    assert(1024 == FileWrapper::kMaxFileNameSize);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (_transmitMixerPtr->StartRecordingMicrophone(fileNameUTF8, compression))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                     "StartRecordingMicrophone() failed to start recording");
+        return -1;
+    }
+    if (_audioDevicePtr->Recording())
+    {
+        return 0;
+    }
+    if (!_externalRecording)
+    {
+        if (_audioDevicePtr->InitRecording() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "StartRecordingMicrophone() failed to initialize"
+                         " recording");
+            return -1;
+        }
+        if (_audioDevicePtr->StartRecording() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                "StartRecordingMicrophone() failed to start recording");
+            return -1;
+        }
+    }
+    return 0;
+}
+
+int VoEFileImpl::StartRecordingMicrophone(
+    OutStream* stream, CodecInst* compression)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StartRecordingMicrophone(stream, compression)");
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (_transmitMixerPtr->StartRecordingMicrophone(stream, compression) == -1)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                     "StartRecordingMicrophone() failed to start recording");
+        return -1;
+    }
+    if (_audioDevicePtr->Recording())
+    {
+        return 0;
+    }
+    if (!_externalRecording)
+    {
+        if (_audioDevicePtr->InitRecording() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "StartRecordingMicrophone() failed to initialize "
+                         "recording");
+            return -1;
+        }
+        if (_audioDevicePtr->StartRecording() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "StartRecordingMicrophone() failed to start"
+                         " recording");
+            return -1;
+        }
+    }
+    return 0;
+}
+
+int VoEFileImpl::StopRecordingMicrophone()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StopRecordingMicrophone()");
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if ((NumOfSendingChannels() == 0)&&!_transmitMixerPtr->IsRecordingMic())
+    {
+        // Stop audio-device recording if no channel is recording
+        if (_audioDevicePtr->StopRecording() != 0)
+        {
+            _engineStatistics.SetLastError(
+                VE_CANNOT_STOP_RECORDING, kTraceError,
+                "StopRecordingMicrophone() failed to stop recording");
+            return -1;
+        }
+    }
+    return _transmitMixerPtr->StopRecordingMicrophone();
+}
+
+int VoEFileImpl::ConvertPCMToWAV(const char* fileNameInUTF8,
+                                 const char* fileNameOutUTF8)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "ConvertPCMToWAV(fileNameInUTF8=%s, fileNameOutUTF8=%s)",
+                 fileNameInUTF8, fileNameOutUTF8);
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(
+        -1,
+        kFileFormatPcm16kHzFile));
+
+    int res=playerObj.StartPlayingFile(fileNameInUTF8,false,0,1.0,0,0, NULL);
+    if (res)
+    {
+        _engineStatistics.SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "ConvertPCMToWAV failed to create player object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(
+        -1, kFileFormatWavFile));
+
+    CodecInst codecInst;
+    strncpy(codecInst.plname,"L16",32);
+            codecInst.channels = 1;
+            codecInst.rate     = 256000;
+            codecInst.plfreq   = 16000;
+            codecInst.pltype   = 94;
+            codecInst.pacsize  = 160;
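+    // Note: this L16 (linear 16-bit PCM) configuration amounts to
+    // 16000 samples/s * 16 bits = 256000 bps, with pacsize = 160 samples,
+    // i.e. one 10 ms packet at 16 kHz.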
+
+    res = recObj.StartRecordingAudioFile(fileNameOutUTF8,codecInst,0);
+    if (res)
+    {
+        _engineStatistics.SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "ConvertPCMToWAV failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the file
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    WebRtc_UWord32 decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // This is an OK way to end
+            break;
+        }
+
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                  (WebRtc_UWord16)decLength,
+                                   frequency, AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertPCMToWAV failed during conversion "
+                         "(audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertPCMToWAV failed during converstion "
+                         "(write frame)");
+        }
+    }
+
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
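+
+// Usage sketch (illustrative only; the file paths and the 'voe' handle are
+// hypothetical): convert a raw 16 kHz PCM capture into a WAV file.
+//
+//   VoEFile* voefile = VoEFile::GetInterface(voe);
+//   voefile->ConvertPCMToWAV("/tmp/capture.pcm", "/tmp/capture.wav");
+//   voefile->Release();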
+
+int VoEFileImpl::ConvertPCMToWAV(InStream* streamIn, OutStream* streamOut)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "ConvertPCMToWAV(streamIn, streamOut)");
+
+    if ((streamIn == NULL) || (streamOut == NULL))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+            "invalid stream handles");
+        return (-1);
+    }
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(-1,
+        kFileFormatPcm16kHzFile));
+    int res = playerObj.StartPlayingFile(*streamIn,0,1.0,0,0,NULL);
+    if (res)
+    {
+        _engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertPCMToWAV failed to create player object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(-1,
+        kFileFormatWavFile));
+    CodecInst codecInst;
+    strncpy(codecInst.plname, "L16", 32);
+    codecInst.channels = 1;
+    codecInst.rate     = 256000;
+    codecInst.plfreq   = 16000;
+    codecInst.pltype   = 94;
+    codecInst.pacsize  = 160;
+    res = recObj.StartRecordingAudioFile(*streamOut,codecInst,0);
+    if (res)
+    {
+        _engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertPCMToWAV failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the file
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    WebRtc_UWord32 decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // This is an OK way to end
+            break;
+        }
+
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                  (WebRtc_UWord16)decLength, frequency,
+                                   AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertPCMToWAV failed during conversion "
+                         "(create audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertPCMToWAV failed during converstion "
+                         "(write frame)");
+        }
+    }
+
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+int VoEFileImpl::ConvertWAVToPCM(const char* fileNameInUTF8,
+                                 const char* fileNameOutUTF8)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "ConvertWAVToPCM(fileNameInUTF8=%s, fileNameOutUTF8=%s)",
+                 fileNameInUTF8, fileNameOutUTF8);
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(-1,
+                                                        kFileFormatWavFile));
+    int res = playerObj.StartPlayingFile(fileNameInUTF8,false,0,1.0,0,0,NULL);
+    if (res)
+    {
+        _engineStatistics.SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "ConvertWAVToPCM failed to create player object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(
+        -1, kFileFormatPcm16kHzFile));
+
+    CodecInst codecInst;
+    strncpy(codecInst.plname,"L16",32);
+            codecInst.channels = 1;
+            codecInst.rate     = 256000;
+            codecInst.plfreq   = 16000;
+            codecInst.pltype   = 94;
+            codecInst.pacsize  = 160;
+
+    res = recObj.StartRecordingAudioFile(fileNameOutUTF8,codecInst,0);
+    if (res)
+    {
+        _engineStatistics.SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "ConvertWAVToPCM failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the file
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    WebRtc_UWord32 decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // This is an OK way to end
+            break;
+        }
+
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                   (WebRtc_UWord16)decLength,
+                                   frequency, AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertWAVToPCM failed during conversion "
+                         "(audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertWAVToPCM failed during converstion "
+                         "(write frame)");
+        }
+    }
+
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+int VoEFileImpl::ConvertWAVToPCM(InStream* streamIn, OutStream* streamOut)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "ConvertWAVToPCM(streamIn, streamOut)");
+
+    if ((streamIn == NULL) || (streamOut == NULL))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                     "invalid stream handles");
+        return (-1);
+    }
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(-1,
+                                                        kFileFormatWavFile));
+    int res = playerObj.StartPlayingFile(*streamIn,0,1.0,0,0,NULL);
+    if (res)
+    {
+        _engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertWAVToPCM failed to create player object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(
+        -1, kFileFormatPcm16kHzFile));
+
+    CodecInst codecInst;
+    strncpy(codecInst.plname,"L16",32);
+            codecInst.channels = 1;
+            codecInst.rate     = 256000;
+            codecInst.plfreq   = 16000;
+            codecInst.pltype   = 94;
+            codecInst.pacsize  = 160;
+
+    res = recObj.StartRecordingAudioFile(*streamOut,codecInst,0);
+    if (res)
+    {
+        _engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertWAVToPCM failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the file
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    WebRtc_UWord32 decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // This is an OK way to end
+            break;
+        }
+
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                  (WebRtc_UWord16)decLength, frequency,
+                                   AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertWAVToPCM failed during conversion "
+                         "(audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertWAVToPCM failed during converstion"
+                         " (write frame)");
+        }
+    }
+
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+int VoEFileImpl::ConvertPCMToCompressed(const char* fileNameInUTF8,
+                                        const char* fileNameOutUTF8,
+                                        CodecInst* compression)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "ConvertPCMToCompressed(fileNameInUTF8=%s, fileNameOutUTF8=%s"
+                 ",  compression)", fileNameInUTF8, fileNameOutUTF8);
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "  compression: plname=%s, plfreq=%d, pacsize=%d",
+                 compression->plname, compression->plfreq,
+                 compression->pacsize);
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(
+        -1,
+        kFileFormatPcm16kHzFile));
+    int res = playerObj.StartPlayingFile(fileNameInUTF8,false,0,1.0,0,0, NULL);
+    if (res)
+    {
+        _engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertPCMToCompressed failed to create player object");
+        // Clean up and shutdown the file player
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(
+        -1,
+        kFileFormatCompressedFile));
+    res = recObj.StartRecordingAudioFile(fileNameOutUTF8, *compression,0);
+    if (res)
+    {
+        _engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertPCMToCompressed failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the file
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    WebRtc_UWord32 decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // This is an OK way to end
+            break;
+        }
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                  (WebRtc_UWord16)decLength,
+                                  frequency, AudioFrame::kNormalSpeech,
+                                  AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertPCMToCompressed failed during conversion "
+                         "(audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertPCMToCompressed failed during converstion "
+                         "(write frame)");
+        }
+    }
+
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+int VoEFileImpl::ConvertPCMToCompressed(InStream* streamIn,
+                                        OutStream* streamOut,
+                                        CodecInst* compression)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "ConvertPCMToCompressed(streamIn, streamOut, compression)");
+
+    if ((streamIn == NULL) || (streamOut == NULL))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                     "invalid stream handles");
+        return (-1);
+    }
+
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "  compression: plname=%s, plfreq=%d, pacsize=%d",
+                 compression->plname, compression->plfreq,
+                 compression->pacsize);
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(
+        -1, kFileFormatPcm16kHzFile));
+
+    int res = playerObj.StartPlayingFile(*streamIn,0,1.0,0,0,NULL);
+    if (res)
+    {
+        _engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertPCMToCompressed failed to create player object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(
+        -1, kFileFormatCompressedFile));
+    res = recObj.StartRecordingAudioFile(*streamOut,*compression,0);
+    if (res)
+    {
+        _engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertPCMToCompressed failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the file
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    WebRtc_UWord32 decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // An incomplete frame indicates the end of the file
+            break;
+        }
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                  (WebRtc_UWord16)decLength,
+                                   frequency, AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertPCMToCompressed failed during conversion"
+                         " (audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertPCMToCompressed failed during converstion "
+                         "(write frame)");
+        }
+    }
+
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+int VoEFileImpl::ConvertCompressedToPCM(const char* fileNameInUTF8,
+                                        const char* fileNameOutUTF8)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "ConvertCompressedToPCM(fileNameInUTF8=%s,"
+                 " fileNameOutUTF8=%s)",
+                 fileNameInUTF8, fileNameOutUTF8);
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(
+        -1, kFileFormatCompressedFile));
+
+    int res = playerObj.StartPlayingFile(fileNameInUTF8,false,0,1.0,0,0,NULL);
+    if (res)
+    {
+        _engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertCompressedToPCM failed to create player object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(
+        -1, kFileFormatPcm16kHzFile));
+
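+    // Target format for the output file: 16-bit linear PCM (L16), mono,
+    // 16 kHz, in 10 ms packets (160 samples).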
+    CodecInst codecInst;
+    strncpy(codecInst.plname, "L16", 32);
+    codecInst.channels = 1;
+    codecInst.rate     = 256000;
+    codecInst.plfreq   = 16000;
+    codecInst.pltype   = 94;
+    codecInst.pacsize  = 160;
+
+    res = recObj.StartRecordingAudioFile(fileNameOutUTF8,codecInst,0);
+    if (res)
+    {
+        _engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertCompressedToPCM failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the file
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    WebRtc_UWord32 decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // An incomplete frame indicates the end of the file
+            break;
+        }
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                  (WebRtc_UWord16)decLength,
+                                   frequency,
+                                   AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertCompressedToPCM failed during conversion "
+                         "(create audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertCompressedToPCM failed during converstion "
+                         "(write frame)");
+        }
+    }
+
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+int VoEFileImpl::ConvertCompressedToPCM(InStream* streamIn,
+                                        OutStream* streamOut)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "ConvertCompressedToPCM(file, file);");
+
+    if ((streamIn == NULL) || (streamOut == NULL))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+            "invalid stream handles");
+        return (-1);
+    }
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(
+        -1, kFileFormatCompressedFile));
+    int res;
+
+    res = playerObj.StartPlayingFile(*streamIn,0,1.0,0,0,NULL);
+    if (res)
+    {
+        _engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertCompressedToPCM failed to create player object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(
+        -1, kFileFormatPcm16kHzFile));
+
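+    // Target format for the output file: 16-bit linear PCM (L16), mono,
+    // 16 kHz, in 10 ms packets (160 samples).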
+    CodecInst codecInst;
+    strncpy(codecInst.plname, "L16", 32);
+    codecInst.channels = 1;
+    codecInst.rate     = 256000;
+    codecInst.plfreq   = 16000;
+    codecInst.pltype   = 94;
+    codecInst.pacsize  = 160;
+
+    res = recObj.StartRecordingAudioFile(*streamOut,codecInst,0);
+    if (res)
+    {
+        _engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertCompressedToPCM failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the file
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    WebRtc_UWord32 decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // An incomplete frame indicates the end of the file
+            break;
+        }
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                  (WebRtc_UWord16)decLength,
+                                   frequency,
+                                   AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertCompressedToPCM failed during conversion"
+                         " (audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+                         "ConvertCompressedToPCM failed during converstion"
+                         " (write frame)");
+        }
+    }
+
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+
+int VoEFileImpl::GetFileDuration(const char* fileNameUTF8,
+                                 int& durationMs,
+                                 FileFormats format)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetFileDuration(fileNameUTF8=%s, format=%d)",
+                 fileNameUTF8, format);
+
+    // Create a dummy file module for this
+    MediaFile * fileModule=MediaFile::CreateMediaFile(-1);
+
+    // Temp container of the right format
+    WebRtc_UWord32 duration;
+    int res=fileModule->FileDurationMs(fileNameUTF8,duration,format);
+    if (res)
+    {
+        _engineStatistics.SetLastError(VE_BAD_FILE, kTraceError,
+            "GetFileDuration() failed to measure file duration");
+        MediaFile::DestroyMediaFile(fileModule);
+        return -1;
+    }
+    durationMs = duration;
+    MediaFile::DestroyMediaFile(fileModule);
+    fileModule = NULL;
+
+    return(res);
+}
+
+int VoEFileImpl::GetPlaybackPosition(int channel, int& positionMs)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetPlaybackPosition(channel=%d)", channel);
+
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetPlaybackPosition() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetLocalPlayoutPosition(positionMs);
+}
+
+}  // namespace webrtc
+
+#endif  // #ifdef WEBRTC_VOICE_ENGINE_FILE_API
diff --git a/voice_engine/main/source/voe_file_impl.h b/voice_engine/main/source/voe_file_impl.h
new file mode 100644
index 0000000..2d93876
--- /dev/null
+++ b/voice_engine/main/source/voe_file_impl.h
@@ -0,0 +1,143 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_FILE_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_FILE_IMPL_H
+
+#include "voe_file.h"
+#include "shared_data.h"
+#include "ref_count.h"
+
+namespace webrtc {
+
+class VoEFileImpl : public virtual voe::SharedData,
+                    public VoEFile, public voe::RefCount
+{
+public:
+    virtual int Release();
+
+    // Playout file locally
+
+    virtual int StartPlayingFileLocally(
+        int channel,
+        const char fileNameUTF8[1024],
+        bool loop = false,
+        FileFormats format = kFileFormatPcm16kHzFile,
+        float volumeScaling = 1.0,
+        int startPointMs = 0,
+        int stopPointMs = 0);
+
+    virtual int StartPlayingFileLocally(
+        int channel,
+        InStream* stream,
+        FileFormats format = kFileFormatPcm16kHzFile,
+        float volumeScaling = 1.0,
+        int startPointMs = 0, int stopPointMs = 0);
+
+    virtual int StopPlayingFileLocally(int channel);
+
+    virtual int IsPlayingFileLocally(int channel);
+
+    virtual int ScaleLocalFilePlayout(int channel, float scale);
+
+    // Use file as microphone input
+
+    virtual int StartPlayingFileAsMicrophone(
+        int channel,
+        const char fileNameUTF8[1024],
+        bool loop = false,
+        bool mixWithMicrophone = false,
+        FileFormats format = kFileFormatPcm16kHzFile,
+        float volumeScaling = 1.0);
+
+    virtual int StartPlayingFileAsMicrophone(
+        int channel,
+        InStream* stream,
+        bool mixWithMicrophone = false,
+        FileFormats format = kFileFormatPcm16kHzFile,
+        float volumeScaling = 1.0);
+
+    virtual int StopPlayingFileAsMicrophone(int channel);
+
+    virtual int IsPlayingFileAsMicrophone(int channel);
+
+    virtual int ScaleFileAsMicrophonePlayout(int channel, float scale);
+
+    // Record speaker signal to file
+
+    virtual int StartRecordingPlayout(int channel,
+                                      const char* fileNameUTF8,
+                                      CodecInst* compression = NULL,
+                                      int maxSizeBytes = -1);
+
+    virtual int StartRecordingPlayout(int channel,
+                                      OutStream* stream,
+                                      CodecInst* compression = NULL);
+
+    virtual int StopRecordingPlayout(int channel);
+
+    // Record microphone signal to file
+
+    virtual int StartRecordingMicrophone(const char* fileNameUTF8,
+                                         CodecInst* compression = NULL,
+                                         int maxSizeBytes = -1);
+
+    virtual int StartRecordingMicrophone(OutStream* stream,
+                                         CodecInst* compression = NULL);
+
+    virtual int StopRecordingMicrophone();
+
+    // Conversion between different file formats
+
+    virtual int ConvertPCMToWAV(const char* fileNameInUTF8,
+                                const char* fileNameOutUTF8);
+
+    virtual int ConvertPCMToWAV(InStream* streamIn,
+                                OutStream* streamOut);
+
+    virtual int ConvertWAVToPCM(const char* fileNameInUTF8,
+                                const char* fileNameOutUTF8);
+
+    virtual int ConvertWAVToPCM(InStream* streamIn,
+                                OutStream* streamOut);
+
+    virtual int ConvertPCMToCompressed(const char* fileNameInUTF8,
+                                       const char* fileNameOutUTF8,
+                                       CodecInst* compression);
+
+    virtual int ConvertPCMToCompressed(InStream* streamIn,
+                                       OutStream* streamOut,
+                                       CodecInst* compression);
+
+    virtual int ConvertCompressedToPCM(const char* fileNameInUTF8,
+                                       const char* fileNameOutUTF8);
+
+    virtual int ConvertCompressedToPCM(InStream* streamIn,
+                                       OutStream* streamOut);
+
+    // Misc file functions
+
+    virtual int GetFileDuration(
+        const char* fileNameUTF8,
+        int& durationMs,
+        FileFormats format = kFileFormatPcm16kHzFile);
+
+    virtual int GetPlaybackPosition(int channel, int& positionMs);
+
+protected:
+    VoEFileImpl();
+    virtual ~VoEFileImpl();
+
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_VOE_FILE_IMPL_H
+
diff --git a/voice_engine/main/source/voe_hardware_impl.cc b/voice_engine/main/source/voe_hardware_impl.cc
new file mode 100644
index 0000000..1966961
--- /dev/null
+++ b/voice_engine/main/source/voe_hardware_impl.cc
@@ -0,0 +1,820 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_hardware_impl.h"
+
+#include <cassert>
+
+#include "cpu_wrapper.h"
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc
+{
+
+VoEHardware* VoEHardware::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_HARDWARE_API
+    return NULL;
+#else
+    if (NULL == voiceEngine)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* s =
+            reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoEHardwareImpl* d = s;
+    (*d)++;
+    return (d);
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
+
+VoEHardwareImpl::VoEHardwareImpl() :
+    _cpu(NULL)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoEHardwareImpl() - ctor");
+
+    _cpu = CpuWrapper::CreateCpu();
+    if (_cpu)
+    {
+        _cpu->CpuUsage(); // init cpu usage
+    }
+}
+
+VoEHardwareImpl::~VoEHardwareImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+                 "~VoEHardwareImpl() - dtor");
+
+    if (_cpu)
+    {
+        delete _cpu;
+        _cpu = NULL;
+    }
+}
+
+int VoEHardwareImpl::Release()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoEHardwareImpl::Release()");
+    (*this)--;
+    int refCount = GetCount();
+    if (refCount < 0)
+    {
+        Reset();
+        _engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
+                                       kTraceWarning);
+        return (-1);
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoEHardwareImpl reference counter = %d", refCount);
+    return (refCount);
+}
+
+int VoEHardwareImpl::SetAudioDeviceLayer(AudioLayers audioLayer)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetAudioDeviceLayer(audioLayer=%d)", audioLayer);
+
+    // Don't allow a change if VoE is initialized
+    if (_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_ALREADY_INITED, kTraceError);
+        return -1;
+    }
+
+    // Map to AudioDeviceModule::AudioLayer
+    AudioDeviceModule::AudioLayer
+        wantedLayer(AudioDeviceModule::kPlatformDefaultAudio);
+    switch (audioLayer)
+    {
+        case kAudioPlatformDefault:
+            // already set above
+            break;
+        case kAudioWindowsCore:
+            wantedLayer = AudioDeviceModule::kWindowsCoreAudio;
+            break;
+        case kAudioWindowsWave:
+            wantedLayer = AudioDeviceModule::kWindowsWaveAudio;
+            break;
+        case kAudioLinuxAlsa:
+            wantedLayer = AudioDeviceModule::kLinuxAlsaAudio;
+            break;
+        case kAudioLinuxPulse:
+            wantedLayer = AudioDeviceModule::kLinuxPulseAudio;
+            break;
+        default:
+            _engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                                           "  invalid audio layer");
+            return -1;
+    }
+
+    // Save the audio device layer for Init()
+    _audioDeviceLayer = wantedLayer;
+
+    return 0;
+}
+
+int VoEHardwareImpl::GetAudioDeviceLayer(AudioLayers& audioLayer)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+               "GetAudioDeviceLayer(devices=?)");
+
+    // Can always be called regardless of VoE state
+
+    AudioDeviceModule::AudioLayer
+        activeLayer(AudioDeviceModule::kPlatformDefaultAudio);
+
+    if (_audioDevicePtr)
+    {
+        // Get active audio layer from ADM
+        if (_audioDevicePtr->ActiveAudioLayer(&activeLayer) != 0)
+        {
+            _engineStatistics.SetLastError(VE_UNDEFINED_SC_ERR, kTraceError,
+                                           "  Audio Device error");
+            return -1;
+        }
+    }
+    else
+    {
+        // Return VoE's internal layer setting
+        activeLayer = _audioDeviceLayer;
+    }
+
+    // Map to AudioLayers
+    switch (activeLayer)
+    {
+        case AudioDeviceModule::kPlatformDefaultAudio:
+            audioLayer = kAudioPlatformDefault;
+            break;
+        case AudioDeviceModule::kWindowsCoreAudio:
+            audioLayer = kAudioWindowsCore;
+            break;
+        case AudioDeviceModule::kWindowsWaveAudio:
+            audioLayer = kAudioWindowsWave;
+            break;
+        case AudioDeviceModule::kLinuxAlsaAudio:
+            audioLayer = kAudioLinuxAlsa;
+            break;
+        case AudioDeviceModule::kLinuxPulseAudio:
+            audioLayer = kAudioLinuxPulse;
+            break;
+        default:
+            _engineStatistics.SetLastError(VE_UNDEFINED_SC_ERR, kTraceError,
+                                           "  unknown audio layer");
+    }
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "  Output: audioLayer=%d", audioLayer);
+
+    return 0;
+}
+
+int VoEHardwareImpl::GetNumOfRecordingDevices(int& devices)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetNumOfRecordingDevices(devices=?)");
+    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    devices = static_cast<int> (_audioDevicePtr->RecordingDevices());
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "  Output: devices=%d", devices);
+
+    return 0;
+}
+
+int VoEHardwareImpl::GetNumOfPlayoutDevices(int& devices)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetNumOfPlayoutDevices(devices=?)");
+    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    devices = static_cast<int> (_audioDevicePtr->PlayoutDevices());
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "  Output: devices=%d", devices);
+
+    return 0;
+}
+
+int VoEHardwareImpl::GetRecordingDeviceName(int index,
+                                            char strNameUTF8[128],
+                                            char strGuidUTF8[128])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetRecordingDeviceName(index=%d)", index);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (strNameUTF8 == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "GetRecordingDeviceName() invalid argument");
+        return -1;
+    }
+
+    // Note that strGuidUTF8 is allowed to be NULL
+
+    // Init len variable to length of supplied vectors
+    const WebRtc_UWord16 strLen = 128;
+
+    // Check if length has been changed in module
+    assert(strLen == kAdmMaxDeviceNameSize);
+    assert(strLen == kAdmMaxGuidSize);
+
+    WebRtc_Word8 name[strLen];
+    WebRtc_Word8 guid[strLen];
+
+    // Get names from module
+    if (_audioDevicePtr->RecordingDeviceName(index, name, guid) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_CANNOT_RETRIEVE_DEVICE_NAME, kTraceError,
+            "GetRecordingDeviceName() failed to get device name");
+        return -1;
+    }
+
+    // Copy to vectors supplied by user
+    strncpy(strNameUTF8, name, strLen);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "  Output: strNameUTF8=%s", strNameUTF8);
+
+    if (strGuidUTF8 != NULL)
+    {
+        strncpy(strGuidUTF8, guid, strLen);
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                     "  Output: strGuidUTF8=%s", strGuidUTF8);
+    }
+
+    return 0;
+}
+
+int VoEHardwareImpl::GetPlayoutDeviceName(int index,
+                                          char strNameUTF8[128],
+                                          char strGuidUTF8[128])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetPlayoutDeviceName(index=%d)", index);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (strNameUTF8 == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "GetPlayoutDeviceName() invalid argument");
+        return -1;
+    }
+
+    // Note that strGuidUTF8 is allowed to be NULL
+
+    // Init len variable to length of supplied vectors
+    const WebRtc_UWord16 strLen = 128;
+
+    // Check if length has been changed in module
+    assert(strLen == kAdmMaxDeviceNameSize);
+    assert(strLen == kAdmMaxGuidSize);
+
+    WebRtc_Word8 name[strLen];
+    WebRtc_Word8 guid[strLen];
+
+    // Get names from module
+    if (_audioDevicePtr->PlayoutDeviceName(index, name, guid) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_CANNOT_RETRIEVE_DEVICE_NAME, kTraceError,
+            "GetPlayoutDeviceName() failed to get device name");
+        return -1;
+    }
+
+    // Copy to vectors supplied by user
+    strncpy(strNameUTF8, name, strLen);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "  Output: strNameUTF8=%s", strNameUTF8);
+
+    if (strGuidUTF8 != NULL)
+    {
+        strncpy(strGuidUTF8, guid, strLen);
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                     "  Output: strGuidUTF8=%s", strGuidUTF8);
+    }
+
+    return 0;
+}
+
+int VoEHardwareImpl::SetRecordingDevice(int index,
+                                        StereoChannel recordingChannel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetRecordingDevice(index=%d, recordingChannel=%d)",
+                 index, (int) recordingChannel);
+    CriticalSectionScoped cs(*_apiCritPtr);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    bool isRecording(false);
+
+    // Store state about activated recording to be able to restore it after the
+    // recording device has been modified.
+    if (_audioDevicePtr->Recording())
+    {
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                     "SetRecordingDevice() device is modified while recording"
+                     " is active...");
+        isRecording = true;
+        if (_audioDevicePtr->StopRecording() == -1)
+        {
+            _engineStatistics.SetLastError(
+                VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+                "SetRecordingDevice() unable to stop recording");
+            return -1;
+        }
+    }
+
+    // We let the module handle the index sanity check
+
+    // Set recording channel
+    AudioDeviceModule::ChannelType recCh =
+        AudioDeviceModule::kChannelBoth;
+    switch (recordingChannel)
+    {
+        case kStereoLeft:
+            recCh = AudioDeviceModule::kChannelLeft;
+            break;
+        case kStereoRight:
+            recCh = AudioDeviceModule::kChannelRight;
+            break;
+        case kStereoBoth:
+            // default setting kChannelBoth (<=> mono)
+            break;
+        default:
+            _engineStatistics.SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetRecordingDevice() unknown recording channel");
+            return -1;
+    }
+
+    // Cannot return error because of sanity above
+    _audioDevicePtr->RecordingChannel(&recCh);
+
+    // Map indices to unsigned since underlying functions need that
+    WebRtc_UWord16 indexU = static_cast<WebRtc_UWord16> (index);
+
+    WebRtc_Word32 res(0);
+
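+    // Index -1 maps to the default communication device and -2 to the
+    // default device; any other index is forwarded to the ADM unchanged.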
+    if (index == -1)
+    {
+        res = _audioDevicePtr->SetRecordingDevice(
+            AudioDeviceModule::kDefaultCommunicationDevice);
+    }
+    else if (index == -2)
+    {
+        res = _audioDevicePtr->SetRecordingDevice(
+            AudioDeviceModule::kDefaultDevice);
+    }
+    else
+    {
+        res = _audioDevicePtr->SetRecordingDevice(indexU);
+    }
+
+    if (res != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+            "SetRecordingDevice() unable to set the recording device");
+        return -1;
+    }
+
+    // Init microphone, so user can do volume settings etc
+    if (_audioDevicePtr->InitMicrophone() == -1)
+    {
+        _engineStatistics.SetLastError(
+            VE_CANNOT_ACCESS_MIC_VOL, kTraceWarning,
+            "SetRecordingDevice() cannot access microphone");
+    }
+
+    // Set number of channels
+    bool available(false);
+    _audioDevicePtr->StereoRecordingIsAvailable(&available);
+    if (_audioDevicePtr->SetStereoRecording(available) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_SOUNDCARD_ERROR, kTraceWarning,
+            "SetRecordingDevice() failed to set mono recording mode");
+    }
+
+    // Restore recording if it was enabled already when calling this function.
+    if (isRecording)
+    {
+        if (!_externalRecording)
+        {
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                         "SetRecordingDevice() recording is now being "
+                         "restored...");
+            if (_audioDevicePtr->InitRecording() != 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceVoice,
+                             VoEId(_instanceId, -1),
+                             "SetRecordingDevice() failed to initialize "
+                             "recording");
+                return -1;
+            }
+            if (_audioDevicePtr->StartRecording() != 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceVoice,
+                             VoEId(_instanceId, -1),
+                             "SetRecordingDevice() failed to start recording");
+                return -1;
+            }
+        }
+    }
+
+    return 0;
+}
+
+int VoEHardwareImpl::SetPlayoutDevice(int index)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetPlayoutDevice(index=%d)", index);
+    CriticalSectionScoped cs(*_apiCritPtr);
+    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    bool isPlaying(false);
+
+    // Store state about activated playout to be able to restore it after the
+    // playout device has been modified.
+    if (_audioDevicePtr->Playing())
+    {
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                     "SetPlayoutDevice() device is modified while playout is "
+                     "active...");
+        isPlaying = true;
+        if (_audioDevicePtr->StopPlayout() == -1)
+        {
+            _engineStatistics.SetLastError(
+                VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+                "SetPlayoutDevice() unable to stop playout");
+            return -1;
+        }
+    }
+
+    // We let the module handle the index sanity check
+
+    // Map indices to unsigned since underlying functions need that
+    WebRtc_UWord16 indexU = static_cast<WebRtc_UWord16> (index);
+
+    WebRtc_Word32 res(0);
+
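+    // Index -1 maps to the default communication device and -2 to the
+    // default device; any other index is forwarded to the ADM unchanged.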
+    if (index == -1)
+    {
+        res = _audioDevicePtr->SetPlayoutDevice(
+            AudioDeviceModule::kDefaultCommunicationDevice);
+    }
+    else if (index == -2)
+    {
+        res = _audioDevicePtr->SetPlayoutDevice(
+            AudioDeviceModule::kDefaultDevice);
+    }
+    else
+    {
+        res = _audioDevicePtr->SetPlayoutDevice(indexU);
+    }
+
+    if (res != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_SOUNDCARD_ERROR, kTraceError,
+            "SetPlayoutDevice() unable to set the playout device");
+        return -1;
+    }
+
+    // Init speaker, so user can do volume settings etc
+    if (_audioDevicePtr->InitSpeaker() == -1)
+    {
+        _engineStatistics.SetLastError(
+            VE_CANNOT_ACCESS_SPEAKER_VOL, kTraceWarning,
+            "SetPlayoutDevice() cannot access speaker");
+    }
+
+    // Set number of channels
+    bool available(false);
+    _audioDevicePtr->StereoPlayoutIsAvailable(&available);
+    if (_audioDevicePtr->SetStereoPlayout(available) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_SOUNDCARD_ERROR, kTraceWarning,
+            "SetPlayoutDevice() failed to set stereo playout mode");
+    }
+
+    // Restore playout if it was enabled already when calling this function.
+    if (isPlaying)
+    {
+        if (!_externalPlayout)
+        {
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                         "SetPlayoutDevice() playout is now being restored...");
+            if (_audioDevicePtr->InitPlayout() != 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceVoice,
+                             VoEId(_instanceId, -1),
+                             "SetPlayoutDevice() failed to initialize playout");
+                return -1;
+            }
+            if (_audioDevicePtr->StartPlayout() != 0)
+            {
+                WEBRTC_TRACE(kTraceError, kTraceVoice,
+                             VoEId(_instanceId, -1),
+                             "SetPlayoutDevice() failed to start playout");
+                return -1;
+            }
+        }
+    }
+
+    return 0;
+}
+
+int VoEHardwareImpl::GetRecordingDeviceStatus(bool& isAvailable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetRecordingDeviceStatus()");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    // We let the module handle the isRecording sanity check
+
+    bool available(false);
+
+    // Check availability
+    if (_audioDevicePtr->RecordingIsAvailable(&available) != 0)
+    {
+        _engineStatistics.SetLastError(VE_UNDEFINED_SC_REC_ERR, kTraceError,
+                                       "  Audio Device error");
+        return -1;
+    }
+
+    isAvailable = available;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "  Output: isAvailable = %d)", (int) isAvailable);
+
+    return 0;
+}
+
+int VoEHardwareImpl::GetPlayoutDeviceStatus(bool& isAvailable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetPlayoutDeviceStatus()");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    // We let the module handle the isPlaying sanity check
+
+    bool available(false);
+
+    // Check availability
+    if (_audioDevicePtr->PlayoutIsAvailable(&available) != 0)
+    {
+        _engineStatistics.SetLastError(VE_PLAY_UNDEFINED_SC_ERR,
+                                       kTraceError, "  Audio Device error");
+        return -1;
+    }
+
+    isAvailable = available;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "  Output: isAvailable = %d)", (int) isAvailable);
+
+    return 0;
+}
+
+int VoEHardwareImpl::ResetAudioDevice()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "ResetAudioDevice()");
+    ANDROID_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+#if defined(MAC_IPHONE)
+    if (_audioDevicePtr->ResetAudioDevice() < 0)
+    {
+        _engineStatistics.SetLastError(VE_SOUNDCARD_ERROR, kTraceError,
+                                       "  Failed to reset sound device");
+        return -1;
+    }
+#else
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                                   "  no support for resetting sound device");
+    return -1;
+#endif
+
+    return 0;
+}
+
+int VoEHardwareImpl::AudioDeviceControl(unsigned int par1, unsigned int par2,
+                                        unsigned int par3)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "AudioDeviceControl(%i, %i, %i)", par1, par2, par3);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                                   "  no support for resetting sound device");
+    return -1;
+}
+
+int VoEHardwareImpl::SetLoudspeakerStatus(bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetLoudspeakerStatus(enable=%i)", (int) enable);
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+#if defined(ANDROID)
+    if (_audioDevicePtr->SetLoudspeakerStatus(enable) < 0)
+    {
+        _engineStatistics.SetLastError(VE_IGNORED_FUNCTION, kTraceError,
+                                       "  Failed to set loudspeaker status");
+        return -1;
+    }
+
+    return 0;
+#else
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                                   "  no support for setting loudspeaker"
+                                   " status");
+    return -1;
+#endif
+}
+
+int VoEHardwareImpl::GetLoudspeakerStatus(bool& enabled)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetLoudspeakerStatus()");
+    IPHONE_NOT_SUPPORTED();
+
+#if defined(ANDROID)
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    if (_audioDevicePtr->GetLoudspeakerStatus(enabled) < 0)
+    {
+        _engineStatistics.SetLastError(VE_IGNORED_FUNCTION, kTraceError,
+                                       "  Failed to get loudspeaker status");
+        return -1;
+    }
+
+    return 0;
+#else
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                                   "  no support for setting loudspeaker "
+                                   "status");
+    return -1;
+#endif
+}
+
+int VoEHardwareImpl::GetCPULoad(int& loadPercent)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetCPULoad()");
+    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    // Get CPU load from ADM
+    WebRtc_UWord16 load(0);
+    if (_audioDevicePtr->CPULoad(&load) != 0)
+    {
+        _engineStatistics.SetLastError(VE_CPU_INFO_ERROR, kTraceError,
+                                       "  error getting system CPU load");
+        return -1;
+    }
+
+    loadPercent = static_cast<int> (load);
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "  Output: loadPercent = %d", loadPercent);
+
+    return 0;
+}
+
+int VoEHardwareImpl::GetSystemCPULoad(int& loadPercent)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetSystemCPULoad(loadPercent=?)");
+    ANDROID_NOT_SUPPORTED(); IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    // Check if implemented for this platform
+    if (!_cpu)
+    {
+        _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                                       "  no support for getting system CPU "
+                                       "load");
+        return -1;
+    }
+
+    // Get CPU load
+    WebRtc_Word32 load = _cpu->CpuUsage();
+    if (load < 0)
+    {
+        _engineStatistics.SetLastError(VE_CPU_INFO_ERROR, kTraceError,
+                                       "  error getting system CPU load");
+        return -1;
+    }
+
+    loadPercent = static_cast<int> (load);
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "  Output: loadPercent = %d", loadPercent);
+
+    return 0;
+}
+
+#endif  // WEBRTC_VOICE_ENGINE_HARDWARE_API
+
+} // namespace webrtc
diff --git a/voice_engine/main/source/voe_hardware_impl.h b/voice_engine/main/source/voe_hardware_impl.h
new file mode 100644
index 0000000..c1feb26
--- /dev/null
+++ b/voice_engine/main/source/voe_hardware_impl.h
@@ -0,0 +1,80 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_HARDWARE_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_HARDWARE_IMPL_H
+
+#include "voe_hardware.h"
+
+#include "ref_count.h"
+#include "shared_data.h"
+
+namespace webrtc
+{
+class CpuWrapper;
+
+class VoEHardwareImpl: public virtual voe::SharedData,
+                       public VoEHardware,
+                       public voe::RefCount
+{
+public:
+    virtual int Release();
+
+    virtual int GetNumOfRecordingDevices(int& devices);
+
+    virtual int GetNumOfPlayoutDevices(int& devices);
+
+    virtual int GetRecordingDeviceName(int index,
+                                       char strNameUTF8[128],
+                                       char strGuidUTF8[128]);
+
+    virtual int GetPlayoutDeviceName(int index,
+                                     char strNameUTF8[128],
+                                     char strGuidUTF8[128]);
+
+    virtual int GetRecordingDeviceStatus(bool& isAvailable);
+
+    virtual int GetPlayoutDeviceStatus(bool& isAvailable);
+
+    virtual int SetRecordingDevice(
+        int index,
+        StereoChannel recordingChannel = kStereoBoth);
+
+    virtual int SetPlayoutDevice(int index);
+
+    virtual int SetAudioDeviceLayer(AudioLayers audioLayer);
+
+    virtual int GetAudioDeviceLayer(AudioLayers& audioLayer);
+
+    virtual int GetCPULoad(int& loadPercent);
+
+    virtual int GetSystemCPULoad(int& loadPercent);
+
+    virtual int ResetAudioDevice();
+
+    virtual int AudioDeviceControl(unsigned int par1,
+                                   unsigned int par2,
+                                   unsigned int par3);
+
+    virtual int SetLoudspeakerStatus(bool enable);
+
+    virtual int GetLoudspeakerStatus(bool& enabled);
+
+protected:
+    VoEHardwareImpl();
+    virtual ~VoEHardwareImpl();
+
+private:
+    CpuWrapper*  _cpu;
+};
+
+} // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_VOE_HARDWARE_IMPL_H
diff --git a/voice_engine/main/source/voe_neteq_stats_impl.cc b/voice_engine/main/source/voe_neteq_stats_impl.cc
new file mode 100644
index 0000000..adb934a
--- /dev/null
+++ b/voice_engine/main/source/voe_neteq_stats_impl.cc
@@ -0,0 +1,178 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_neteq_stats_impl.h"
+
+#include "audio_coding_module.h"
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+
+namespace webrtc {
+
+VoENetEqStats* VoENetEqStats::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+    return NULL;
+#else
+    if (NULL == voiceEngine)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* s =
+        reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoENetEqStatsImpl* d = s;
+    (*d)++;
+    return (d);
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+
+VoENetEqStatsImpl::VoENetEqStatsImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoENetEqStatsImpl::VoENetEqStatsImpl() - ctor");
+}
+
+VoENetEqStatsImpl::~VoENetEqStatsImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoENetEqStatsImpl::~VoENetEqStatsImpl() - dtor");
+}
+
+int VoENetEqStatsImpl::Release()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoENetEqStats::Release()");
+    (*this)--;
+    int refCount = GetCount();
+    if (refCount < 0)
+    {
+        Reset();  // reset reference counter to zero => OK to delete VE
+        _engineStatistics.SetLastError(
+            VE_INTERFACE_NOT_FOUND, kTraceWarning);
+        return (-1);
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoENetEqStats reference counter = %d", refCount);
+    return (refCount);
+}
+
+int VoENetEqStatsImpl::GetNetworkStatistics(int channel,
+                                            NetworkStatistics& stats)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetNetworkStatistics(channel=%d, stats=?)", channel);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetNetworkStatistics() failed to locate channel");
+        return -1;
+    }
+
+    return channelPtr->GetNetworkStatistics(stats);
+}
+
+int VoENetEqStatsImpl::GetJitterStatistics(int channel,
+                                           JitterStatistics& stats)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetJitterStatistics(channel=%i)", channel);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetJitterStatistics() failed to locate channel");
+        return -1;
+    }
+
+    return channelPtr->GetJitterStatistics(stats);
+}
+
+int VoENetEqStatsImpl::GetPreferredBufferSize(
+    int channel,
+    unsigned short& preferredBufferSize)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetPreferredBufferSize(channel=%i, ?)", channel);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetPreferredBufferSize() failed to locate channel");
+        return -1;
+    }
+
+    return channelPtr->GetPreferredBufferSize(preferredBufferSize);
+}
+
+int VoENetEqStatsImpl::ResetJitterStatistics(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "ResetJitterStatistics(channel=%i)", channel);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "ResetJitterStatistics() failed to locate channel");
+        return -1;
+    }
+
+    return channelPtr->ResetJitterStatistics();
+}
+
+#endif  // #ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+
+}   // namespace webrtc
diff --git a/voice_engine/main/source/voe_neteq_stats_impl.h b/voice_engine/main/source/voe_neteq_stats_impl.h
new file mode 100644
index 0000000..e8a7d27
--- /dev/null
+++ b/voice_engine/main/source/voe_neteq_stats_impl.h
@@ -0,0 +1,46 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_IMPL_H
+
+#include "voe_neteq_stats.h"
+
+#include "ref_count.h"
+#include "shared_data.h"
+
+namespace webrtc {
+
+class VoENetEqStatsImpl : public virtual voe::SharedData,
+                          public VoENetEqStats,
+                          public voe::RefCount
+{
+public:
+    virtual int Release();
+
+    virtual int GetNetworkStatistics(int channel,
+                                     NetworkStatistics& stats);
+
+    virtual int GetJitterStatistics(int channel,
+                                    JitterStatistics& stats);
+
+    virtual int GetPreferredBufferSize(int channel,
+                                       unsigned short& preferredBufferSize);
+
+    virtual int ResetJitterStatistics(int channel);
+
+protected:
+    VoENetEqStatsImpl();
+    virtual ~VoENetEqStatsImpl();
+};
+
+}  // namespace webrtc
+
+#endif    // WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_IMPL_H
diff --git a/voice_engine/main/source/voe_network_impl.cc b/voice_engine/main/source/voe_network_impl.cc
new file mode 100644
index 0000000..07882eb
--- /dev/null
+++ b/voice_engine/main/source/voe_network_impl.cc
@@ -0,0 +1,944 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_network_impl.h"
+
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc
+{
+
+VoENetwork* VoENetwork::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_NETWORK_API
+    return NULL;
+#else
+    if (NULL == voiceEngine)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* s =
+            reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoENetworkImpl* d = s;
+    (*d)++;
+    return (d);
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
+
+VoENetworkImpl::VoENetworkImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoENetworkImpl() - ctor");
+}
+
+VoENetworkImpl::~VoENetworkImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+                 "~VoENetworkImpl() - dtor");
+}
+
+int VoENetworkImpl::Release()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoENetworkImpl::Release()");
+    (*this)--;
+    int refCount = GetCount();
+    if (refCount < 0)
+    {
+        Reset();
+        _engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
+                                       kTraceWarning);
+        return (-1);
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "VoENetworkImpl reference counter = %d", refCount);
+    return (refCount);
+}
+
+int VoENetworkImpl::RegisterExternalTransport(int channel,
+                                              Transport& transport)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetExternalTransport(channel=%d, transport=0x%x)",
+                 channel, &transport);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetExternalTransport() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->RegisterExternalTransport(transport);
+}
+
+int VoENetworkImpl::DeRegisterExternalTransport(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "DeRegisterExternalTransport(channel=%d)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "DeRegisterExternalTransport() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->DeRegisterExternalTransport();
+}
+
+int VoENetworkImpl::ReceivedRTPPacket(int channel,
+                                      const void* data,
+                                      unsigned int length)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "ReceivedRTPPacket(channel=%d, length=%u)", channel, length);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
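+    // The RTP fixed header is 12 bytes; packets outside this size range
+    // are rejected before being passed to the channel.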
+    if ((length < 12) || (length > 807))
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_PACKET, kTraceError,
+            "ReceivedRTPPacket() invalid packet length");
+        return -1;
+    }
+    if (NULL == data)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "ReceivedRTPPacket() invalid data vector");
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "ReceivedRTPPacket() failed to locate channel");
+        return -1;
+    }
+
+    if (!channelPtr->ExternalTransport())
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "ReceivedRTPPacket() external transport is not enabled");
+        return -1;
+    }
+    return channelPtr->ReceivedRTPPacket((const WebRtc_Word8*) data, length);
+}
+
+int VoENetworkImpl::ReceivedRTCPPacket(int channel, const void* data,
+                                       unsigned int length)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "ReceivedRTCPPacket(channel=%d, length=%u)", channel, length);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
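+    // The common RTCP header is 4 bytes; anything shorter cannot be valid.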
+    if (length < 4)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_PACKET, kTraceError,
+            "ReceivedRTCPPacket() invalid packet length");
+        return -1;
+    }
+    if (NULL == data)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "ReceivedRTCPPacket() invalid data vector");
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "ReceivedRTCPPacket() failed to locate channel");
+        return -1;
+    }
+    if (!channelPtr->ExternalTransport())
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "ReceivedRTCPPacket() external transport is not enabled");
+        return -1;
+    }
+    return channelPtr->ReceivedRTCPPacket((const WebRtc_Word8*) data, length);
+}
+
+int VoENetworkImpl::GetSourceInfo(int channel,
+                                  int& rtpPort,
+                                  int& rtcpPort,
+                                  char ipAddr[64])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetSourceInfo(channel=%d, rtpPort=?, rtcpPort=?, ipAddr[]=?)",
+                 channel);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (NULL == ipAddr)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "GetSourceInfo() invalid IP-address buffer");
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetSourceInfo() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->ExternalTransport())
+    {
+        _engineStatistics.SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "GetSourceInfo() external transport is enabled");
+        return -1;
+    }
+    return channelPtr->GetSourceInfo(rtpPort, rtcpPort, ipAddr);
+#else
+    _engineStatistics.SetLastError(
+        VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "GetSourceInfo() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+int VoENetworkImpl::GetLocalIP(char ipAddr[64], bool ipv6)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetLocalIP(ipAddr[]=?, ipv6=%d)", ipv6);
+    IPHONE_NOT_SUPPORTED();
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (NULL == ipAddr)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "GetLocalIP() invalid IP-address buffer");
+        return -1;
+    }
+
+    // Create a temporary socket module to ensure that this method can be
+    // called also when no channels are created.
+    WebRtc_UWord8 numSockThreads(1);
+    UdpTransport* socketPtr = UdpTransport::Create(-1, numSockThreads);
+    if (NULL == socketPtr)
+    {
+        _engineStatistics.SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+            "GetLocalIP() failed to create socket module");
+        return -1;
+    }
+
+    WebRtc_Word8 localIPAddr[64];
+
+    if (ipv6)
+    {
+        WebRtc_UWord8 localIP[16];
+        if (socketPtr->LocalHostAddressIPV6(localIP) != 0)
+        {
+            _engineStatistics.SetLastError(
+                VE_INVALID_IP_ADDRESS, kTraceError,
+                "GetLocalIP() failed to retrieve local IP - 1");
+            UdpTransport::Destroy(socketPtr);
+            return -1;
+        }
+        // Convert 128-bit address to character string (a:b:c:d:e:f:g:h)
+        sprintf(localIPAddr,
+                "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x"
+                "%.2x:%.2x%.2x",
+                localIP[0], localIP[1], localIP[2], localIP[3], localIP[4],
+                localIP[5], localIP[6], localIP[7], localIP[8], localIP[9],
+                localIP[10], localIP[11], localIP[12], localIP[13],
+                localIP[14], localIP[15]);
+    }
+    else
+    {
+        WebRtc_UWord32 localIP(0);
+        // Read local IP (as 32-bit address) from the socket module
+        if (socketPtr->LocalHostAddress(localIP) != 0)
+        {
+            _engineStatistics.SetLastError(
+                VE_INVALID_IP_ADDRESS, kTraceError,
+                "GetLocalIP() failed to retrieve local IP - 2");
+            UdpTransport::Destroy(socketPtr);
+            return -1;
+        }
+        // Convert 32-bit address to character string (x.y.z.w)
+        sprintf(localIPAddr, "%d.%d.%d.%d", (int) ((localIP >> 24) & 0x0ff),
+                (int) ((localIP >> 16) & 0x0ff),
+                (int) ((localIP >> 8) & 0x0ff),
+                (int) (localIP & 0x0ff));
+    }
+
+    strcpy(ipAddr, localIPAddr);
+
+    UdpTransport::Destroy(socketPtr);
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetLocalIP() => ipAddr=%s", ipAddr);
+    return 0;
+#else
+    _engineStatistics.SetLastError(
+        VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "GetLocalIP() VoE is built for external transport");
+    return -1;
+#endif
+}
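+
+// Usage sketch (illustrative): retrieve the local IPv4 address as a
+// dotted-decimal string; the names netw and localIP are assumed here.
+//
+//  char localIP[64];
+//  netw->GetLocalIP(localIP);        // e.g. "192.168.1.10"
+//  netw->GetLocalIP(localIP, true);  // hex-grouped IPv6 form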
+
+int VoENetworkImpl::EnableIPv6(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "EnableIPv6(channel=%d)", channel);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "EnableIPv6() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->ExternalTransport())
+    {
+        _engineStatistics.SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "EnableIPv6() external transport is enabled");
+        return -1;
+    }
+    return channelPtr->EnableIPv6();
+#else
+    _engineStatistics.SetLastError(
+        VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "EnableIPv6() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+bool VoENetworkImpl::IPv6IsEnabled(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "IPv6IsEnabled(channel=%d)", channel);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return false;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "IPv6IsEnabled() failed to locate channel");
+        return false;
+    }
+    if (channelPtr->ExternalTransport())
+    {
+        _engineStatistics.SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "IPv6IsEnabled() external transport is enabled");
+        return false;
+    }
+    return channelPtr->IPv6IsEnabled();
+#else
+    _engineStatistics.SetLastError(
+        VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "IPv6IsEnabled() VoE is built for external transport");
+    return false;
+#endif
+}
+
+int VoENetworkImpl::SetSourceFilter(int channel,
+                                    int rtpPort,
+                                    int rtcpPort,
+                                    const char ipAddr[64])
+{
+    if (ipAddr == NULL)
+    {
+        WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                     "SetSourceFilter(channel=%d, rtpPort=%d, rtcpPort=%d)",
+                     channel, rtpPort, rtcpPort);
+    }
+    else
+    {
+        WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                     "SetSourceFilter(channel=%d, rtpPort=%d, rtcpPort=%d,"
+                     " ipAddr=%s)", channel, rtpPort, rtcpPort, ipAddr);
+    }
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if ((rtpPort < 0) || (rtpPort > 65535))
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_PORT_NMBR, kTraceError,
+            "SetSourceFilter() invalid RTP port");
+        return -1;
+    }
+    if ((rtcpPort < 0) || (rtcpPort > 65535))
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_PORT_NMBR, kTraceError,
+            "SetSourceFilter() invalid RTCP port");
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetSourceFilter() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->ExternalTransport())
+    {
+        _engineStatistics.SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "SetSourceFilter() external transport is enabled");
+        return -1;
+    }
+    return channelPtr->SetSourceFilter(rtpPort, rtcpPort, ipAddr);
+#else
+    _engineStatistics.SetLastError(
+        VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "SetSourceFilter() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+int VoENetworkImpl::GetSourceFilter(int channel,
+                                    int& rtpPort,
+                                    int& rtcpPort,
+                                    char ipAddr[64])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetSourceFilter(channel=%d, rtpPort=?, rtcpPort=?, "
+                 "ipAddr[]=?)",
+                 channel);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (NULL == ipAddr)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "GetSourceFilter() invalid IP-address buffer");
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetSourceFilter() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->ExternalTransport())
+    {
+        _engineStatistics.SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "GetSourceFilter() external transport is enabled");
+        return -1;
+    }
+    return channelPtr->GetSourceFilter(rtpPort, rtcpPort, ipAddr);
+#else
+    _engineStatistics.SetLastError(
+        VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "GetSourceFilter() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+int VoENetworkImpl::SetSendTOS(int channel,
+                               int DSCP,
+                               int priority,
+                               bool useSetSockopt)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetSendTOS(channel=%d, DSCP=%d, priority=%d,"
+                 " useSetSockopt=%d)",
+                 channel, DSCP, priority, useSetSockopt);
+
+#if !defined(_WIN32) && !defined(WEBRTC_LINUX) && !defined(WEBRTC_MAC)
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceWarning,
+        "SetSendTOS() is not supported on this platform");
+    return -1;
+#endif
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if ((DSCP < 0) || (DSCP > 63))
+    {
+        _engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                                       "SetSendTOS() Invalid DSCP value");
+        return -1;
+    }
+#if defined(_WIN32) || defined(WEBRTC_LINUX)
+    if ((priority < -1) || (priority > 7))
+    {
+        _engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                                       "SetSendTOS() Invalid priority value");
+        return -1;
+    }
+#else
+    if (-1 != priority)
+    {
+        _engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                                       "SetSendTOS() priority not supported");
+        return -1;
+    }
+#endif
+#if defined(_WIN32)
+    if ((priority >= 0) && useSetSockopt)
+    {
+        // On Windows, priority and useSetSockopt cannot be combined
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetSendTOS() priority and useSetSockopt conflict");
+        return -1;
+    }
+#endif
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "SetSendTOS() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->ExternalTransport())
+    {
+        _engineStatistics.SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "SetSendTOS() external transport is enabled");
+        return -1;
+    }
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+    useSetSockopt = true;
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "   force useSetSockopt=true since there is no alternative"
+                 " implementation");
+#endif
+
+    return channelPtr->SetSendTOS(DSCP, priority, useSetSockopt);
+#else
+    _engineStatistics.SetLastError(
+        VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "SetSendTOS() VoE is built for external transport");
+    return -1;
+#endif
+}
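+
+// Usage sketch (illustrative, not normative): mark outgoing voice packets
+// with DSCP 46 (Expedited Forwarding, a common choice for voice) and use
+// the setsockopt() path, which is forced on Linux/Mac anyway; netw and ch
+// are assumed names.
+//
+//  netw->SetSendTOS(ch, 46, -1, true);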
+
+int VoENetworkImpl::GetSendTOS(int channel,
+                               int& DSCP,
+                               int& priority,
+                               bool& useSetSockopt)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetSendTOS(channel=%d)", channel);
+
+#if !defined(_WIN32) && !defined(WEBRTC_LINUX) && !defined(WEBRTC_MAC)
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceWarning,
+        "GetSendTOS() is not supported on this platform");
+    return -1;
+#endif
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                       "GetSendTOS() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->ExternalTransport())
+    {
+        _engineStatistics.SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "GetSendTOS() external transport is enabled");
+        return -1;
+    }
+    return channelPtr->GetSendTOS(DSCP, priority, useSetSockopt);
+#else
+    _engineStatistics.SetLastError(
+        VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "GetSendTOS() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+int VoENetworkImpl::SetSendGQoS(int channel,
+                                bool enable,
+                                int serviceType,
+                                int overrideDSCP)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetSendGQOS(channel=%d, enable=%d, serviceType=%d,"
+                 " overrideDSCP=%d)",
+                 channel, (int) enable, serviceType, overrideDSCP);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+#if !defined(_WIN32)
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceWarning,
+        "SetSendGQOS() is not supported on this platform");
+    return -1;
+#elif !defined(WEBRTC_EXTERNAL_TRANSPORT)
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                      "SetSendGQOS() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->ExternalTransport())
+    {
+        _engineStatistics.SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "SetSendGQOS() external transport is enabled");
+        return -1;
+    }
+    return channelPtr->SetSendGQoS(enable, serviceType, overrideDSCP);
+#else
+    _engineStatistics.SetLastError(
+        VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "SetSendGQOS() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+int VoENetworkImpl::GetSendGQoS(int channel,
+                                bool& enabled,
+                                int& serviceType,
+                                int& overrideDSCP)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetSendGQOS(channel=%d)", channel);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+#if !defined(_WIN32)
+    _engineStatistics.SetLastError(
+        VE_FUNC_NOT_SUPPORTED, kTraceWarning,
+        "GetSendGQOS() is not supported on this platform");
+    return -1;
+#elif !defined(WEBRTC_EXTERNAL_TRANSPORT)
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                                      "GetSendGQOS() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->ExternalTransport())
+    {
+        _engineStatistics.SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "GetSendGQOS() external transport is enabled");
+        return -1;
+    }
+    return channelPtr->GetSendGQoS(enabled, serviceType, overrideDSCP);
+#else
+    _engineStatistics.SetLastError(
+        VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "GetSendGQOS() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+int VoENetworkImpl::SetPacketTimeoutNotification(int channel,
+                                                 bool enable,
+                                                 int timeoutSeconds)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetPacketTimeoutNotification(channel=%d, enable=%d, "
+                 "timeoutSeconds=%d)",
+                 channel, (int) enable, timeoutSeconds);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (enable &&
+        ((timeoutSeconds < kVoiceEngineMinPacketTimeoutSec) ||
+        (timeoutSeconds > kVoiceEngineMaxPacketTimeoutSec)))
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetPacketTimeoutNotification() invalid timeout size");
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetPacketTimeoutNotification() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetPacketTimeoutNotification(enable, timeoutSeconds);
+}
+
+int VoENetworkImpl::GetPacketTimeoutNotification(int channel,
+                                                 bool& enabled,
+                                                 int& timeoutSeconds)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetPacketTimeoutNotification(channel=%d, enabled=?,"
+                 " timeoutSeconds=?)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetPacketTimeoutNotification() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetPacketTimeoutNotification(enabled, timeoutSeconds);
+}
+
+int VoENetworkImpl::RegisterDeadOrAliveObserver(int channel,
+                                                VoEConnectionObserver&
+                                                observer)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "RegisterDeadOrAliveObserver(channel=%d, observer=0x%x)",
+                 channel, &observer);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "RegisterDeadOrAliveObserver() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->RegisterDeadOrAliveObserver(observer);
+}
+
+int VoENetworkImpl::DeRegisterDeadOrAliveObserver(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "DeRegisterDeadOrAliveObserver(channel=%d)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "DeRegisterDeadOrAliveObserver() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->DeRegisterDeadOrAliveObserver();
+}
+
+int VoENetworkImpl::SetPeriodicDeadOrAliveStatus(int channel, bool enable,
+                                                 int sampleTimeSeconds)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SetPeriodicDeadOrAliveStatus(channel=%d, enable=%d,"
+                 " sampleTimeSeconds=%d)",
+                 channel, enable, sampleTimeSeconds);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (enable &&
+        ((sampleTimeSeconds < kVoiceEngineMinSampleTimeSec) ||
+        (sampleTimeSeconds > kVoiceEngineMaxSampleTimeSec)))
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetPeriodicDeadOrAliveStatus() invalid sample time");
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetPeriodicDeadOrAliveStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetPeriodicDeadOrAliveStatus(enable, sampleTimeSeconds);
+}
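+
+// Usage sketch (illustrative): combine a connection observer with periodic
+// dead-or-alive sampling every 2 seconds; netw, ch and myConnectionObserver
+// are assumed names.
+//
+//  netw->RegisterDeadOrAliveObserver(ch, myConnectionObserver);
+//  netw->SetPeriodicDeadOrAliveStatus(ch, true, 2);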
+
+int VoENetworkImpl::GetPeriodicDeadOrAliveStatus(int channel,
+                                                 bool& enabled,
+                                                 int& sampleTimeSeconds)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "GetPeriodicDeadOrAliveStatus(channel=%d, enabled=?,"
+                 " sampleTimeSeconds=?)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetPeriodicDeadOrAliveStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetPeriodicDeadOrAliveStatus(enabled,
+                                                    sampleTimeSeconds);
+}
+
+int VoENetworkImpl::SendUDPPacket(int channel,
+                                  const void* data,
+                                  unsigned int length,
+                                  int& transmittedBytes,
+                                  bool useRtcpSocket)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId, -1),
+                 "SendUDPPacket(channel=%d, data=0x%x, length=%u, useRTCP=%d)",
+                 channel, data, length, useRtcpSocket);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (NULL == data)
+    {
+        _engineStatistics.SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+                                       "SendUDPPacket() invalid data buffer");
+        return -1;
+    }
+    if (0 == length)
+    {
+        _engineStatistics.SetLastError(VE_INVALID_PACKET, kTraceError,
+                                       "SendUDPPacket() invalid packet size");
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SendUDPPacket() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SendUDPPacket(data,
+                                     length,
+                                     transmittedBytes,
+                                     useRtcpSocket);
+#else
+    _engineStatistics.SetLastError(
+        VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "SendUDPPacket() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+#endif  // WEBRTC_VOICE_ENGINE_NETWORK_API
+
+} // namespace webrtc
diff --git a/voice_engine/main/source/voe_network_impl.h b/voice_engine/main/source/voe_network_impl.h
new file mode 100644
index 0000000..cc300ac
--- /dev/null
+++ b/voice_engine/main/source/voe_network_impl.h
@@ -0,0 +1,117 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_NETWORK_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_NETWORK_IMPL_H
+
+#include "voe_network.h"
+
+#include "ref_count.h"
+#include "shared_data.h"
+
+
+namespace webrtc
+{
+
+class VoENetworkImpl: public virtual voe::SharedData,
+                      public VoENetwork,
+                      public voe::RefCount
+{
+public:
+    virtual int Release();
+
+    virtual int RegisterExternalTransport(int channel, Transport& transport);
+
+    virtual int DeRegisterExternalTransport(int channel);
+
+    virtual int ReceivedRTPPacket(int channel,
+                                  const void* data,
+                                  unsigned int length);
+
+    virtual int ReceivedRTCPPacket(int channel,
+                                   const void* data,
+                                   unsigned int length);
+
+    virtual int GetSourceInfo(int channel,
+                              int& rtpPort,
+                              int& rtcpPort,
+                              char ipAddr[64]);
+
+    virtual int GetLocalIP(char ipAddr[64], bool ipv6 = false);
+
+    virtual int EnableIPv6(int channel);
+
+    virtual bool IPv6IsEnabled(int channel);
+
+    virtual int SetSourceFilter(int channel,
+                                int rtpPort,
+                                int rtcpPort,
+                                const char ipAddr[64] = 0);
+
+    virtual int GetSourceFilter(int channel,
+                                int& rtpPort,
+                                int& rtcpPort,
+                                char ipAddr[64]);
+
+    virtual int SetSendTOS(int channel,
+                           int DSCP,
+                           int priority = -1,
+                           bool useSetSockopt = false);
+
+    virtual int GetSendTOS(int channel,
+                           int& DSCP,
+                           int& priority,
+                           bool& useSetSockopt);
+
+    virtual int SetSendGQoS(int channel,
+                            bool enable,
+                            int serviceType,
+                            int overrideDSCP);
+
+    virtual int GetSendGQoS(int channel,
+                            bool& enabled,
+                            int& serviceType,
+                            int& overrideDSCP);
+
+    virtual int SetPacketTimeoutNotification(int channel,
+                                             bool enable,
+                                             int timeoutSeconds = 2);
+
+    virtual int GetPacketTimeoutNotification(int channel,
+                                             bool& enabled,
+                                             int& timeoutSeconds);
+
+    virtual int RegisterDeadOrAliveObserver(int channel,
+                                            VoEConnectionObserver& observer);
+
+    virtual int DeRegisterDeadOrAliveObserver(int channel);
+
+    virtual int SetPeriodicDeadOrAliveStatus(int channel,
+                                             bool enable,
+                                             int sampleTimeSeconds = 2);
+
+    virtual int GetPeriodicDeadOrAliveStatus(int channel,
+                                             bool& enabled,
+                                             int& sampleTimeSeconds);
+
+    virtual int SendUDPPacket(int channel,
+                              const void* data,
+                              unsigned int length,
+                              int& transmittedBytes,
+                              bool useRtcpSocket = false);
+
+protected:
+    VoENetworkImpl();
+    virtual ~VoENetworkImpl();
+};
+
+} // namespace webrtc
+
+#endif  // WEBRTC_VOICE_ENGINE_VOE_NETWORK_IMPL_H
diff --git a/voice_engine/main/source/voe_rtp_rtcp_impl.cc b/voice_engine/main/source/voe_rtp_rtcp_impl.cc
new file mode 100644
index 0000000..98629da
--- /dev/null
+++ b/voice_engine/main/source/voe_rtp_rtcp_impl.cc
@@ -0,0 +1,747 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_rtp_rtcp_impl.h"
+#include "trace.h"
+#include "file_wrapper.h"
+#include "critical_section_wrapper.h"
+#include "voice_engine_impl.h"
+#include "voe_errors.h"
+
+#include "channel.h"
+#include "transmit_mixer.h"
+
+namespace webrtc {
+
+VoERTP_RTCP* VoERTP_RTCP::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+    return NULL;
+#else
+    if (NULL == voiceEngine)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* s = reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoERTP_RTCPImpl* d = s;
+    (*d)++;
+    return (d);
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+
+VoERTP_RTCPImpl::VoERTP_RTCPImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoERTP_RTCPImpl::VoERTP_RTCPImpl() - ctor");
+}
+
+VoERTP_RTCPImpl::~VoERTP_RTCPImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoERTP_RTCPImpl::~VoERTP_RTCPImpl() - dtor");
+}
+
+int VoERTP_RTCPImpl::Release()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoERTP_RTCP::Release()");
+    (*this)--;
+    int refCount = GetCount();
+    if (refCount < 0)
+    {
+        Reset();  // reset reference counter to zero => OK to delete VE
+        _engineStatistics.SetLastError(
+            VE_INTERFACE_NOT_FOUND, kTraceWarning);
+        return (-1);
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoERTP_RTCP reference counter = %d", refCount);
+    return (refCount);
+}
+
+int VoERTP_RTCPImpl::RegisterRTPObserver(int channel, VoERTPObserver& observer)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "RegisterRTPObserver(channel=%d observer=0x%x)",
+                 channel, &observer);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "RegisterRTPObserver() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->RegisterRTPObserver(observer);
+}
+
+int VoERTP_RTCPImpl::DeRegisterRTPObserver(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "DeRegisterRTPObserver(channel=%d)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "DeRegisterRTPObserver() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->DeRegisterRTPObserver();
+}
+
+int VoERTP_RTCPImpl::RegisterRTCPObserver(int channel, VoERTCPObserver& observer)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "RegisterRTCPObserver(channel=%d observer=0x%x)",
+                 channel, &observer);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "RegisterRTPObserver() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->RegisterRTCPObserver(observer);
+}
+
+int VoERTP_RTCPImpl::DeRegisterRTCPObserver(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "DeRegisterRTCPObserver(channel=%d)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "DeRegisterRTCPObserver() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->DeRegisterRTCPObserver();
+}
+
+int VoERTP_RTCPImpl::SetLocalSSRC(int channel, unsigned int ssrc)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetLocalSSRC(channel=%d, %lu)", channel, ssrc);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetLocalSSRC() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetLocalSSRC(ssrc);
+}
+
+int VoERTP_RTCPImpl::GetLocalSSRC(int channel, unsigned int& ssrc)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetLocalSSRC(channel=%d, ssrc=?)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetLocalSSRC() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetLocalSSRC(ssrc);
+}
+
+int VoERTP_RTCPImpl::GetRemoteSSRC(int channel, unsigned int& ssrc)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetRemoteSSRC(channel=%d, ssrc=?)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRemoteSSRC() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRemoteSSRC(ssrc);
+}
+
+int VoERTP_RTCPImpl::GetRemoteCSRCs(int channel, unsigned int arrCSRC[15])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetRemoteCSRCs(channel=%d, arrCSRC=?)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRemoteCSRCs() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRemoteCSRCs(arrCSRC);
+}
+
+
+int VoERTP_RTCPImpl::SetRTPAudioLevelIndicationStatus(int channel,
+                                                      bool enable,
+                                                      unsigned char ID)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetRTPAudioLevelIndicationStatus(channel=%d, enable=%d,"
+                 " ID=%u)", channel, enable, ID);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (ID < kVoiceEngineMinRtpExtensionId ||
+        ID > kVoiceEngineMaxRtpExtensionId)
+    {
+        // [RFC5285] The 4-bit ID is the local identifier of this element in
+        // the range 1-14 inclusive.
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetRTPAudioLevelIndicationStatus() invalid ID parameter");
+        return -1;
+    }
+
+    // Set AudioProcessingModule level-metric mode based on user input.
+    // Note that this setting may conflict with the
+    // AudioProcessing::SetMetricsStatus API.
+    if (_audioProcessingModulePtr->level_estimator()->Enable(enable) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_APM_ERROR, kTraceError,
+            "SetRTPAudioLevelIndicationStatus() failed to set level-metric"
+            "mode");
+        return -1;
+    }
+
+    // Ensure that the transmit mixer reads the audio-level metric for each
+    // 10ms packet and copies the same value to all active channels.
+    // The metric is derived within the AudioProcessingModule.
+    _transmitMixerPtr->SetRTPAudioLevelIndicationStatus(enable);
+
+    // Set state and ID for the specified channel.
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetRTPAudioLevelIndicationStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetRTPAudioLevelIndicationStatus(enable, ID);
+}
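+
+// Usage sketch (illustrative): enable the audio-level RTP header extension
+// with local ID 1 (valid range 1-14, see the RFC 5285 note above); rtp_rtcp
+// is assumed to be obtained via VoERTP_RTCP::GetInterface(voe) and ch is an
+// assumed channel id.
+//
+//  rtp_rtcp->SetRTPAudioLevelIndicationStatus(ch, true, 1);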
+
+int VoERTP_RTCPImpl::GetRTPAudioLevelIndicationStatus(int channel,
+                                                      bool& enabled,
+                                                      unsigned char& ID)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetRTPAudioLevelIndicationStatus(channel=%d, enable=?, ID=?)",
+                 channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRTPAudioLevelIndicationStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRTPAudioLevelIndicationStatus(enabled, ID);
+}
+
+int VoERTP_RTCPImpl::SetRTCPStatus(int channel, bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetRTCPStatus(channel=%d, enable=%d)", channel, enable);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetRTCPStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetRTCPStatus(enable);
+}
+
+int VoERTP_RTCPImpl::GetRTCPStatus(int channel, bool& enabled)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetRTCPStatus(channel=%d)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRTCPStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRTCPStatus(enabled);
+}
+
+int VoERTP_RTCPImpl::SetRTCP_CNAME(int channel, const char cName[256])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetRTCP_CNAME(channel=%d, cName=%s)", channel, cName);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetRTCP_CNAME() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetRTCP_CNAME(cName);
+}
+
+int VoERTP_RTCPImpl::GetRTCP_CNAME(int channel, char cName[256])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetRTCP_CNAME(channel=%d, cName=?)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRTCP_CNAME() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRTCP_CNAME(cName);
+}
+
+int VoERTP_RTCPImpl::GetRemoteRTCP_CNAME(int channel, char cName[256])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetRemoteRTCP_CNAME(channel=%d, cName=?)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRemoteRTCP_CNAME() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRemoteRTCP_CNAME(cName);
+}
+
+int VoERTP_RTCPImpl::GetRemoteRTCPData(
+    int channel,
+    unsigned int& NTPHigh, // from sender info in SR
+    unsigned int& NTPLow, // from sender info in SR
+    unsigned int& timestamp, // from sender info in SR
+    unsigned int& playoutTimestamp, // derived locally
+    unsigned int* jitter, // from report block 1 in SR/RR
+    unsigned short* fractionLost) // from report block 1 in SR/RR
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetRemoteRTCPData(channel=%d,...)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRemoteRTCP_CNAME() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRemoteRTCPData(NTPHigh,
+                                         NTPLow,
+                                         timestamp,
+                                         playoutTimestamp,
+                                         jitter,
+                                         fractionLost);
+}
+
+int VoERTP_RTCPImpl::SendApplicationDefinedRTCPPacket(
+    int channel,
+    const unsigned char subType,
+    unsigned int name,
+    const char* data,
+    unsigned short dataLengthInBytes)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SendApplicationDefinedRTCPPacket(channel=%d, subType=%u,"
+                 " name=%u, data=?, dataLengthInBytes=%u)",
+                 channel, subType, name, dataLengthInBytes);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SendApplicationDefinedRTCPPacket() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SendApplicationDefinedRTCPPacket(subType,
+                                                        name,
+                                                        data,
+                                                        dataLengthInBytes);
+}
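+
+// Usage sketch (illustrative values): send an application-defined RTCP APP
+// packet with subtype 0, the four-character name 'VoE ' (0x566F4520) and a
+// 4-byte payload; rtp_rtcp and ch are assumed names.
+//
+//  const char appData[4] = {0, 0, 0, 0};
+//  rtp_rtcp->SendApplicationDefinedRTCPPacket(ch, 0, 0x566F4520, appData, 4);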
+
+int VoERTP_RTCPImpl::GetRTPStatistics(int channel,
+                                      unsigned int& averageJitterMs,
+                                      unsigned int& maxJitterMs,
+                                      unsigned int& discardedPackets)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetRTPStatistics(channel=%d,....)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRTPStatistics() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRTPStatistics(averageJitterMs,
+                                        maxJitterMs,
+                                        discardedPackets);
+}
+
+int VoERTP_RTCPImpl::GetRTCPStatistics(int channel, CallStatistics& stats)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetRTCPStatistics(channel=%d)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRTPStatistics() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRTPStatistics(stats);
+}
+
+int VoERTP_RTCPImpl::SetFECStatus(int channel, bool enable, int redPayloadtype)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetFECStatus(channel=%d, enable=%d, redPayloadtype=%d)",
+                 channel, enable, redPayloadtype);
+#ifdef WEBRTC_CODEC_RED
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetFECStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetFECStatus(enable, redPayloadtype);
+#else
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                                   "SetFECStatus() RED is not supported");
+    return -1;
+#endif
+}
+
+int VoERTP_RTCPImpl::GetFECStatus(int channel,
+                                  bool& enabled,
+                                  int& redPayloadtype)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetFECStatus(channel=%d, enabled=?, redPayloadtype=?)",
+                 channel);
+#ifdef WEBRTC_CODEC_RED
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetFECStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetFECStatus(enabled, redPayloadtype);
+#else
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+                                   "GetFECStatus() RED is not supported");
+    return -1;
+#endif
+}
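+
+// Usage sketch (illustrative payload type): enable RED-based FEC with RED
+// payload type 127, assuming the build defines WEBRTC_CODEC_RED; rtp_rtcp
+// and ch are assumed names.
+//
+//  rtp_rtcp->SetFECStatus(ch, true, 127);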
+
+int VoERTP_RTCPImpl::SetRTPKeepaliveStatus(int channel,
+                                           bool enable,
+                                           unsigned char unknownPayloadType,
+                                           int deltaTransmitTimeSeconds)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1), 
+                 "SetRTPKeepaliveStatus(channel=%d, enable=%d,"
+                 " unknownPayloadType=%u, deltaTransmitTimeSeconds=%d)",
+                 channel, enable, unknownPayloadType, deltaTransmitTimeSeconds);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetRTPKeepaliveStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetRTPKeepaliveStatus(enable,
+                                             unknownPayloadType,
+                                             deltaTransmitTimeSeconds);
+}
+
+int VoERTP_RTCPImpl::GetRTPKeepaliveStatus(int channel,
+                                           bool& enabled,
+                                           unsigned char& unknownPayloadType,
+                                           int& deltaTransmitTimeSeconds)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetRTPKeepaliveStatus(channel=%d)", channel);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRTPKeepaliveStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRTPKeepaliveStatus(enabled,
+                                             unknownPayloadType,
+                                             deltaTransmitTimeSeconds);
+}
+
+int VoERTP_RTCPImpl::StartRTPDump(int channel,
+                                  const char fileNameUTF8[1024],
+                                  RTPDirections direction)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StartRTPDump(channel=%d, fileNameUTF8=%s, direction=%d)",
+                 channel, fileNameUTF8, direction);
+    assert(1024 == FileWrapper::kMaxFileNameSize);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "StartRTPDump() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->StartRTPDump(fileNameUTF8, direction);
+}
+
+int VoERTP_RTCPImpl::StopRTPDump(int channel, RTPDirections direction)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "StopRTPDump(channel=%d, direction=%d)", channel, direction);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "StopRTPDump() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->StopRTPDump(direction);
+}
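+
+// Usage sketch (illustrative file path; kRtpIncoming is assumed to be the
+// incoming value of RTPDirections): record incoming RTP/RTCP traffic to an
+// rtpdump-compatible file while debugging, then stop the recording.
+//
+//  rtp_rtcp->StartRTPDump(ch, "/tmp/incoming.rtp", kRtpIncoming);
+//  ...
+//  rtp_rtcp->StopRTPDump(ch, kRtpIncoming);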
+
+int VoERTP_RTCPImpl::RTPDumpIsActive(int channel, RTPDirections direction)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "RTPDumpIsActive(channel=%d, direction=%d)",
+                 channel, direction);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "StopRTPDump() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->RTPDumpIsActive(direction);
+}
+
+int VoERTP_RTCPImpl::InsertExtraRTPPacket(int channel,
+                                          unsigned char payloadType,
+                                          bool markerBit,
+                                          const char* payloadData,
+                                          unsigned short payloadSize)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "InsertExtraRTPPacket(channel=%d, payloadType=%u,"
+                 " markerBit=%u, payloadSize=%u)",
+                 channel, payloadType, markerBit, payloadSize);
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "StopRTPDump() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->InsertExtraRTPPacket(payloadType,
+                                            markerBit,
+                                            payloadData,
+                                            payloadSize);
+}
+
+#endif  // #ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+
+}  // namespace webrtc
diff --git a/voice_engine/main/source/voe_rtp_rtcp_impl.h b/voice_engine/main/source/voe_rtp_rtcp_impl.h
new file mode 100644
index 0000000..3cdf162
--- /dev/null
+++ b/voice_engine/main/source/voe_rtp_rtcp_impl.h
@@ -0,0 +1,134 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_IMPL_H
+
+#include "voe_rtp_rtcp.h"
+
+#include "ref_count.h"
+#include "shared_data.h"
+
+namespace webrtc {
+
+class VoERTP_RTCPImpl : public virtual voe::SharedData,
+                        public VoERTP_RTCP,
+                        public voe::RefCount
+{
+public:
+
+    virtual int Release();
+    // Registration of observers for RTP and RTCP callbacks
+    virtual int RegisterRTPObserver(int channel, VoERTPObserver& observer);
+
+    virtual int DeRegisterRTPObserver(int channel);
+
+    virtual int RegisterRTCPObserver(int channel, VoERTCPObserver& observer);
+
+    virtual int DeRegisterRTCPObserver(int channel);
+
+    // RTCP
+    virtual int SetRTCPStatus(int channel, bool enable);
+
+    virtual int GetRTCPStatus(int channel, bool& enabled);
+
+    virtual int SetRTCP_CNAME(int channel, const char cName[256]);
+
+    virtual int GetRTCP_CNAME(int channel, char cName[256]);
+
+    virtual int GetRemoteRTCP_CNAME(int channel, char cName[256]);
+
+    virtual int GetRemoteRTCPData(int channel,
+                                  unsigned int& NTPHigh,
+                                  unsigned int& NTPLow,
+                                  unsigned int& timestamp,
+                                  unsigned int& playoutTimestamp,
+                                  unsigned int* jitter = NULL,
+                                  unsigned short* fractionLost = NULL);
+
+    virtual int SendApplicationDefinedRTCPPacket(
+        int channel,
+        const unsigned char subType,
+        unsigned int name,
+        const char* data,
+        unsigned short dataLengthInBytes);
+
+    // SSRC
+    virtual int SetLocalSSRC(int channel, unsigned int ssrc);
+
+    virtual int GetLocalSSRC(int channel, unsigned int& ssrc);
+
+    virtual int GetRemoteSSRC(int channel, unsigned int& ssrc);
+
+    // RTP Header Extension for Client-to-Mixer Audio Level Indication
+    virtual int SetRTPAudioLevelIndicationStatus(int channel,
+                                                 bool enable,
+                                                 unsigned char ID);
+
+    virtual int GetRTPAudioLevelIndicationStatus(int channel,
+                                                 bool& enabled,
+                                                 unsigned char& ID);
+
+    // CSRC
+    virtual int GetRemoteCSRCs(int channel, unsigned int arrCSRC[15]);
+
+    // Statistics
+    virtual int GetRTPStatistics(int channel,
+                                 unsigned int& averageJitterMs,
+                                 unsigned int& maxJitterMs,
+                                 unsigned int& discardedPackets);
+
+    virtual int GetRTCPStatistics(int channel, CallStatistics& stats);
+
+    // RTP keepalive mechanism (maintains NAT mappings associated with RTP flows)
+    virtual int SetRTPKeepaliveStatus(int channel,
+                                      bool enable,
+                                      unsigned char unknownPayloadType,
+                                      int deltaTransmitTimeSeconds = 15);
+
+    virtual int GetRTPKeepaliveStatus(int channel,
+                                      bool& enabled,
+                                      unsigned char& unknownPayloadType,
+                                      int& deltaTransmitTimeSeconds);
+
+    // FEC
+    virtual int SetFECStatus(int channel,
+                             bool enable,
+                             int redPayloadtype = -1);
+
+    virtual int GetFECStatus(int channel, bool& enabled, int& redPayloadtype);
+
+    // Stores RTP and RTCP packets and dumps them to file (compatible with rtpplay)
+    virtual int StartRTPDump(int channel,
+                             const char fileNameUTF8[1024],
+                             RTPDirections direction = kRtpIncoming);
+
+    virtual int StopRTPDump(int channel,
+                            RTPDirections direction = kRtpIncoming);
+
+    virtual int RTPDumpIsActive(int channel,
+                                RTPDirections direction = kRtpIncoming);
+
+    // Inserts (and transmits) an extra RTP packet into an active RTP audio stream
+    virtual int InsertExtraRTPPacket(int channel,
+                                     unsigned char payloadType,
+                                     bool markerBit,
+                                     const char* payloadData,
+                                     unsigned short payloadSize);
+
+protected:
+    VoERTP_RTCPImpl();
+    virtual ~VoERTP_RTCPImpl();
+};
+
+}  // namespace webrtc
+
+#endif    // WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_IMPL_H
+
diff --git a/voice_engine/main/source/voe_video_sync_impl.cc b/voice_engine/main/source/voe_video_sync_impl.cc
new file mode 100644
index 0000000..2d210be
--- /dev/null
+++ b/voice_engine/main/source/voe_video_sync_impl.cc
@@ -0,0 +1,249 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_video_sync_impl.h"
+
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc {
+
+VoEVideoSync* VoEVideoSync::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+    return NULL;
+#else
+    if (NULL == voiceEngine)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* s =
+        reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoEVideoSyncImpl* d = s;
+    (*d)++;
+    return (d);
+#endif
+}
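+// Note: the implicit conversion above relies on VoiceEngineImpl deriving from
+// VoEVideoSyncImpl, and (*d)++ bumps the reference count maintained by
+// voe::RefCount. A minimal (hypothetical) client-side pairing:
+//
+//   VoEVideoSync* sync = VoEVideoSync::GetInterface(voe);
+//   ...
+//   sync->Release();  // decrements the count again (see Release() below)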
+
+#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+
+VoEVideoSyncImpl::VoEVideoSyncImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEVideoSyncImpl::VoEVideoSyncImpl() - ctor");
+}
+
+VoEVideoSyncImpl::~VoEVideoSyncImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEVideoSyncImpl::~VoEVideoSyncImpl() - dtor");
+}
+
+int VoEVideoSyncImpl::Release()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEVideoSync::Release()");
+    (*this)--;
+    int refCount = GetCount();
+    if (refCount < 0)
+    {
+        Reset();  // reset reference counter to zero => OK to delete VE
+        _engineStatistics.SetLastError(VE_INTERFACE_NOT_FOUND,
+                                       kTraceWarning);
+        return (-1);
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "VoEVideoSync reference counter = %d", refCount);
+    return (refCount);
+}
+
+int VoEVideoSyncImpl::GetPlayoutTimestamp(int channel, unsigned int& timestamp)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetPlayoutTimestamp(channel=%d, timestamp=?)", channel);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetPlayoutTimestamp() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetPlayoutTimestamp(timestamp);
+}
+
+int VoEVideoSyncImpl::SetInitTimestamp(int channel,
+                                       unsigned int timestamp)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetInitTimestamp(channel=%d, timestamp=%lu)",
+                 channel, timestamp);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetInitTimestamp() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetInitTimestamp(timestamp);
+}
+
+int VoEVideoSyncImpl::SetInitSequenceNumber(int channel,
+                                            short sequenceNumber)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetInitSequenceNumber(channel=%d, sequenceNumber=%hd)",
+                 channel, sequenceNumber);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetInitSequenceNumber() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetInitSequenceNumber(sequenceNumber);
+}
+
+int VoEVideoSyncImpl::SetMinimumPlayoutDelay(int channel,int delayMs)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "SetMinimumPlayoutDelay(channel=%d, delayMs=%d)",
+                 channel, delayMs);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetMinimumPlayoutDelay() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetMinimumPlayoutDelay(delayMs);
+}
+
+int VoEVideoSyncImpl::GetDelayEstimate(int channel, int& delayMs)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetDelayEstimate(channel=%d, delayMs=?)", channel);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetDelayEstimate() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetDelayEstimate(delayMs);
+}
+
+int VoEVideoSyncImpl::GetPlayoutBufferSize(int& bufferMs)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "GetPlayoutBufferSize(bufferMs=?)");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    AudioDeviceModule::BufferType type
+        (AudioDeviceModule::kFixedBufferSize);
+    WebRtc_UWord16 sizeMS(0);
+    if (_audioDevicePtr->PlayoutBuffer(&type, &sizeMS) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+            "GetPlayoutBufferSize() failed to read buffer size");
+        return -1;
+    }
+    bufferMs = sizeMS;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetPlayoutBufferSize() => bufferMs=%d", bufferMs);
+    return 0;
+}
+
+int VoEVideoSyncImpl::GetRtpRtcp(int channel,
+                                 RtpRtcp* &rtpRtcpModule)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetRtpRtcp(channel=%i)", channel);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetPlayoutTimestamp() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRtpRtcp(rtpRtcpModule);
+}
+
+
+#endif  // #ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+
+}  // namespace webrtc
diff --git a/voice_engine/main/source/voe_video_sync_impl.h b/voice_engine/main/source/voe_video_sync_impl.h
new file mode 100644
index 0000000..13c901a
--- /dev/null
+++ b/voice_engine/main/source/voe_video_sync_impl.h
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_IMPL_H
+
+#include "voe_video_sync.h"
+
+#include "ref_count.h"
+#include "shared_data.h"
+
+namespace webrtc {
+
+class VoEVideoSyncImpl : public virtual voe::SharedData,
+                         public VoEVideoSync,
+                         public voe::RefCount
+{
+public:
+    virtual int Release();
+
+    virtual int GetPlayoutBufferSize(int& bufferMs);
+
+    virtual int SetMinimumPlayoutDelay(int channel, int delayMs);
+
+    virtual int GetDelayEstimate(int channel, int& delayMs);
+
+    virtual int SetInitTimestamp(int channel, unsigned int timestamp);
+
+    virtual int SetInitSequenceNumber(int channel, short sequenceNumber);
+
+    virtual int GetPlayoutTimestamp(int channel, unsigned int& timestamp);
+
+    virtual int GetRtpRtcp(int channel,
+                           RtpRtcp* &rtpRtcpModule);
+
+protected:
+    VoEVideoSyncImpl();
+    virtual ~VoEVideoSyncImpl();
+};
+
+}   // namespace webrtc
+
+#endif    // WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_IMPL_H
diff --git a/voice_engine/main/source/voe_volume_control_impl.cc b/voice_engine/main/source/voe_volume_control_impl.cc
new file mode 100644
index 0000000..b55b395
--- /dev/null
+++ b/voice_engine/main/source/voe_volume_control_impl.cc
@@ -0,0 +1,661 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_volume_control_impl.h"
+
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "output_mixer.h"
+#include "trace.h"
+#include "transmit_mixer.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc {
+
+VoEVolumeControl* VoEVolumeControl::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+    return NULL;
+#else
+    if (NULL == voiceEngine)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* s =
+        reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoEVolumeControlImpl* d = s;
+    (*d)++;
+    return (d);
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+
+VoEVolumeControlImpl::VoEVolumeControlImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+               "VoEVolumeControlImpl::VoEVolumeControlImpl() - ctor");
+}
+
+VoEVolumeControlImpl::~VoEVolumeControlImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+               "VoEVolumeControlImpl::~VoEVolumeControlImpl() - dtor");
+}
+
+int VoEVolumeControlImpl::Release()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "VoEVolumeControl::Release()");
+    (*this)--;
+    int refCount = GetCount();
+    if (refCount < 0)
+    {
+        Reset();  // reset reference counter to zero => OK to delete VE
+        _engineStatistics.SetLastError(
+            VE_INTERFACE_NOT_FOUND, kTraceWarning);
+        return (-1);
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+               "VoEVolumeControl reference counter = %d", refCount);
+    return (refCount);
+}
+
+int VoEVolumeControlImpl::SetSpeakerVolume(unsigned int volume)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "SetSpeakerVolume(volume=%u)", volume);
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (volume > kMaxVolumeLevel)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetSpeakerVolume() invalid argument");
+        return -1;
+    }
+
+    WebRtc_UWord32 maxVol(0);
+    WebRtc_UWord32 spkrVol(0);
+
+    // scale: [0,kMaxVolumeLevel] -> [0,MaxSpeakerVolume]
+    if (_audioDevicePtr->MaxSpeakerVolume(&maxVol) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_MIC_VOL_ERROR, kTraceError,
+            "SetSpeakerVolume() failed to get max volume");
+        return -1;
+    }
+    // round the value and avoid floating point computation
+    spkrVol = (WebRtc_UWord32)((volume * maxVol +
+        (int)(kMaxVolumeLevel / 2)) / (kMaxVolumeLevel));
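+    // For instance (hypothetical values): with kMaxVolumeLevel = 255,
+    // volume = 128 and maxVol = 65535 this yields
+    // (128 * 65535 + 127) / 255 = 32896, i.e. roughly half of the device range.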
+
+    // set the actual volume using the audio mixer
+    if (_audioDevicePtr->SetSpeakerVolume(spkrVol) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_MIC_VOL_ERROR, kTraceError,
+            "SetSpeakerVolume() failed to set speaker volume");
+        return -1;
+    }
+    return 0;
+}
+
+int VoEVolumeControlImpl::GetSpeakerVolume(unsigned int& volume)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "GetSpeakerVolume()");
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    WebRtc_UWord32 spkrVol(0);
+    WebRtc_UWord32 maxVol(0);
+
+    if (_audioDevicePtr->SpeakerVolume(&spkrVol) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_GET_MIC_VOL_ERROR, kTraceError,
+            "GetSpeakerVolume() unable to get speaker volume");
+        return -1;
+    }
+
+    // scale: [0, MaxSpeakerVolume] -> [0, kMaxVolumeLevel]
+    if (_audioDevicePtr->MaxSpeakerVolume(&maxVol) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_GET_MIC_VOL_ERROR, kTraceError,
+            "GetSpeakerVolume() unable to get max speaker volume");
+        return -1;
+    }
+    // round the value and avoid floating point computation
+    volume = (WebRtc_UWord32) ((spkrVol * kMaxVolumeLevel +
+        (int)(maxVol / 2)) / (maxVol));
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+               "GetSpeakerVolume() => volume=%d", volume);
+    return 0;
+}
+
+int VoEVolumeControlImpl::SetSystemOutputMute(bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "GetSystemOutputMute(enabled=%d)", enable);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    if (_audioDevicePtr->SetSpeakerMute(enable) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_GET_MIC_VOL_ERROR, kTraceError,
+            "SpeakerMute() unable to Set speaker mute");
+        return -1;
+    }
+
+    return 0;
+}
+
+int VoEVolumeControlImpl::GetSystemOutputMute(bool& enabled)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "GetSystemOutputMute(enabled=?)");
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    if (_audioDevicePtr->SpeakerMute(&enabled) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_GET_MIC_VOL_ERROR, kTraceError,
+            "SpeakerMute() unable to get speaker mute state");
+        return -1;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+               "GetSystemOutputMute() => %d", enabled);
+    return 0;
+}
+
+int VoEVolumeControlImpl::SetMicVolume(unsigned int volume)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "SetMicVolume(volume=%u)", volume);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (volume > kMaxVolumeLevel)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetMicVolume() invalid argument");
+        return -1;
+    }
+
+    WebRtc_UWord32 maxVol(0);
+    WebRtc_UWord32 micVol(0);
+
+    // scale: [0, kMaxVolumeLevel] -> [0,MaxMicrophoneVolume]
+    if (_audioDevicePtr->MaxMicrophoneVolume(&maxVol) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_MIC_VOL_ERROR, kTraceError,
+            "SetMicVolume() failed to get max volume");
+        return -1;
+    }
+    // round the value and avoid floating point computation
+    micVol = (WebRtc_UWord32) ((volume * maxVol +
+        (int)(kMaxVolumeLevel / 2)) / (kMaxVolumeLevel));
+
+    // set the actual volume using the audio mixer
+    if (_audioDevicePtr->SetMicrophoneVolume(micVol) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_MIC_VOL_ERROR, kTraceError,
+            "SetMicVolume() failed to set mic volume");
+        return -1;
+    }
+    return 0;
+}
+
+int VoEVolumeControlImpl::GetMicVolume(unsigned int& volume)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "GetMicVolume()");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    WebRtc_UWord32 micVol(0);
+    WebRtc_UWord32 maxVol(0);
+
+    if (_audioDevicePtr->MicrophoneVolume(&micVol) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_GET_MIC_VOL_ERROR, kTraceError,
+            "GetMicVolume() unable to get microphone volume");
+        return -1;
+    }
+
+    // scale: [0, MaxMicrophoneVolume] -> [0, kMaxVolumeLevel]
+    if (_audioDevicePtr->MaxMicrophoneVolume(&maxVol) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_GET_MIC_VOL_ERROR, kTraceError,
+            "GetMicVolume() unable to get max microphone volume");
+        return -1;
+    }
+    // round the value and avoid floating point calculation
+    volume = (WebRtc_UWord32) ((micVol * kMaxVolumeLevel +
+        (int)(maxVol / 2)) / (maxVol));
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+               "GetMicVolume() => volume=%d", volume);
+    return 0;
+}
+
+int VoEVolumeControlImpl::SetInputMute(int channel, bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "SetInputMute(channel=%d, enable=%d)", channel, enable);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        // Mute before demultiplexing <=> affects all channels
+        return _transmitMixerPtr->SetMute(enable);
+    }
+    else
+    {
+        // Mute after demultiplexing <=> affects one channel only
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(
+                VE_CHANNEL_NOT_VALID, kTraceError,
+                "SetInputMute() failed to locate channel");
+            return -1;
+        }
+        return channelPtr->SetMute(enable);
+    }
+    return 0;
+}
+
+int VoEVolumeControlImpl::GetInputMute(int channel, bool& enabled)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "GetInputMute(channel=%d)", channel);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        enabled = _transmitMixerPtr->Mute();
+    }
+    else
+    {
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(
+                VE_CHANNEL_NOT_VALID, kTraceError,
+                "SetInputMute() failed to locate channel");
+            return -1;
+        }
+        enabled = channelPtr->Mute();
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+               "GetInputMute() => enabled = %d", (int)enabled);
+    return 0;
+}
+
+int VoEVolumeControlImpl::SetSystemInputMute(bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "SetSystemInputMute(enabled=%d)", enable);
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    if (_audioDevicePtr->SetMicrophoneMute(enable) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_GET_MIC_VOL_ERROR, kTraceError,
+            "MicrophoneMute() unable to set microphone mute state");
+        return -1;
+    }
+
+    return 0;
+}
+
+int VoEVolumeControlImpl::GetSystemInputMute(bool& enabled)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "GetSystemInputMute(enabled=?)");
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    if (_audioDevicePtr->MicrophoneMute(&enabled) != 0)
+    {
+        _engineStatistics.SetLastError(
+            VE_GET_MIC_VOL_ERROR, kTraceError,
+            "MicrophoneMute() unable to get microphone mute state");
+        return -1;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+               "GetSystemInputMute() => %d", enabled);
+    return 0;
+}
+
+int VoEVolumeControlImpl::GetSpeechInputLevel(unsigned int& level)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "GetSpeechInputLevel()");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    WebRtc_Word8 currentLevel = _transmitMixerPtr->AudioLevel();
+    level = static_cast<unsigned int> (currentLevel);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+               "GetSpeechInputLevel() => %d", level);
+    return 0;
+}
+
+int VoEVolumeControlImpl::GetSpeechOutputLevel(int channel,
+                                               unsigned int& level)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "GetSpeechOutputLevel(channel=%d, level=?)", channel);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        return _outputMixerPtr->GetSpeechOutputLevel((WebRtc_UWord32&)level);
+    }
+    else
+    {
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(
+                VE_CHANNEL_NOT_VALID, kTraceError,
+                "GetSpeechOutputLevel() failed to locate channel");
+            return -1;
+        }
+        channelPtr->GetSpeechOutputLevel((WebRtc_UWord32&)level);
+    }
+    return 0;
+}
+
+int VoEVolumeControlImpl::GetSpeechInputLevelFullRange(unsigned int& level)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "GetSpeechInputLevelFullRange(level=?)");
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    WebRtc_Word16 currentLevel = _transmitMixerPtr->AudioLevelFullRange();
+    level = static_cast<unsigned int> (currentLevel);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+               "GetSpeechInputLevelFullRange() => %d", level);
+    return 0;
+}
+
+int VoEVolumeControlImpl::GetSpeechOutputLevelFullRange(int channel,
+                                                        unsigned int& level)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "GetSpeechOutputLevelFullRange(channel=%d, level=?)", channel);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        return _outputMixerPtr->GetSpeechOutputLevelFullRange(
+            (WebRtc_UWord32&)level);
+    }
+    else
+    {
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(
+                VE_CHANNEL_NOT_VALID, kTraceError,
+                "GetSpeechOutputLevelFullRange() failed to locate channel");
+            return -1;
+        }
+        channelPtr->GetSpeechOutputLevelFullRange((WebRtc_UWord32&)level);
+    }
+    return 0;
+}
+
+int VoEVolumeControlImpl::SetChannelOutputVolumeScaling(int channel,
+                                                        float scaling)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "SetChannelOutputVolumeScaling(channel=%d, scaling=%3.2f)",
+               channel, scaling);
+    IPHONE_NOT_SUPPORTED();
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (scaling < kMinOutputVolumeScaling ||
+        scaling > kMaxOutputVolumeScaling)
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetChannelOutputVolumeScaling() invalid parameter");
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetChannelOutputVolumeScaling() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetChannelOutputVolumeScaling(scaling);
+}
+
+int VoEVolumeControlImpl::GetChannelOutputVolumeScaling(int channel,
+                                                        float& scaling)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "GetChannelOutputVolumeScaling(channel=%d, scaling=?)", channel);
+    IPHONE_NOT_SUPPORTED();
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_channelManager, channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _engineStatistics.SetLastError(
+            VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetChannelOutputVolumeScaling() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetChannelOutputVolumeScaling(scaling);
+}
+
+int VoEVolumeControlImpl::SetOutputVolumePan(int channel,
+                                             float left,
+                                             float right)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "SetOutputVolumePan(channel=%d, left=%2.1f, right=%2.1f)",
+               channel, left, right);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    bool available(false);
+    _audioDevicePtr->StereoPlayoutIsAvailable(&available);
+    if (!available)
+    {
+        _engineStatistics.SetLastError(
+            VE_FUNC_NO_STEREO, kTraceError,
+            "SetOutputVolumePan() stereo playout not supported");
+        return -1;
+    }
+    if ((left < kMinOutputVolumePanning)  ||
+        (left > kMaxOutputVolumePanning)  ||
+        (right < kMinOutputVolumePanning) ||
+        (right > kMaxOutputVolumePanning))
+    {
+        _engineStatistics.SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetOutputVolumePan() invalid parameter");
+        return -1;
+    }
+
+    if (channel == -1)
+    {
+        // Master balance (affects the signal after output mixing)
+        return _outputMixerPtr->SetOutputVolumePan(left, right);
+    }
+    else
+    {
+        // Per-channel balance (affects the signal before output mixing)
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(
+                VE_CHANNEL_NOT_VALID, kTraceError,
+                "SetOutputVolumePan() failed to locate channel");
+            return -1;
+        }
+        return channelPtr->SetOutputVolumePan(left, right);
+    }
+    return 0;
+}
+
+int VoEVolumeControlImpl::GetOutputVolumePan(int channel,
+                                             float& left,
+                                             float& right)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_instanceId,-1),
+               "GetOutputVolumePan(channel=%d, left=?, right=?)", channel);
+    ANDROID_NOT_SUPPORTED();
+    IPHONE_NOT_SUPPORTED();
+
+    if (!_engineStatistics.Initialized())
+    {
+        _engineStatistics.SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    bool available(false);
+    _audioDevicePtr->StereoPlayoutIsAvailable(&available);
+    if (!available)
+    {
+        _engineStatistics.SetLastError(
+            VE_FUNC_NO_STEREO, kTraceError,
+            "GetOutputVolumePan() stereo playout not supported");
+        return -1;
+    }
+
+    if (channel == -1)
+    {
+        return _outputMixerPtr->GetOutputVolumePan(left, right);
+    }
+    else
+    {
+        voe::ScopedChannel sc(_channelManager, channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _engineStatistics.SetLastError(
+                VE_CHANNEL_NOT_VALID, kTraceError,
+                "GetOutputVolumePan() failed to locate channel");
+            return -1;
+        }
+        return channelPtr->GetOutputVolumePan(left, right);
+    }
+    return 0;
+}
+
+#endif  // #ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+
+}  // namespace webrtc
diff --git a/voice_engine/main/source/voe_volume_control_impl.h b/voice_engine/main/source/voe_volume_control_impl.h
new file mode 100644
index 0000000..18f4a1b
--- /dev/null
+++ b/voice_engine/main/source/voe_volume_control_impl.h
@@ -0,0 +1,74 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_IMPL_H
+
+#include "voe_volume_control.h"
+
+#include "ref_count.h"
+#include "shared_data.h"
+
+namespace webrtc {
+
+class VoEVolumeControlImpl : public virtual voe::SharedData,
+                             public VoEVolumeControl,
+                             public voe::RefCount
+{
+public:
+    virtual int Release();
+
+    virtual int SetSpeakerVolume(unsigned int volume);
+
+    virtual int GetSpeakerVolume(unsigned int& volume);
+
+    virtual int SetSystemOutputMute(bool enable);
+
+    virtual int GetSystemOutputMute(bool& enabled);
+
+    virtual int SetMicVolume(unsigned int volume);
+
+    virtual int GetMicVolume(unsigned int& volume);
+
+    virtual int SetInputMute(int channel, bool enable);
+
+    virtual int GetInputMute(int channel, bool& enabled);
+
+    virtual int SetSystemInputMute(bool enable);
+
+    virtual int GetSystemInputMute(bool& enabled);
+
+    virtual int GetSpeechInputLevel(unsigned int& level);
+
+    virtual int GetSpeechOutputLevel(int channel, unsigned int& level);
+
+    virtual int GetSpeechInputLevelFullRange(unsigned int& level);
+
+    virtual int GetSpeechOutputLevelFullRange(int channel,
+                                              unsigned int& level);
+
+    virtual int SetChannelOutputVolumeScaling(int channel, float scaling);
+
+    virtual int GetChannelOutputVolumeScaling(int channel, float& scaling);
+
+    virtual int SetOutputVolumePan(int channel, float left, float right);
+
+    virtual int GetOutputVolumePan(int channel, float& left, float& right);
+
+
+protected:
+    VoEVolumeControlImpl();
+    virtual ~VoEVolumeControlImpl();
+};
+
+}   // namespace webrtc
+
+#endif    // WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_IMPL_H
+
diff --git a/voice_engine/main/source/voice_engine_core.gyp b/voice_engine/main/source/voice_engine_core.gyp
new file mode 100644
index 0000000..89fd023
--- /dev/null
+++ b/voice_engine/main/source/voice_engine_core.gyp
@@ -0,0 +1,127 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+  'includes': [
+    '../../../common_settings.gypi',
+  ],
+  'targets': [
+    {
+      'target_name': 'voice_engine_core',
+      'type': '<(library)',
+      'dependencies': [
+        '../../../common_audio/resampler/main/source/resampler.gyp:resampler',
+        '../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
+        '../../../modules/audio_coding/main/source/audio_coding_module.gyp:audio_coding_module',
+        '../../../modules/audio_conference_mixer/source/audio_conference_mixer.gyp:audio_conference_mixer',
+        '../../../modules/audio_device/main/source/audio_device.gyp:audio_device',
+        '../../../modules/audio_processing/main/source/apm.gyp:audio_processing',
+        '../../../modules/media_file/source/media_file.gyp:media_file',
+        '../../../modules/rtp_rtcp/source/rtp_rtcp.gyp:rtp_rtcp',
+        '../../../modules/udp_transport/source/udp_transport.gyp:udp_transport',
+        '../../../modules/utility/source/utility.gyp:webrtc_utility',
+        '../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
+      ],
+      'include_dirs': [
+        '../../..',
+        '../interface',
+      ],
+      'direct_dependent_settings': {
+        'include_dirs': [
+          '../../..',
+          '../interface',
+        ],
+      },
+      'sources': [
+        '../../../common_types.h',
+        '../../../engine_configurations.h',
+        '../../../typedefs.h',
+        '../interface/voe_audio_processing.h',
+        '../interface/voe_base.h',
+        '../interface/voe_call_report.h',
+        '../interface/voe_codec.h',
+        '../interface/voe_dtmf.h',
+        '../interface/voe_encryption.h',
+        '../interface/voe_errors.h',
+        '../interface/voe_external_media.h',
+        '../interface/voe_file.h',
+        '../interface/voe_hardware.h',
+        '../interface/voe_neteq_stats.h',
+        '../interface/voe_network.h',
+        '../interface/voe_rtp_rtcp.h',
+        '../interface/voe_video_sync.h',
+        '../interface/voe_volume_control.h',
+        'audio_frame_operations.cc',
+        'audio_frame_operations.h',
+        'channel.cc',
+        'channel.h',
+        'channel_manager.cc',
+        'channel_manager.h',
+        'channel_manager_base.cc',
+        'channel_manager_base.h',
+        'dtmf_inband.cc',
+        'dtmf_inband.h',
+        'dtmf_inband_queue.cc',
+        'dtmf_inband_queue.h',
+        'level_indicator.cc',
+        'level_indicator.h',
+        'monitor_module.cc',
+        'monitor_module.h',
+        'output_mixer.cc',
+        'output_mixer.h',
+        'ref_count.cc',
+        'ref_count.h',
+        'shared_data.cc',
+        'shared_data.h',
+        'statistics.cc',
+        'statistics.h',
+        'transmit_mixer.cc',
+        'transmit_mixer.h',
+        'utility.cc',
+        'utility.h',
+        'voe_audio_processing_impl.cc',
+        'voe_audio_processing_impl.h',
+        'voe_base_impl.cc',
+        'voe_base_impl.h',
+        'voe_call_report_impl.cc',
+        'voe_call_report_impl.h',
+        'voe_codec_impl.cc',
+        'voe_codec_impl.h',
+        'voe_dtmf_impl.cc',
+        'voe_dtmf_impl.h',
+        'voe_encryption_impl.cc',
+        'voe_encryption_impl.h',
+        'voe_external_media_impl.cc',
+        'voe_external_media_impl.h',
+        'voe_file_impl.cc',
+        'voe_file_impl.h',
+        'voe_hardware_impl.cc',
+        'voe_hardware_impl.h',
+        'voe_neteq_stats_impl.cc',
+        'voe_neteq_stats_impl.h',
+        'voe_network_impl.cc',
+        'voe_network_impl.h',
+        'voe_rtp_rtcp_impl.cc',
+        'voe_rtp_rtcp_impl.h',
+        'voe_video_sync_impl.cc',
+        'voe_video_sync_impl.h',
+        'voe_volume_control_impl.cc',
+        'voe_volume_control_impl.h',
+        'voice_engine_defines.h',
+        'voice_engine_impl.cc',
+        'voice_engine_impl.h',
+      ],
+    },
+  ],
+}
+
+# Local Variables:
+# tab-width:2
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/voice_engine/main/source/voice_engine_defines.h b/voice_engine/main/source/voice_engine_defines.h
new file mode 100644
index 0000000..ad542b3
--- /dev/null
+++ b/voice_engine/main/source/voice_engine_defines.h
@@ -0,0 +1,598 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ *  This file contains common constants for VoiceEngine, as well as
+ *  platform specific settings and include files.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOICE_ENGINE_DEFINES_H
+#define WEBRTC_VOICE_ENGINE_VOICE_ENGINE_DEFINES_H
+
+#include "engine_configurations.h"
+
+// ----------------------------------------------------------------------------
+//  Enumerators
+// ----------------------------------------------------------------------------
+
+namespace webrtc
+{
+
+// VolumeControl
+enum { kMinVolumeLevel = 0 };
+enum { kMaxVolumeLevel = 255 };
+// Min scale factor for per-channel volume scaling
+const float kMinOutputVolumeScaling = 0.0f;
+// Max scale factor for per-channel volume scaling
+const float kMaxOutputVolumeScaling = 10.0f;
+// Min scale factor for output volume panning
+const float kMinOutputVolumePanning = 0.0f;
+// Max scale factor for output volume panning
+const float kMaxOutputVolumePanning = 1.0f;
+
+// DTMF
+enum { kMinDtmfEventCode = 0 };                 // DTMF digit "0"
+enum { kMaxDtmfEventCode = 15 };                // DTMF digit "D"
+enum { kMinTelephoneEventCode = 0 };            // RFC4733 (Section 2.3.1)
+enum { kMaxTelephoneEventCode = 255 };          // RFC4733 (Section 2.3.1)
+enum { kMinTelephoneEventDuration = 100 };
+enum { kMaxTelephoneEventDuration = 60000 };    // Actual limit is 2^16
+enum { kMinTelephoneEventAttenuation = 0 };     // 0 dBm0
+enum { kMaxTelephoneEventAttenuation = 36 };    // -36 dBm0
+enum { kMinTelephoneEventSeparationMs = 100 };  // Min delta time between two
+                                                // telephone events
+
+enum { EcAec = 0 };                             // AEC mode
+enum { EcAecm = 1 };                            // AECM mode
+
+enum { kVoiceEngineMaxIpPacketSizeBytes = 1500 };       // assumes Ethernet
+
+enum { kVoiceEngineMaxModuleVersionSize = 960 };
+
+// Base
+enum { kVoiceEngineVersionMaxMessageSize = 1024 };
+
+// Encryption
+// SRTP uses 30 bytes key length
+enum { kVoiceEngineMaxSrtpKeyLength = 30 };
+// SRTP minimum key/tag length for encryption level
+enum { kVoiceEngineMinSrtpEncryptLength = 16 };
+// SRTP maximum key/tag length for encryption level
+enum { kVoiceEngineMaxSrtpEncryptLength = 256 };
+// SRTP maximum key/tag length for authentication level,
+// HMAC SHA1 authentication type
+enum { kVoiceEngineMaxSrtpAuthSha1Length = 20 };
+// SRTP maximum tag length for authentication level,
+// null authentication type
+enum { kVoiceEngineMaxSrtpTagAuthNullLength = 12 };
+// SRTP maximum key length for authentication level,
+// null authentication type
+enum { kVoiceEngineMaxSrtpKeyAuthNullLength = 256 };
+
+// Audio processing
+enum { kVoiceEngineAudioProcessingDeviceSampleRateHz = 48000 };
+
+// Codec
+// Min init target rate for iSAC-wb
+enum { kVoiceEngineMinIsacInitTargetRateBpsWb = 10000 };
+// Max init target rate for iSAC-wb
+enum { kVoiceEngineMaxIsacInitTargetRateBpsWb = 32000 };
+// Min init target rate for iSAC-swb
+enum { kVoiceEngineMinIsacInitTargetRateBpsSwb = 10000 };
+// Max init target rate for iSAC-swb
+enum { kVoiceEngineMaxIsacInitTargetRateBpsSwb = 56000 };
+// Lowest max rate for iSAC-wb
+enum { kVoiceEngineMinIsacMaxRateBpsWb = 32000 };
+// Highest max rate for iSAC-wb
+enum { kVoiceEngineMaxIsacMaxRateBpsWb = 53400 };
+// Lowest max rate for iSAC-swb
+enum { kVoiceEngineMinIsacMaxRateBpsSwb = 32000 };
+// Highest max rate for iSAC-swb
+enum { kVoiceEngineMaxIsacMaxRateBpsSwb = 107000 };
+// Lowest max payload size for iSAC-wb
+enum { kVoiceEngineMinIsacMaxPayloadSizeBytesWb = 120 };
+// Highest max payload size for iSAC-wb
+enum { kVoiceEngineMaxIsacMaxPayloadSizeBytesWb = 400 };
+// Lowest max payload size for iSAC-swb
+enum { kVoiceEngineMinIsacMaxPayloadSizeBytesSwb = 120 };
+// Highest max payload size for iSAC-swb
+enum { kVoiceEngineMaxIsacMaxPayloadSizeBytesSwb = 600 };
+
+// VideoSync
+// Lowest minimum playout delay
+enum { kVoiceEngineMinMinPlayoutDelayMs = 0 };
+// Highest minimum playout delay
+enum { kVoiceEngineMaxMinPlayoutDelayMs = 1000 };
+
+// Network
+// Min packet-timeout time for received RTP packets
+enum { kVoiceEngineMinPacketTimeoutSec = 1 };
+// Max packet-timeout time for received RTP packets
+enum { kVoiceEngineMaxPacketTimeoutSec = 150 };
+// Min sample time for dead-or-alive detection
+enum { kVoiceEngineMinSampleTimeSec = 1 };
+// Max sample time for dead-or-alive detection
+enum { kVoiceEngineMaxSampleTimeSec = 150 };
+
+// RTP/RTCP
+// Min 4-bit ID for RTP extension (see section 4.2 in RFC 5285)
+enum { kVoiceEngineMinRtpExtensionId = 1 };
+// Max 4-bit ID for RTP extension
+enum { kVoiceEngineMaxRtpExtensionId = 14 };
+
+} // namespace webrtc
+
+#define WEBRTC_AUDIO_PROCESSING_OFF false
+
+#define WEBRTC_VOICE_ENGINE_HP_DEFAULT_STATE true
+    // AudioProcessing HP is ON
+#define WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE  WEBRTC_AUDIO_PROCESSING_OFF
+    // AudioProcessing NS off
+#define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE true
+    // AudioProcessing AGC on
+#define WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE  WEBRTC_AUDIO_PROCESSING_OFF
+    // AudioProcessing EC off
+#define WEBRTC_VOICE_ENGINE_LEVEL_ESTIMATOR_DEFAULT_STATE \
+    WEBRTC_AUDIO_PROCESSING_OFF
+    // AudioProcessing Estimator off
+#define WEBRTC_VOICE_ENGINE_VAD_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+    // AudioProcessing VAD off
+#define WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+    // AudioProcessing RX AGC off
+#define WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+    // AudioProcessing RX NS off
+#define WEBRTC_VOICE_ENGINE_RX_HP_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+    // AudioProcessing RX High Pass Filter off
+
+#define WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE  NoiseSuppression::kModerate
+    // AudioProcessing NS moderate suppression
+#define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE GainControl::kAdaptiveAnalog
+    // AudioProcessing AGC analog digital combined
+#define WEBRTC_VOICE_ENGINE_EC_DEFAULT_MODE  EcAec
+    // AudioProcessing EC AEC
+#define WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_MODE GainControl::kAdaptiveDigital
+    // AudioProcessing AGC mode
+#define WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_MODE  NoiseSuppression::kModerate
+    // AudioProcessing RX NS mode
+
+// Macros
+// Comparison of two strings without regard to case
+#define STR_CASE_CMP(x,y) ::_stricmp(x,y)
+// Compares characters of two strings without regard to case
+#define STR_NCASE_CMP(x,y,n) ::_strnicmp(x,y,n)
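+// For example (illustrative only): STR_CASE_CMP("pcmu", "PCMU") evaluates to 0
+// because the comparison ignores case, while STR_NCASE_CMP compares at most n
+// characters in the same way.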
+
+// ----------------------------------------------------------------------------
+//  Build information macros
+// ----------------------------------------------------------------------------
+
+#if defined(_DEBUG)
+#define BUILDMODE "d"
+#elif defined(DEBUG)
+#define BUILDMODE "d"
+#elif defined(NDEBUG)
+#define BUILDMODE "r"
+#else
+#define BUILDMODE "?"
+#endif
+
+#define BUILDTIME __TIME__
+#define BUILDDATE __DATE__
+
+// Example: "Oct 10 2002 12:05:30 r"
+#define BUILDINFO BUILDDATE " " BUILDTIME " " BUILDMODE
+
+// ----------------------------------------------------------------------------
+//  Macros
+// ----------------------------------------------------------------------------
+
+#if (defined(_DEBUG) && defined(_WIN32) && (_MSC_VER >= 1400))
+  #include <windows.h>
+  #include <stdio.h>
+  #define DEBUG_PRINT(...)      \
+  {                             \
+    char msg[256];              \
+    sprintf(msg, __VA_ARGS__);  \
+    OutputDebugStringA(msg);    \
+  }
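+  // Illustrative (hypothetical) use: DEBUG_PRINT("channel %d created", channel);
+  // expands here to an sprintf into the local msg buffer followed by
+  // OutputDebugStringA(msg).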
+#else
+  // special fix for Visual Studio 2003
+  #define DEBUG_PRINT(exp)      ((void)0)
+#endif  // defined(_DEBUG) && defined(_WIN32)
+
+#define CHECK_CHANNEL(channel)  if (CheckChannel(channel) == -1) return -1;
+
+// ----------------------------------------------------------------------------
+//  Default Trace filter
+// ----------------------------------------------------------------------------
+
+#define WEBRTC_VOICE_ENGINE_DEFAULT_TRACE_FILTER \
+    kTraceStateInfo | kTraceWarning | kTraceError | kTraceCritical | \
+    kTraceApiCall
+
+// ----------------------------------------------------------------------------
+//  Inline functions
+// ----------------------------------------------------------------------------
+
+namespace webrtc
+{
+
+inline int VoEId(const int veId, const int chId)
+{
+    if (chId == -1)
+    {
+        const int dummyChannel(99);
+        return (int) ((veId << 16) + dummyChannel);
+    }
+    return (int) ((veId << 16) + chId);
+}
+
+inline int VoEModuleId(const int veId, const int chId)
+{
+    return (int) ((veId << 16) + chId);
+}
+
+// Convert module ID to internal VoE channel ID
+inline int VoEChannelId(const int moduleId)
+{
+    return (int) (moduleId & 0xffff);
+}
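+// Example (hypothetical values): VoEModuleId(2, 5) packs engine 2 and channel 5
+// into (2 << 16) + 5 = 0x00020005, and VoEChannelId(0x00020005) recovers the
+// channel by masking the low 16 bits: 0x00020005 & 0xffff = 5.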
+
+} // namespace webrtc
+
+// ----------------------------------------------------------------------------
+//  Platform settings
+// ----------------------------------------------------------------------------
+
+// *** WINDOWS ***
+
+#if defined(_WIN32)
+
+  #pragma comment( lib, "winmm.lib" )
+
+  #ifndef WEBRTC_EXTERNAL_TRANSPORT
+    #pragma comment( lib, "ws2_32.lib" )
+  #endif
+
+// ----------------------------------------------------------------------------
+//  Enumerators
+// ----------------------------------------------------------------------------
+
+namespace webrtc
+{
+// Max number of supported channels
+enum { kVoiceEngineMaxNumOfChannels = 32 };
+// Max number of channels which can be played out simultaneously
+enum { kVoiceEngineMaxNumOfActiveChannels = 16 };
+} // namespace webrtc
+
+// ----------------------------------------------------------------------------
+//  Defines
+// ----------------------------------------------------------------------------
+
+  #include <windows.h>
+  #include <mmsystem.h> // timeGetTime
+
+  #define GET_TIME_IN_MS() ::timeGetTime()
+  #define SLEEP(x) ::Sleep(x)
+  // Comparison of two strings without regard to case
+  #define STR_CASE_CMP(x,y) ::_stricmp(x,y)
+  // Compares characters of two strings without regard to case
+  #define STR_NCASE_CMP(x,y,n) ::_strnicmp(x,y,n)
+
+// Default device for Windows PC
+  #define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE \
+    AudioDeviceModule::kDefaultCommunicationDevice
+
+#endif  // #if defined(_WIN32)
+
+// *** LINUX ***
+
+#ifdef WEBRTC_LINUX
+
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#ifndef QNX
+  #include <linux/net.h>
+#ifndef ANDROID
+  #include <sys/soundcard.h>
+#endif // ANDROID
+#endif // QNX
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <time.h>
+#include <sys/time.h>
+
+#define DWORD unsigned long int
+#define WINAPI
+#define LPVOID void *
+#define FALSE 0
+#define TRUE 1
+#define UINT unsigned int
+#define UCHAR unsigned char
+#define TCHAR char
+#ifdef QNX
+#define _stricmp stricmp
+#else
+#define _stricmp strcasecmp
+#endif
+#define GetLastError() errno
+#define WSAGetLastError() errno
+#define LPCTSTR const char*
+#define LPCSTR const char*
+#define wsprintf sprintf
+#define TEXT(a) a
+#define _ftprintf fprintf
+#define _tcslen strlen
+#define FAR
+#define __cdecl
+#define LPSOCKADDR struct sockaddr *
+
+namespace
+{
+    void Sleep(unsigned long x)
+    {
+        timespec t;
+        t.tv_sec = x/1000;
+        t.tv_nsec = (x-(x/1000)*1000)*1000000;
+        nanosleep(&t,NULL);
+    }
+
+    DWORD timeGetTime()
+    {
+        struct timeval tv;
+        struct timezone tz;
+        unsigned long val;
+
+        gettimeofday(&tv, &tz);
+        val= tv.tv_sec*1000+ tv.tv_usec/1000;
+        return(val);
+    }
+}
+
+#define SLEEP(x) ::Sleep(x)
+#define GET_TIME_IN_MS timeGetTime
+
+// Default device for Linux and Android
+#define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE 0
+
+#ifdef ANDROID
+
+// ----------------------------------------------------------------------------
+//  Enumerators
+// ----------------------------------------------------------------------------
+
+namespace webrtc
+{
+  // Max number of supported channels
+  enum { kVoiceEngineMaxNumOfChannels = 2 };
+  // Max number of channels which can be played out simultaneously
+  enum { kVoiceEngineMaxNumOfActiveChannels = 2 };
+} // namespace webrtc
+
+// ----------------------------------------------------------------------------
+//  Defines
+// ----------------------------------------------------------------------------
+
+  // Always excluded for Android builds
+  #undef WEBRTC_CODEC_ISAC
+  #undef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+  #undef WEBRTC_CONFERENCING
+  #undef WEBRTC_TYPING_DETECTION
+
+  // Default audio processing states
+  #undef  WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE
+  #undef  WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE
+  #undef  WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE
+  #define WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE  WEBRTC_AUDIO_PROCESSING_OFF
+  #define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+  #define WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE  WEBRTC_AUDIO_PROCESSING_OFF
+
+  // Default audio processing modes
+  #undef  WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE
+  #undef  WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE
+  #undef  WEBRTC_VOICE_ENGINE_EC_DEFAULT_MODE
+  #define WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE  \
+      NoiseSuppression::kModerate
+  #define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE \
+      GainControl::kAdaptiveDigital
+  #define WEBRTC_VOICE_ENGINE_EC_DEFAULT_MODE  EcAecm
+
+  #define ANDROID_NOT_SUPPORTED() \
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, \
+                                   "API call not supported"); \
+    return -1;
+
+#else // LINUX PC
+// ----------------------------------------------------------------------------
+//  Enumerators
+// ----------------------------------------------------------------------------
+
+namespace webrtc
+{
+  // Max number of supported channels
+  enum { kVoiceEngineMaxNumOfChannels = 32 };
+  // Max number of channels which can be played out simultaneously
+  enum { kVoiceEngineMaxNumOfActiveChannels = 16 };
+} // namespace webrtc
+
+// ----------------------------------------------------------------------------
+//  Defines
+// ----------------------------------------------------------------------------
+
+  #define ANDROID_NOT_SUPPORTED()
+
+#endif // ANDROID - LINUX PC
+
+#else
+#define ANDROID_NOT_SUPPORTED()
+#endif  // #ifdef WEBRTC_LINUX
+
+// *** WEBRTC_MAC ***
+// including iPhone
+
+#ifdef WEBRTC_MAC
+
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <sys/time.h>
+#include <time.h>
+#include <AudioUnit/AudioUnit.h>
+#if !defined(MAC_IPHONE) && !defined(MAC_IPHONE_SIM)
+  #include <CoreServices/CoreServices.h>
+  #include <CoreAudio/CoreAudio.h>
+  #include <AudioToolbox/DefaultAudioOutput.h>
+  #include <AudioToolbox/AudioConverter.h>
+  #include <CoreAudio/HostTime.h>
+#endif
+
+#define DWORD unsigned long int
+#define WINAPI
+#define LPVOID void *
+#define FALSE 0
+#define TRUE 1
+#define SOCKADDR_IN struct sockaddr_in
+#define UINT unsigned int
+#define UCHAR unsigned char
+#define TCHAR char
+#define _stricmp strcasecmp
+#define GetLastError() errno
+#define WSAGetLastError() errno
+#define LPCTSTR const char*
+#define wsprintf sprintf
+#define TEXT(a) a
+#define _ftprintf fprintf
+#define _tcslen strlen
+#define FAR
+#define __cdecl
+#define LPSOCKADDR struct sockaddr *
+#define LPCSTR const char*
+#define ULONG unsigned long
+
+namespace
+{
+    void Sleep(unsigned long x)
+    {
+        timespec t;
+        t.tv_sec = x/1000;
+        t.tv_nsec = (x-(x/1000)*1000)*1000000;
+        nanosleep(&t,NULL);
+    }
+
+    DWORD WebRtcTimeGetTime()
+    {
+        struct timeval tv;
+        struct timezone tz;
+        unsigned long val;
+
+        gettimeofday(&tv, &tz);
+        val= tv.tv_sec*1000+ tv.tv_usec/1000;
+        return(val);
+    }
+}
+
+#define SLEEP(x) ::Sleep(x)
+#define GET_TIME_IN_MS WebRtcTimeGetTime
+
+// Default device for Mac and iPhone
+#define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE 0
+
+// iPhone specific
+#if defined(MAC_IPHONE) || defined(MAC_IPHONE_SIM)
+
+// ----------------------------------------------------------------------------
+//  Enumerators
+// ----------------------------------------------------------------------------
+
+namespace webrtc
+{
+  // Max number of supported channels
+  enum { kVoiceEngineMaxNumOfChannels = 2 };
+  // Max number of channels which can be played out simultaneously
+  enum { kVoiceEngineMaxNumOfActiveChannels = 2 };
+} // namespace webrtc
+
+// ----------------------------------------------------------------------------
+//  Defines
+// ----------------------------------------------------------------------------
+
+  // Always excluded for iPhone builds
+  #undef WEBRTC_CODEC_ISAC
+  #undef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+
+  #undef  WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE
+  #undef  WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE
+  #undef  WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE
+  #define WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE  WEBRTC_AUDIO_PROCESSING_OFF
+  #define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+  #define WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE  WEBRTC_AUDIO_PROCESSING_OFF
+
+  #undef  WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE
+  #undef  WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE
+  #undef  WEBRTC_VOICE_ENGINE_EC_DEFAULT_MODE
+  #define WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE \
+      NoiseSuppression::kModerate
+  #define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE \
+      GainControl::kAdaptiveDigital
+  #define WEBRTC_VOICE_ENGINE_EC_DEFAULT_MODE EcAecm
+
+  #define IPHONE_NOT_SUPPORTED() \
+    _engineStatistics.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, \
+                                   "API call not supported"); \
+    return -1;
+
+#else // Non-iPhone
+
+// ----------------------------------------------------------------------------
+//  Enumerators
+// ----------------------------------------------------------------------------
+
+namespace webrtc
+{
+  // Max number of supported channels
+  enum { kVoiceEngineMaxNumOfChannels = 32 };
+  // Max number of channels which can be played out simultaneously
+  enum { kVoiceEngineMaxNumOfActiveChannels = 16 };
+} // namespace webrtc
+
+// ----------------------------------------------------------------------------
+//  Defines
+// ----------------------------------------------------------------------------
+
+  #define IPHONE_NOT_SUPPORTED()
+#endif
+
+#else
+#define IPHONE_NOT_SUPPORTED()
+#endif  // #ifdef WEBRTC_MAC
+
+
+
+#endif // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_DEFINES_H
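
Editor's note: the header above funnels the platform differences into a few macros (GET_TIME_IN_MS, SLEEP, WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE, the per-platform channel limits) so the rest of the engine can stay platform-neutral. A hedged sketch of how engine code would consume them; the helper name is made up for illustration:

    // Sketch only: uses the portability macros from voice_engine_defines.h.
    // PollForMs() is a hypothetical helper, not part of the WebRTC sources.
    static void PollForMs(unsigned long durationMs)
    {
        const unsigned long start = GET_TIME_IN_MS();
        while (GET_TIME_IN_MS() - start < durationMs)
        {
            SLEEP(10);  // ::Sleep() on Windows, a nanosleep()-based shim elsewhere
        }
    }
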
diff --git a/voice_engine/main/source/voice_engine_impl.cc b/voice_engine/main/source/voice_engine_impl.cc
new file mode 100644
index 0000000..2914c50
--- /dev/null
+++ b/voice_engine/main/source/voice_engine_impl.cc
@@ -0,0 +1,310 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine_impl.h"
+#include "trace.h"
+#ifdef ANDROID
+#include "audio_device.h" // SetAndroidObjects
+#endif
+
+namespace webrtc
+{
+
+// Counter to ensure that we can add a correct ID in all static trace
+// methods. It is not the nicest solution, especially not since we already
+// have a counter in VoEBaseImpl. In other words, there is room for
+// improvement here.
+static WebRtc_Word32 gVoiceEngineInstanceCounter = 0;
+
+extern "C"
+{
+WEBRTC_DLLEXPORT VoiceEngine* GetVoiceEngine();
+
+VoiceEngine* GetVoiceEngine()
+{
+    VoiceEngineImpl* self = new VoiceEngineImpl();
+    VoiceEngine* ve = reinterpret_cast<VoiceEngine*> (self);
+    if (ve != NULL)
+    {
+        gVoiceEngineInstanceCounter++;
+    }
+    return ve;
+}
+} // extern "C"
+
+VoiceEngine* VoiceEngine::Create()
+{
+#if (defined _WIN32)
+    HMODULE hmod_ = LoadLibrary(TEXT("VoiceEngineTestingDynamic.dll"));
+
+    if (hmod_)
+    {
+        typedef VoiceEngine* (*PfnGetVoiceEngine)(void);
+        PfnGetVoiceEngine pfn = (PfnGetVoiceEngine)GetProcAddress(
+                hmod_,"GetVoiceEngine");
+        if (pfn)
+        {
+            VoiceEngine* self = pfn();
+            return (self);
+        }
+    }
+#endif
+
+    return GetVoiceEngine();
+}
+
+int VoiceEngine::SetTraceFilter(const unsigned int filter)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+                 VoEId(gVoiceEngineInstanceCounter, -1),
+                 "SetTraceFilter(filter=0x%x)", filter);
+
+    // Remember old filter
+    WebRtc_UWord32 oldFilter = 0;
+    Trace::LevelFilter(oldFilter);
+
+    // Set new filter
+    WebRtc_Word32 ret = Trace::SetLevelFilter(filter);
+
+    // If previous log was ignored, log again after changing filter
+    if (kTraceNone == oldFilter)
+    {
+        WEBRTC_TRACE(kTraceApiCall, kTraceVoice, -1,
+                     "SetTraceFilter(filter=0x%x)", filter);
+    }
+
+    return (ret);
+}
+
+int VoiceEngine::SetTraceFile(const char* fileNameUTF8,
+                              const bool addFileCounter)
+{
+    int ret = Trace::SetTraceFile(fileNameUTF8, addFileCounter);
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+                 VoEId(gVoiceEngineInstanceCounter, -1),
+                 "SetTraceFile(fileNameUTF8=%s, addFileCounter=%d)",
+                 fileNameUTF8, addFileCounter);
+    return (ret);
+}
+
+int VoiceEngine::SetTraceCallback(TraceCallback* callback)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+                 VoEId(gVoiceEngineInstanceCounter, -1),
+                 "SetTraceCallback(callback=0x%x)", callback);
+    return (Trace::SetTraceCallback(callback));
+}
+
+bool VoiceEngine::Delete(VoiceEngine*& voiceEngine, bool ignoreRefCounters)
+{
+    if (voiceEngine == NULL)
+    {
+        return false;
+    }
+
+    VoiceEngineImpl* s = reinterpret_cast<VoiceEngineImpl*> (voiceEngine);
+    VoEBaseImpl* base = s;
+
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, -1,
+                 "VoiceEngine::Delete(voiceEngine=0x%p, ignoreRefCounters=%d)",
+                 voiceEngine, ignoreRefCounters);
+
+    if (!ignoreRefCounters)
+    {
+        if (base->GetCount() != 0)
+        {
+            WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
+                         "VoEBase reference counter is %d => memory will not "
+                             "be released properly!", base->GetCount());
+            return false;
+        }
+#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
+        VoECodecImpl* codec = s;
+        if (codec->GetCount() != 0)
+        {
+            WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
+                         "VoECodec reference counter is %d => memory will not "
+                             "be released properly!", codec->GetCount());
+            return false;
+        }
+#endif
+
+#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
+        VoEDtmfImpl* dtmf = s;
+        if (dtmf->GetCount() != 0)
+        {
+            WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
+                         "VoEDtmf reference counter is %d =>"
+                             "memory will not be released properly!",
+                         dtmf->GetCount());
+            return false;
+        }
+#endif
+
+#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+        VoEEncryptionImpl* encrypt = s;
+        if (encrypt->GetCount() != 0)
+        {
+            WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
+                         "VoEEncryption reference counter is %d => "
+                             "memory will not be released properly!",
+                         encrypt->GetCount());
+            return false;
+        }
+#endif
+
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+        VoEExternalMediaImpl* extmedia = s;
+        if (extmedia->GetCount() != 0)
+        {
+            WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
+                         "VoEExternalMedia reference counter is %d => "
+                             "memory will not be released properly!",
+                         extmedia->GetCount());
+            return false;
+        }
+#endif
+
+#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+        VoECallReportImpl* report = s;
+        if (report->GetCount() != 0)
+        {
+            WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
+                         "VoECallReport reference counter is %d => memory "
+                             "will not be released properly!",
+                         report->GetCount());
+            return false;
+        }
+#endif
+
+#ifdef WEBRTC_VOICE_ENGINE_FILE_API
+        VoEFileImpl* file = s;
+        if (file->GetCount() != 0)
+        {
+            WEBRTC_TRACE(
+                         kTraceCritical,
+                         kTraceVoice,
+                         -1,
+                         "VoEFile reference counter is %d => memory will not "
+                         "be released properly!",
+                         file->GetCount());
+            return false;
+        }
+#endif
+
+#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
+        VoEHardwareImpl* hware = s;
+        if (hware->GetCount() != 0)
+        {
+            WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
+                         "VoEHardware reference counter is %d => memory will "
+                         "not be released properly!", hware->GetCount());
+            return false;
+        }
+#endif
+
+#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+        VoENetEqStatsImpl* neteqst = s;
+        if (neteqst->GetCount() != 0)
+        {
+            WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
+                         "VoENetEqStats reference counter is %d => "
+                             "memory will not be released properly!",
+                         neteqst->GetCount());
+            return false;
+        }
+#endif
+
+#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
+        VoENetworkImpl* netw = s;
+        if (netw->GetCount() != 0)
+        {
+            WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
+                         "VoENetworkImpl reference counter is %d => memory "
+                         "will not be released properly!", netw->GetCount());
+            return false;
+        }
+#endif
+
+#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+        VoERTP_RTCPImpl* rtcp = s;
+        if (rtcp->GetCount() != 0)
+        {
+            WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
+                         "VoERTP_RTCP reference counter is %d =>"
+                             "memory will not be released properly!",
+                         rtcp->GetCount());
+            return false;
+        }
+#endif
+
+#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+        VoEVideoSyncImpl* vsync = s;
+        if (vsync->GetCount() != 0)
+        {
+            WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
+                         "VoEVideoSync reference counter is %d => "
+                             "memory will not be released properly!",
+                         vsync->GetCount());
+            return false;
+        }
+#endif
+
+#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+        VoEVolumeControlImpl* volume = s;
+        if (volume->GetCount() != 0)
+        {
+            WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
+                         "VoEVolumeControl reference counter is %d =>"
+                             "memory will not be released properly!",
+                         volume->GetCount());
+            return false;
+        }
+#endif
+
+#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+        VoEAudioProcessingImpl* apm = s;
+        if (apm->GetCount() != 0)
+        {
+            WEBRTC_TRACE(kTraceCritical, kTraceVoice, -1,
+                         "VoEAudioProcessing reference counter is %d => "
+                             "memory will not be released properly!",
+                         apm->GetCount());
+            return false;
+        }
+#endif
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice, -1,
+                     "all reference counters are zero => deleting the "
+                     "VoiceEngine instance...");
+
+    } // if (!ignoreRefCounters)
+    else
+    {
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice, -1,
+                     "reference counters are ignored => deleting the "
+                     "VoiceEngine instance...");
+    }
+
+    delete s;
+    voiceEngine = NULL;
+
+    return true;
+}
+
+int VoiceEngine::SetAndroidObjects(void* javaVM, void* env, void* context)
+{
+#ifdef ANDROID
+    return AudioDeviceModule::SetAndroidObjects(javaVM, env, context);
+#else
+    return -1;
+#endif
+}
+
+} // namespace webrtc
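
Editor's note: VoiceEngine::Delete() above refuses to free the engine while any sub-API still holds a reference (unless ignoreRefCounters is set), and it nulls the caller's pointer on success. A minimal sketch of that gate, assuming the usual sub-API GetInterface/Release calls:

    // Sketch only: illustrates the reference-count gate in VoiceEngine::Delete().
    #include "voe_base.h"

    void DeleteIsGatedByRefCounts()
    {
        webrtc::VoiceEngine* voe = webrtc::VoiceEngine::Create();
        webrtc::VoEBase* base = webrtc::VoEBase::GetInterface(voe);

        bool deleted = webrtc::VoiceEngine::Delete(voe);  // false: VoEBase count is 1

        base->Release();                                  // drop the last reference
        deleted = webrtc::VoiceEngine::Delete(voe);       // true; voe is now NULL
        (void)deleted;
    }
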
diff --git a/voice_engine/main/source/voice_engine_impl.h b/voice_engine/main/source/voice_engine_impl.h
new file mode 100644
index 0000000..99564dc
--- /dev/null
+++ b/voice_engine/main/source/voice_engine_impl.h
@@ -0,0 +1,113 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOICE_ENGINE_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOICE_ENGINE_IMPL_H
+
+#include "engine_configurations.h"
+#include "voe_base_impl.h"
+
+#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+#include "voe_audio_processing_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+#include "voe_call_report_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
+#include "voe_codec_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
+#include "voe_dtmf_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+#include "voe_encryption_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+#include "voe_external_media_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_FILE_API
+#include "voe_file_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
+#include "voe_hardware_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+#include "voe_neteq_stats_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
+#include "voe_network_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+#include "voe_rtp_rtcp_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+#include "voe_video_sync_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+#include "voe_volume_control_impl.h"
+#endif
+
+namespace webrtc
+{
+
+class VoiceEngineImpl :
+#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+                            public VoEAudioProcessingImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+                            public VoECallReportImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
+                            public VoECodecImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
+                            public VoEDtmfImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+                            public VoEEncryptionImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+                            public VoEExternalMediaImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_FILE_API
+                            public VoEFileImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
+                            public VoEHardwareImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+                            public VoENetEqStatsImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
+                            public VoENetworkImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+                            public VoERTP_RTCPImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+                            public VoEVideoSyncImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+                            public VoEVolumeControlImpl,
+#endif
+                            public VoEBaseImpl
+{
+public:
+    VoiceEngineImpl()
+    {
+    }
+    virtual ~VoiceEngineImpl()
+    {
+    }
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_IMPL_H
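
Editor's note: VoiceEngineImpl composes the engine at compile time; every sub-API implementation that the build enables is mixed in as a base class, so a single object carries all interfaces. voice_engine_impl.cc relies on this by reinterpret_cast-ing the public VoiceEngine pointer back to VoiceEngineImpl and then upcasting implicitly to each sub-API implementation. A minimal sketch of that cast chain:

    // Sketch only: the cast pattern used in voice_engine_impl.cc.
    #include "voice_engine_impl.h"

    void CastChainExample(webrtc::VoiceEngine* ve)
    {
        webrtc::VoiceEngineImpl* impl = reinterpret_cast<webrtc::VoiceEngineImpl*>(ve);
        webrtc::VoEBaseImpl* base = impl;    // always present
    #ifdef WEBRTC_VOICE_ENGINE_CODEC_API
        webrtc::VoECodecImpl* codec = impl;  // only if the codec API is compiled in
        (void)codec;
    #endif
        (void)base;
    }
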
diff --git a/voice_engine/main/test/Android/android_test/.classpath b/voice_engine/main/test/Android/android_test/.classpath
new file mode 100644
index 0000000..86a15c9
--- /dev/null
+++ b/voice_engine/main/test/Android/android_test/.classpath
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+	<classpathentry kind="con" path="com.android.ide.eclipse.adt.ANDROID_FRAMEWORK"/>
+	<classpathentry kind="src" path="src"/>
+	<classpathentry kind="src" path="gen"/>
+	<classpathentry kind="output" path="bin"/>
+</classpath>
diff --git a/voice_engine/main/test/Android/android_test/.project b/voice_engine/main/test/Android/android_test/.project
new file mode 100644
index 0000000..990e2f5
--- /dev/null
+++ b/voice_engine/main/test/Android/android_test/.project
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>AndroidTest</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.jdt.core.javabuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>com.android.ide.eclipse.adt.ApkBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>com.android.ide.eclipse.adt.AndroidNature</nature>
+		<nature>org.eclipse.jdt.core.javanature</nature>
+	</natures>
+</projectDescription>
diff --git a/voice_engine/main/test/Android/android_test/AndroidManifest.xml b/voice_engine/main/test/Android/android_test/AndroidManifest.xml
new file mode 100644
index 0000000..76e2e34
--- /dev/null
+++ b/voice_engine/main/test/Android/android_test/AndroidManifest.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. -->
+<!--                                                                     -->
+<!-- Use of this source code is governed by a BSD-style license          -->
+<!-- that can be found in the LICENSE file in the root of the source     -->
+<!-- tree. An additional intellectual property rights grant can be found -->
+<!-- in the file PATENTS.  All contributing project authors may          -->
+<!-- be found in the AUTHORS file in the root of the source tree.        -->
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+      android:versionCode="1"
+      android:versionName="1.0" package="org.webrtc.voiceengine.test">
+    <application android:icon="@drawable/icon" android:label="@string/app_name" android:debuggable="true">
+        <activity android:name=".AndroidTest"
+                  android:label="@string/app_name"
+                  android:screenOrientation="portrait">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.intent.category.LAUNCHER" />
+            </intent-filter>
+        </activity>
+
+    </application>
+    <uses-sdk android:minSdkVersion="3" />
+
+	<uses-permission android:name="android.permission.MODIFY_AUDIO_SETTINGS" />
+	<uses-permission android:name="android.permission.RECORD_AUDIO" />
+	<uses-permission android:name="android.permission.INTERNET" />
+
+</manifest> 
diff --git a/voice_engine/main/test/Android/android_test/default.properties b/voice_engine/main/test/Android/android_test/default.properties
new file mode 100644
index 0000000..19ddebd
--- /dev/null
+++ b/voice_engine/main/test/Android/android_test/default.properties
@@ -0,0 +1,11 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+# 
+# This file must be checked in Version Control Systems.
+# 
+# To customize properties used by the Ant build system use,
+# "build.properties", and override values to adapt the script to your
+# project structure.
+
+# Project target.
+target=android-3
diff --git a/voice_engine/main/test/Android/android_test/gen/org/webrtc/voiceengine/test/R.java b/voice_engine/main/test/Android/android_test/gen/org/webrtc/voiceengine/test/R.java
new file mode 100644
index 0000000..ec8f5b4
--- /dev/null
+++ b/voice_engine/main/test/Android/android_test/gen/org/webrtc/voiceengine/test/R.java
@@ -0,0 +1,30 @@
+/* AUTO-GENERATED FILE.  DO NOT MODIFY.
+ *
+ * This class was automatically generated by the
+ * aapt tool from the resource data it found.  It
+ * should not be modified by hand.
+ */
+
+package org.webrtc.voiceengine.test;
+
+public final class R {
+    public static final class attr {
+    }
+    public static final class drawable {
+        public static final int icon=0x7f020000;
+    }
+    public static final class id {
+        public static final int Button01=0x7f050002;
+        public static final int Button02=0x7f050005;
+        public static final int EditText01=0x7f050001;
+        public static final int Spinner01=0x7f050003;
+        public static final int Spinner02=0x7f050004;
+        public static final int TextView01=0x7f050000;
+    }
+    public static final class layout {
+        public static final int main=0x7f030000;
+    }
+    public static final class string {
+        public static final int app_name=0x7f040000;
+    }
+}
diff --git a/voice_engine/main/test/Android/android_test/jni/android_test.cc b/voice_engine/main/test/Android/android_test/jni/android_test.cc
new file mode 100644
index 0000000..04139ba
--- /dev/null
+++ b/voice_engine/main/test/Android/android_test/jni/android_test.cc
@@ -0,0 +1,1525 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <android/log.h>
+
+#include "org_webrtc_voiceengine_test_AndroidTest.h"
+
+#include "thread_wrapper.h"
+
+#include "voe_base.h"
+#include "voe_codec.h"
+#include "voe_file.h"
+#include "voe_network.h"
+#include "voe_audio_processing.h"
+#include "voe_volume_control.h"
+#include "voe_hardware.h"
+#include "voe_rtp_rtcp.h"
+#include "voe_encryption.h"
+
+#include "voe_test_interface.h"
+
+//#define USE_SRTP
+//#define INIT_FROM_THREAD
+//#define START_CALL_FROM_THREAD
+
+#define WEBRTC_LOG_TAG "*WEBRTCN*" // As in WEBRTC Native...
+#define VALIDATE_BASE_POINTER \
+    if (!veData1.base) \
+    { \
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+                            "Base pointer doesn't exist"); \
+        return -1; \
+    }
+#define VALIDATE_CODEC_POINTER \
+    if (!veData1.codec) \
+    { \
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+                            "Codec pointer doesn't exist"); \
+        return -1; \
+    }
+#define VALIDATE_FILE_POINTER \
+    if (!veData1.file) \
+    { \
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+                            "File pointer doesn't exist"); \
+        return -1; \
+    }
+#define VALIDATE_NETWORK_POINTER \
+    if (!veData1.netw) \
+    { \
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+                            "Network pointer doesn't exist"); \
+        return -1; \
+    }
+#define VALIDATE_APM_POINTER \
+    if (!veData1.apm) \
+    { \
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+                            "Apm pointer doesn't exist"); \
+        return -1; \
+    }
+#define VALIDATE_VOLUME_POINTER \
+    if (!veData1.volume) \
+    { \
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+                            "Volume pointer doesn't exist"); \
+        return -1; \
+    }
+#define VALIDATE_HARDWARE_POINTER \
+    if (!veData1.hardware) \
+    { \
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+                            "Hardware pointer doesn't exist"); \
+        return -1; \
+    }
+#define VALIDATE_RTP_RTCP_POINTER \
+    if (!veData1.rtp_rtcp) \
+    { \
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+                            "RTP / RTCP pointer doesn't exist"); \
+        return -1; \
+    }
+#define VALIDATE_ENCRYPT_POINTER \
+    if (!veData1.encrypt) \
+    { \
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+                            "Encrypt pointer doesn't exist"); \
+        return -1; \
+    }
+
+// Register functions in JNI_OnLoad()
+// How do we ensure that VoE is deleted? JNI_OnUnload?
+// What happens if class is unloaded? When loaded again, NativeInit will be
+// called again. Keep what we have?
+// Should we do something in JNI_OnUnload?
+// General design: create a class or keep global struct with "C" functions?
+// Otherwise make sure symbols are as unique as possible.
+
+using namespace webrtc;
+
+// VoiceEngine data struct
+typedef struct
+{
+    // VoiceEngine
+    VoiceEngine* ve;
+    // Sub-APIs
+    VoEBase* base;
+    VoECodec* codec;
+    VoEFile* file;
+    VoENetwork* netw;
+    VoEAudioProcessing* apm;
+    VoEVolumeControl* volume;
+    VoEHardware* hardware;
+    VoERTP_RTCP* rtp_rtcp;
+    VoEEncryption* encrypt;
+    // Other
+    my_transportation* extTrans;
+    JavaVM* jvm;
+} VoiceEngineData;
+
+// Global variables visible in this file
+static VoiceEngineData veData1;
+static VoiceEngineData veData2;
+
+// "Local" functions (i.e. not Java accessible)
+static bool GetSubApis(VoiceEngineData &veData);
+static bool ReleaseSubApis(VoiceEngineData &veData);
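
Editor's note: GetSubApis() and ReleaseSubApis() are defined further down in this test file; the sketch below only illustrates the presumed pattern of filling the VoiceEngineData struct through the sub-API GetInterface factories and is not the actual definition:

    // Sketch only: one plausible shape of GetSubApis(); the real definition in
    // android_test.cc may differ in detail.
    static bool GetSubApisSketch(VoiceEngineData& veData)
    {
        veData.base  = webrtc::VoEBase::GetInterface(veData.ve);
        veData.codec = webrtc::VoECodec::GetInterface(veData.ve);
        veData.file  = webrtc::VoEFile::GetInterface(veData.ve);
        // ...the remaining sub-APIs (netw, apm, volume, ...) follow the same pattern.
        return veData.base && veData.codec && veData.file;
    }
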
+
+char nikkey[64] =
+        "123456781234567812345678123456781234567812345678123456781234567";
+
+class ThreadTest
+{
+public:
+    ThreadTest();
+    ~ThreadTest();
+    int RunTest();
+    int CloseTest();
+private:
+    static bool Run(void* ptr);
+    bool Process();
+private:
+    ThreadWrapper* _thread;
+};
+
+ThreadTest::~ThreadTest()
+{
+    if (_thread)
+    {
+        _thread->SetNotAlive();
+        if (_thread->Stop())
+        {
+            delete _thread;
+            _thread = NULL;
+        }
+    }
+}
+
+ThreadTest::ThreadTest() :
+    _thread(NULL)
+{
+    _thread = ThreadWrapper::CreateThread(Run, this, kNormalPriority,
+                                          "ThreadTest thread");
+}
+
+bool ThreadTest::Run(void* ptr)
+{
+    return static_cast<ThreadTest*> (ptr)->Process();
+}
+
+bool ThreadTest::Process()
+{
+    // Attach this thread to JVM
+    /*JNIEnv* env = NULL;
+     jint res = veData1.jvm->AttachCurrentThread(&env, NULL);
+     char msg[32];
+     sprintf(msg, "res=%d, env=%d", res, env);
+     __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, msg);*/
+
+#ifdef INIT_FROM_THREAD
+    VALIDATE_BASE_POINTER;
+    veData1.base->Init(0, 0, 0);
+#endif
+
+#ifdef START_CALL_FROM_THREAD
+    // receiving instance
+    veData2.ve = VoiceEngine::Create();
+    GetSubApis(veData2);
+    veData2.base->Init(0, 0, 0);
+    veData2.base->CreateChannel();
+    if(veData2.base->SetLocalReceiver(0, 1234) < 0)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                "set local receiver 2 failed");
+    }
+    veData2.hardware->SetLoudspeakerStatus(false);
+    veData2.volume->SetSpeakerVolume(204);
+    veData2.base->StartReceive(0);
+    if(veData2.base->StartPlayout(0) < 0)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                "start playout failed");
+    }
+
+    __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+            "receiving instance started from thread");
+
+    // sending instance
+    veData1.ve = VoiceEngine::Create();
+    GetSubApis(veData1);
+    veData1.base->Init(0, 0, 0);
+    if(veData1.base->CreateChannel() < 0)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                "create channel failed");
+    }
+    if(veData1.base->SetLocalReceiver(0, 1256) < 0)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                "set local receiver failed");
+    }
+    if(veData1.base->SetSendDestination(0, 1234, "127.0.0.1") < 0)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                "set send destination failed");
+    }
+    if(veData1.base->StartSend(0) < 0)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                "start send failed");
+    }
+
+    __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+            "sending instance started from thread");
+#endif
+
+    _thread->SetNotAlive();
+    _thread->Stop();
+
+    //res = veData1.jvm->DetachCurrentThread();
+
+    return true;
+}
+
+int ThreadTest::RunTest()
+{
+    if (_thread)
+    {
+        unsigned int id;
+        _thread->Start(id);
+    }
+    return 0;
+}
+
+int ThreadTest::CloseTest()
+{
+    VALIDATE_BASE_POINTER
+
+    veData1.base->DeleteChannel(0);
+    veData2.base->DeleteChannel(0);
+    veData1.base->Terminate();
+    veData2.base->Terminate();
+
+    // Release sub-APIs
+    ReleaseSubApis(veData1);
+    ReleaseSubApis(veData2);
+
+    // Delete
+    VoiceEngine::Delete(veData1.ve);
+    VoiceEngine::Delete(veData2.ve);
+    veData1.ve = NULL;
+    veData2.ve = NULL;
+
+    return 0;
+}
+
+ThreadTest threadTest;
+
+//////////////////////////////////////////////////////////////////
+// General functions
+//////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////
+// JNI_OnLoad
+//
+jint JNI_OnLoad(JavaVM* vm, void* /*reserved*/)
+{
+    if (!vm)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "JNI_OnLoad did not receive a valid VM pointer");
+        return -1;
+    }
+
+    // Get JNI
+    JNIEnv* env;
+    if (JNI_OK != vm->GetEnv(reinterpret_cast<void**> (&env),
+                             JNI_VERSION_1_4))
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "JNI_OnLoad could not get JNI env");
+        return -1;
+    }
+
+    // Get class to register the native functions with
+    // jclass regClass = env->FindClass("webrtc/android/AndroidTest");
+    // if (!regClass) {
+    // return -1; // Exception thrown
+    // }
+
+    // Register native functions
+    // JNINativeMethod methods[1];
+    // methods[0].name = NULL;
+    // methods[0].signature = NULL;
+    // methods[0].fnPtr = NULL;
+    // if (JNI_OK != env->RegisterNatives(regClass, methods, 1))
+    // {
+    // return -1;
+    // }
+
+    // Init VoiceEngine data
+    memset(&veData1, 0, sizeof(veData1));
+    memset(&veData2, 0, sizeof(veData2));
+
+    // Store the JVM
+    veData1.jvm = vm;
+    veData2.jvm = vm;
+
+    return JNI_VERSION_1_4;
+}
+
+/////////////////////////////////////////////
+// Native initialization
+//
+JNIEXPORT jboolean JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_NativeInit(
+        JNIEnv * env,
+        jclass)
+{
+    // Look up and cache any interesting class, field and method IDs for
+    // any used java class here
+
+    return true;
+}
+
+/////////////////////////////////////////////
+// Run auto standard test
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_RunAutoTest(
+        JNIEnv *env,
+        jobject context,
+        jint testType,
+        jint extendedSel)
+{
+    TestType tType(Invalid);
+
+    switch (testType)
+    {
+        case 0:
+            return 0;
+        case 1:
+            tType = Standard;
+            break;
+        case 2:
+            tType = Extended;
+            break;
+        case 3:
+            tType = Stress;
+            break;
+        case 4:
+            tType = Unit;
+            break;
+        default:
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "RunAutoTest - Invalid TestType");
+            return -1;
+    }
+
+    ExtendedSelection xsel(XSEL_Invalid);
+
+    switch (extendedSel)
+    {
+        case 0:
+            xsel = XSEL_None;
+            break;
+        case 1:
+            xsel = XSEL_All;
+            break;
+        case 2:
+            xsel = XSEL_Base;
+            break;
+        case 3:
+            xsel = XSEL_CallReport;
+            break;
+        case 4:
+            xsel = XSEL_Codec;
+            break;
+        case 5:
+            xsel = XSEL_DTMF;
+            break;
+        case 6:
+            xsel = XSEL_Encryption;
+            break;
+        case 7:
+            xsel = XSEL_ExternalMedia;
+            break;
+        case 8:
+            xsel = XSEL_File;
+            break;
+        case 9:
+            xsel = XSEL_Hardware;
+            break;
+        case 10:
+            xsel = XSEL_NetEqStats;
+            break;
+        case 11:
+            xsel = XSEL_Network;
+            break;
+        case 12:
+            xsel = XSEL_PTT;
+            break;
+        case 13:
+            xsel = XSEL_RTP_RTCP;
+            break;
+        case 14:
+            xsel = XSEL_VideoSync;
+            break;
+        case 15:
+            xsel = XSEL_VideoSyncExtended;
+            break;
+        case 16:
+            xsel = XSEL_VolumeControl;
+            break;
+        case 17:
+            xsel = XSEL_APM;
+            break;
+        case 18:
+            xsel = XSEL_VQMon;
+            break;
+        default:
+            xsel = XSEL_Invalid;
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "RunAutoTest - Invalid extendedType");
+            return -1;
+    }
+
+    // Set instance independent Java objects
+    VoiceEngine::SetAndroidObjects(veData1.jvm, env, context);
+
+    // Call voe test interface function
+    //setAndroidObjects(veData1.jvm, context);
+    jint retVal = runAutoTest(tType, xsel);
+
+    // Clear instance independent Java objects
+    VoiceEngine::SetAndroidObjects(NULL, NULL, NULL);
+
+    return retVal;
+}
+
+//////////////////////////////////////////////////////////////////
+// VoiceEngine API wrapper functions
+//////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////
+// Create VoiceEngine instance
+//
+JNIEXPORT jboolean JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Create(
+        JNIEnv *env,
+        jobject context)
+{
+    // Check if already created
+    if (veData1.ve)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "VoE already created");
+        return false;
+    }
+
+    // Set instance independent Java objects
+    VoiceEngine::SetAndroidObjects(veData1.jvm, env, context);
+
+#ifdef START_CALL_FROM_THREAD
+    threadTest.RunTest();
+#else
+    // Create
+    veData1.ve = VoiceEngine::Create();
+    if (!veData1.ve)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Create VoE failed");
+        return false;
+    }
+
+    // Get sub-APIs
+    if (!GetSubApis(veData1))
+    {
+        // If not OK, release all sub-APIs and delete VoE
+        ReleaseSubApis(veData1);
+        if (!VoiceEngine::Delete(veData1.ve))
+        {
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "Delete VoE failed");
+        }
+        return false;
+    }
+#endif
+
+    return true;
+}
+
+/////////////////////////////////////////////
+// Delete VoiceEngine instance
+//
+JNIEXPORT jboolean JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Delete(
+        JNIEnv *,
+        jobject)
+{
+#ifdef START_CALL_FROM_THREAD
+    threadTest.CloseTest();
+#else
+    // Check if exists
+    if (!veData1.ve)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "VoE does not exist");
+        return false;
+    }
+
+    // Release sub-APIs
+    ReleaseSubApis(veData1);
+
+    // Delete
+    if (!VoiceEngine::Delete(veData1.ve))
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Delete VoE failed");
+        return false;
+    }
+
+    veData1.ve = NULL;
+#endif
+
+    // Clear instance independent Java objects
+    VoiceEngine::SetAndroidObjects(NULL, NULL, NULL);
+
+    return true;
+}
+
+/////////////////////////////////////////////
+// [Base] Authenticate
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Authenticate(
+        JNIEnv *env,
+        jobject,
+        jstring key)
+{
+    const char* keyNative = env->GetStringUTFChars(key, NULL);
+    if (!keyNative)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Could not get UTF string");
+        return -1;
+    }
+
+    jint retVal = veData1.base->Authenticate(keyNative, strlen(keyNative));
+
+    env->ReleaseStringUTFChars(key, keyNative);
+
+    return retVal;
+}
+
+/////////////////////////////////////////////
+// [Base] Initialize VoiceEngine
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Init(
+        JNIEnv *,
+        jobject,
+        jint month,
+        jint day,
+        jint year,
+        jboolean enableTrace,
+        jboolean useExtTrans)
+{
+    VALIDATE_BASE_POINTER;
+
+    if (enableTrace)
+    {
+        if (0 != VoiceEngine::SetTraceFile("/sdcard/trace.txt"))
+        {
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "Could not enable trace");
+        }
+        if (VoiceEngine::SetEncryptedTraceFile("/sdcard/trace_debug.txt") != 0)
+        {
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "Could not enable debug trace");
+        }
+        if (0 != VoiceEngine::SetTraceFilter(kTraceAll))
+        {
+            __android_log_write(ANDROID_LOG_WARN, WEBRTC_LOG_TAG,
+                                "Could not set trace filter");
+        }
+    }
+
+    if (useExtTrans)
+    {
+        VALIDATE_NETWORK_POINTER;
+        veData1.extTrans = new my_transportation(veData1.netw);
+    }
+
+    int retVal = 0;
+#ifdef INIT_FROM_THREAD
+    threadTest.RunTest();
+    usleep(200000);
+#else
+    retVal = veData1.base->Init(month, day, year);
+#endif
+    return retVal;
+}
+
+/////////////////////////////////////////////
+// [Base] Terminate VoiceEngine
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Terminate(
+        JNIEnv *,
+        jobject)
+{
+    VALIDATE_BASE_POINTER;
+
+    jint retVal = veData1.base->Terminate();
+
+    delete veData1.extTrans;
+    veData1.extTrans = NULL;
+
+    return retVal;
+}
+
+/////////////////////////////////////////////
+// [Base] Create channel
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_CreateChannel(
+        JNIEnv *,
+        jobject)
+{
+    VALIDATE_BASE_POINTER;
+    jint channel = veData1.base->CreateChannel();
+
+    if (veData1.extTrans)
+    {
+        VALIDATE_NETWORK_POINTER;
+        __android_log_print(ANDROID_LOG_DEBUG, WEBRTC_LOG_TAG,
+                            "Enabling external transport on channel %d",
+                            channel);
+        if (veData1.netw->RegisterExternalTransport(channel, *veData1.extTrans)
+                < 0)
+        {
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "Could not set external transport");
+            return -1;
+        }
+    }
+
+    return channel;
+}
+
+/////////////////////////////////////////////
+// [Base] Delete channel
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_DeleteChannel(
+        JNIEnv *,
+        jobject,
+        jint channel)
+{
+    VALIDATE_BASE_POINTER;
+    return veData1.base->DeleteChannel(channel);
+}
+
+/////////////////////////////////////////////
+// [Base] SetLocalReceiver
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_SetLocalReceiver(
+        JNIEnv *,
+        jobject,
+        jint channel,
+        jint port)
+{
+    VALIDATE_BASE_POINTER;
+    return veData1.base->SetLocalReceiver(channel, port);
+}
+
+/////////////////////////////////////////////
+// [Base] SetSendDestination
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_SetSendDestination(
+        JNIEnv *env,
+        jobject,
+        jint channel,
+        jint port,
+        jstring ipaddr)
+{
+    VALIDATE_BASE_POINTER;
+
+    const char* ipaddrNative = env->GetStringUTFChars(ipaddr, NULL);
+    if (!ipaddrNative)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Could not get UTF string");
+        return -1;
+    }
+
+    jint retVal = veData1.base->SetSendDestination(channel, port, ipaddrNative);
+
+    env->ReleaseStringUTFChars(ipaddr, ipaddrNative);
+
+    return retVal;
+}
+
+/////////////////////////////////////////////
+// [Base] StartListen
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartListen(
+        JNIEnv *,
+        jobject,
+        jint channel)
+{
+#ifdef USE_SRTP
+    VALIDATE_ENCRYPT_POINTER;
+    bool useForRTCP = false;
+    if (veData1.encrypt->EnableSRTPReceive(
+                    channel,CIPHER_AES_128_COUNTER_MODE,30,AUTH_HMAC_SHA1,
+                    16,4, ENCRYPTION_AND_AUTHENTICATION,
+                    (unsigned char*)nikkey, useForRTCP) != 0)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                "Failed to enable SRTP receive");
+        return -1;
+    }
+#endif
+
+    VALIDATE_BASE_POINTER;
+    int retVal = veData1.base->StartReceive(channel);
+
+    return retVal;
+}
+
+/////////////////////////////////////////////
+// [Base] Start playout
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayout(
+        JNIEnv *,
+        jobject,
+        jint channel)
+{
+    VALIDATE_BASE_POINTER;
+    int retVal = veData1.base->StartPlayout(channel);
+
+    return retVal;
+}
+
+/////////////////////////////////////////////
+// [Base] Start send
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartSend(
+        JNIEnv *,
+        jobject,
+        jint channel)
+{
+    /*    int dscp(0), serviceType(-1), overrideDscp(0), res(0);
+     bool gqosEnabled(false), useSetSockOpt(false);
+
+     if (veData1.netw->SetSendTOS(channel, 13, useSetSockOpt) != 0)
+     {
+     __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+         "Failed to set TOS");
+     return -1;
+     }
+
+     res = veData1.netw->GetSendTOS(channel, dscp, useSetSockOpt);
+     if (res != 0 || dscp != 13 || useSetSockOpt != true)
+     {
+     __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+         "Failed to get TOS");
+     return -1;
+     } */
+
+    /*	if (veData1.rtp_rtcp->SetFECStatus(channel, 1) != 0)
+     {
+     __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+         "Failed to enable FEC");
+     return -1;
+     } */
+#ifdef USE_SRTP
+    VALIDATE_ENCRYPT_POINTER;
+    bool useForRTCP = false;
+    if (veData1.encrypt->EnableSRTPSend(
+                    channel,CIPHER_AES_128_COUNTER_MODE,30,AUTH_HMAC_SHA1,
+                    16,4, ENCRYPTION_AND_AUTHENTICATION,
+                    (unsigned char*)nikkey, useForRTCP) != 0)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                "Failed to enable SRTP send");
+        return -1;
+    }
+#endif
+
+    VALIDATE_BASE_POINTER;
+    int retVal = veData1.base->StartSend(channel);
+
+    return retVal;
+}
+
+/////////////////////////////////////////////
+// [Base] Stop listen
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopListen(
+        JNIEnv *,
+        jobject,
+        jint channel)
+{
+#ifdef USE_SRTP
+    VALIDATE_ENCRYPT_POINTER;
+    if (veData1.encrypt->DisableSRTPReceive(channel) != 0)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                "Failed to disable SRTP receive");
+        return -1;
+    }
+#endif
+
+    VALIDATE_BASE_POINTER;
+    return veData1.base->StopReceive(channel);
+}
+
+/////////////////////////////////////////////
+// [Base] Stop playout
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayout(
+        JNIEnv *,
+        jobject,
+        jint channel)
+{
+    VALIDATE_BASE_POINTER;
+    return veData1.base->StopPlayout(channel);
+}
+
+/////////////////////////////////////////////
+// [Base] Stop send
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopSend(
+        JNIEnv *,
+        jobject,
+        jint channel)
+{
+    /*	if (veData1.rtp_rtcp->SetFECStatus(channel, 0) != 0)
+     {
+     __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+         "Failed to disable FEC");
+     return -1;
+     } */
+
+#ifdef USE_SRTP
+    VALIDATE_ENCRYPT_POINTER;
+    if (veData1.encrypt->DisableSRTPSend(channel) != 0)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                "Failed to disable SRTP send");
+        return -1;
+    }
+#endif
+
+    VALIDATE_BASE_POINTER;
+    return veData1.base->StopSend(channel);
+}
+
+/////////////////////////////////////////////
+// [codec] Number of codecs
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_NumOfCodecs(
+        JNIEnv *,
+        jobject)
+{
+    VALIDATE_CODEC_POINTER;
+    return veData1.codec->NumOfCodecs();
+}
+
+/////////////////////////////////////////////
+// [codec] Set send codec
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_SetSendCodec(
+        JNIEnv *,
+        jobject,
+        jint channel,
+        jint index)
+{
+    VALIDATE_CODEC_POINTER;
+
+    CodecInst codec;
+
+    if (veData1.codec->GetCodec(index, codec) != 0)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Failed to get codec");
+        return -1;
+    }
+
+    return veData1.codec->SetSendCodec(channel, codec);
+}
+
+/////////////////////////////////////////////
+// [codec] Set VAD status
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_SetVADStatus(
+        JNIEnv *,
+        jobject,
+        jint channel,
+        jboolean enable,
+        jint mode)
+{
+    VALIDATE_CODEC_POINTER;
+
+    VadModes VADmode = kVadConventional;
+
+    switch (mode)
+    {
+        case 0:
+            break; // already set
+        case 1:
+            VADmode = kVadAggressiveLow;
+            break;
+        case 2:
+            VADmode = kVadAggressiveMid;
+            break;
+        case 3:
+            VADmode = kVadAggressiveHigh;
+            break;
+        default:
+            VADmode = (VadModes) 17; // force error
+            break;
+    }
+
+    return veData1.codec->SetVADStatus(channel, enable, VADmode);
+}
+
+/////////////////////////////////////////////
+// [apm] SetNSStatus
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetNSStatus(
+        JNIEnv *,
+        jobject,
+        jboolean enable,
+        jint mode)
+{
+    VALIDATE_APM_POINTER;
+
+    NsModes NSmode = kNsDefault;
+
+    switch (mode)
+    {
+        case 0:
+            NSmode = kNsUnchanged;
+            break;
+        case 1:
+            break; // already set
+        case 2:
+            NSmode = kNsConference;
+            break;
+        case 3:
+            NSmode = kNsLowSuppression;
+            break;
+        case 4:
+            NSmode = kNsModerateSuppression;
+            break;
+        case 5:
+            NSmode = kNsHighSuppression;
+            break;
+        case 6:
+            NSmode = kNsVeryHighSuppression;
+            break;
+        default:
+            NSmode = (NsModes) 17; // force error
+            break;
+    }
+
+    return veData1.apm->SetNsStatus(enable, NSmode);
+}
+
+/////////////////////////////////////////////
+// [apm] SetAGCStatus
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_SetAGCStatus(
+        JNIEnv *,
+        jobject,
+        jboolean enable,
+        jint mode)
+{
+    VALIDATE_APM_POINTER;
+
+    AgcModes AGCmode = kAgcDefault;
+
+    switch (mode)
+    {
+        case 0:
+            AGCmode = kAgcUnchanged;
+            break;
+        case 1:
+            break; // already set
+        case 2:
+            AGCmode = kAgcAdaptiveAnalog;
+            break;
+        case 3:
+            AGCmode = kAgcAdaptiveDigital;
+            break;
+        case 4:
+            AGCmode = kAgcFixedDigital;
+            break;
+        default:
+            AGCmode = (AgcModes) 17; // force error
+            break;
+    }
+
+    /*	AgcConfig agcConfig;
+     agcConfig.targetLeveldBOv = 3;
+     agcConfig.digitalCompressionGaindB = 50;
+     agcConfig.limiterEnable = 0;
+
+     if (veData1.apm->SetAGCConfig(agcConfig) != 0)
+     {
+     __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+         "Failed to set AGC config");
+     return -1;
+     } */
+
+    return veData1.apm->SetAgcStatus(enable, AGCmode);
+}
+
+/////////////////////////////////////////////
+// [apm] SetECStatus
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetECStatus(
+        JNIEnv *,
+        jobject,
+        jboolean enable,
+        jint mode)
+{
+    VALIDATE_APM_POINTER;
+
+    EcModes ECmode = kEcDefault;
+
+    switch (mode)
+    {
+        case 0:
+            ECmode = kEcDefault;
+            break;
+        case 1:
+            break; // already set
+        case 2:
+            ECmode = kEcConference;
+            break;
+        case 3:
+            ECmode = kEcAec;
+            break;
+        case 4:
+            ECmode = kEcAecm;
+            break;
+        default:
+            ECmode = (EcModes) 17; // force error
+            break;
+    }
+
+    return veData1.apm->SetEcStatus(enable, ECmode);
+}
+
+/////////////////////////////////////////////
+// [File] Start play file locally
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayingFileLocally(
+        JNIEnv * env,
+        jobject,
+        jint channel,
+        jstring fileName,
+        jboolean loop)
+{
+    VALIDATE_FILE_POINTER;
+
+    const char* fileNameNative = env->GetStringUTFChars(fileName, NULL);
+    if (!fileNameNative)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Could not get UTF string");
+        return -1;
+    }
+
+    jint retVal = veData1.file->StartPlayingFileLocally(channel,
+                                                        fileNameNative, loop);
+
+    env->ReleaseStringUTFChars(fileName, fileNameNative);
+
+    return retVal;
+}
+
+/////////////////////////////////////////////
+// [File] Stop play file locally
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayingFileLocally(
+        JNIEnv *,
+        jobject,
+        jint channel)
+{
+    VALIDATE_FILE_POINTER;
+    return veData1.file->StopPlayingFileLocally(channel);
+}
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    StartRecordingPlayout
+ * Signature: (ILjava/lang/String;Z)I
+ */
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_StartRecordingPlayout(
+        JNIEnv * env,
+        jobject,
+        jint channel,
+        jstring fileName,
+        jboolean)
+{
+    VALIDATE_FILE_POINTER;
+
+    const char* fileNameNative = env->GetStringUTFChars(fileName, NULL);
+    if (!fileNameNative)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Could not get UTF string");
+        return -1;
+    }
+
+    jint retVal = veData1.file->StartRecordingPlayout(channel, fileNameNative,
+                                                      0);
+
+    env->ReleaseStringUTFChars(fileName, fileNameNative);
+
+    return retVal;
+}
+
+/////////////////////////////////////////////
+// [File] Stop Recording Playout
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_StopRecordingPlayout(
+        JNIEnv *,
+        jobject,
+        jint channel)
+{
+    VALIDATE_FILE_POINTER;
+    return veData1.file->StopRecordingPlayout(channel);
+}
+
+/////////////////////////////////////////////
+// [File] Start playing file as microphone
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayingFileAsMicrophone(
+        JNIEnv *env,
+        jobject,
+        jint channel,
+        jstring fileName,
+        jboolean loop)
+{
+    VALIDATE_FILE_POINTER;
+
+    const char* fileNameNative = env->GetStringUTFChars(fileName, NULL);
+    if (!fileNameNative)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Could not get UTF string");
+        return -1;
+    }
+
+    jint retVal = veData1.file->StartPlayingFileAsMicrophone(channel,
+                                                             fileNameNative,
+                                                             loop);
+
+    env->ReleaseStringUTFChars(fileName, fileNameNative);
+
+    return retVal;
+}
+
+/////////////////////////////////////////////
+// [File] Stop playing file as microphone
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayingFileAsMicrophone(
+        JNIEnv *,
+        jobject,
+        jint channel)
+{
+    VALIDATE_FILE_POINTER;
+    return veData1.file->StopPlayingFileAsMicrophone(channel);
+}
+
+/////////////////////////////////////////////
+// [Volume] Set speaker volume
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_SetSpeakerVolume(
+        JNIEnv *,
+        jobject,
+        jint level)
+{
+    VALIDATE_VOLUME_POINTER;
+    if (veData1.volume->SetSpeakerVolume(level) != 0)
+    {
+        return -1;
+    }
+
+    unsigned int storedVolume = 0;
+    if (veData1.volume->GetSpeakerVolume(storedVolume) != 0)
+    {
+        return -1;
+    }
+
+    if (storedVolume != static_cast<unsigned int>(level))
+    {
+        return -1;
+    }
+
+    return 0;
+}
+
+/////////////////////////////////////////////
+// [Hardware] Set loudspeaker status
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_SetLoudspeakerStatus(
+        JNIEnv *,
+        jobject,
+        jboolean enable)
+{
+    VALIDATE_HARDWARE_POINTER;
+    if (veData1.hardware->SetLoudspeakerStatus(enable) != 0)
+    {
+        return -1;
+    }
+
+    /*VALIDATE_RTP_RTCP_POINTER;
+
+     if (veData1.rtp_rtcp->SetFECStatus(0, enable, -1) != 0)
+     {
+     __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+         "Could not set FEC");
+     return -1;
+     }
+     else if(enable)
+     {
+     __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+         "Could enable FEC");
+     }
+     else
+     {
+     __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+         "Could disable FEC");
+     }*/
+
+    return 0;
+}
+
+//////////////////////////////////////////////////////////////////
+// "Local" functions (i.e. not Java accessible)
+//////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////
+// Get all sub-APIs
+//
+bool GetSubApis(VoiceEngineData &veData)
+{
+    bool getOK = true;
+
+    // Base
+    veData.base = VoEBase::GetInterface(veData.ve);
+    if (!veData.base)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Get base sub-API failed");
+        getOK = false;
+    }
+
+    // Codec
+    veData.codec = VoECodec::GetInterface(veData.ve);
+    if (!veData.codec)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Get codec sub-API failed");
+        getOK = false;
+    }
+
+    // File
+    veData.file = VoEFile::GetInterface(veData.ve);
+    if (!veData.file)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Get file sub-API failed");
+        getOK = false;
+    }
+
+    // Network
+    veData.netw = VoENetwork::GetInterface(veData.ve);
+    if (!veData.netw)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Get network sub-API failed");
+        getOK = false;
+    }
+
+    // AudioProcessing module
+    veData.apm = VoEAudioProcessing::GetInterface(veData.ve);
+    if (!veData.apm)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Get apm sub-API failed");
+        getOK = false;
+    }
+
+    // Volume
+    veData.volume = VoEVolumeControl::GetInterface(veData.ve);
+    if (!veData.volume)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Get volume sub-API failed");
+        getOK = false;
+    }
+
+    // Hardware
+    veData.hardware = VoEHardware::GetInterface(veData.ve);
+    if (!veData.hardware)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Get hardware sub-API failed");
+        getOK = false;
+    }
+
+    // RTP / RTCP
+    veData.rtp_rtcp = VoERTP_RTCP::GetInterface(veData.ve);
+    if (!veData.rtp_rtcp)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Get rtp_rtcp sub-API failed");
+        getOK = false;
+    }
+
+    // Encrypt
+    veData.encrypt = VoEEncryption::GetInterface(veData.ve);
+    if (!veData.encrypt)
+    {
+        __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                            "Get encrypt sub-API failed");
+        getOK = false;
+    }
+
+    return getOK;
+}
+
+/////////////////////////////////////////////
+// Release all sub-APIs
+//
+bool ReleaseSubApis(VoiceEngineData &veData)
+{
+    bool releaseOK = true;
+
+    // Base
+    if (veData.base)
+    {
+        if (0 != veData.base->Release())
+        {
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "Release base sub-API failed");
+            releaseOK = false;
+        }
+        else
+        {
+            veData.base = NULL;
+        }
+    }
+
+    // Codec
+    if (veData.codec)
+    {
+        if (0 != veData.codec->Release())
+        {
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "Release codec sub-API failed");
+            releaseOK = false;
+        }
+        else
+        {
+            veData.codec = NULL;
+        }
+    }
+
+    // File
+    if (veData.file)
+    {
+        if (0 != veData.file->Release())
+        {
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "Release file sub-API failed");
+            releaseOK = false;
+        }
+        else
+        {
+            veData.file = NULL;
+        }
+    }
+
+    // Network
+    if (veData.netw)
+    {
+        if (0 != veData.netw->Release())
+        {
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "Release network sub-API failed");
+            releaseOK = false;
+        }
+        else
+        {
+            veData.netw = NULL;
+        }
+    }
+
+    // apm
+    if (veData.apm)
+    {
+        if (0 != veData.apm->Release())
+        {
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "Release apm sub-API failed");
+            releaseOK = false;
+        }
+        else
+        {
+            veData.apm = NULL;
+        }
+    }
+
+    // Volume
+    if (veData.volume)
+    {
+        if (0 != veData.volume->Release())
+        {
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "Release volume sub-API failed");
+            releaseOK = false;
+        }
+        else
+        {
+            veData.volume = NULL;
+        }
+    }
+
+    // Hardware
+    if (veData.hardware)
+    {
+        if (0 != veData.hardware->Release())
+        {
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "Release hardware sub-API failed");
+            releaseOK = false;
+        }
+        else
+        {
+            veData.hardware = NULL;
+        }
+    }
+
+    // RTP RTCP
+    if (veData.rtp_rtcp)
+    {
+        if (0 != veData.rtp_rtcp->Release())
+        {
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "Release rtp_rtcp sub-API failed");
+            releaseOK = false;
+        }
+        else
+        {
+            veData.rtp_rtcp = NULL;
+        }
+    }
+
+    // Encrypt
+    if (veData.encrypt)
+    {
+        if (0 != veData.encrypt->Release())
+        {
+            __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+                                "Release encrypt sub-API failed");
+            releaseOK = false;
+        }
+        else
+        {
+            veData.encrypt = NULL;
+        }
+    }
+
+    return releaseOK;
+}
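
Note on the APM wrappers above: the integer mode values coming from Java are remapped by the switch statements rather than passed through as raw VoE enum values, and anything outside the listed range is deliberately cast to an invalid enum so that the call fails. A condensed summary, written as a hypothetical Java-side sketch (the real native declarations live in the generated header and in AndroidTest.java further down in this patch):

    // Hypothetical sketch of the mode integers accepted by the JNI wrappers above.
    public class ApmModeMapping {
        // 0 = unchanged, 1 = default, 2 = conference suppression,
        // 3 = low, 4 = moderate, 5 = high, 6 = very high suppression.
        native int SetNSStatus(boolean enable, int mode);

        // 0 = unchanged, 1 = default, 2 = adaptive analog,
        // 3 = adaptive digital, 4 = fixed digital.
        native int SetAGCStatus(boolean enable, int mode);

        // 0 and 1 = default, 2 = conference, 3 = AEC, 4 = AECM.
        native int SetECStatus(boolean enable, int mode);
    }
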
diff --git a/voice_engine/main/test/Android/android_test/jni/org_webrtc_voiceengine_test_AndroidTest.h b/voice_engine/main/test/Android/android_test/jni/org_webrtc_voiceengine_test_AndroidTest.h
new file mode 100644
index 0000000..22bb35a
--- /dev/null
+++ b/voice_engine/main/test/Android/android_test/jni/org_webrtc_voiceengine_test_AndroidTest.h
@@ -0,0 +1,261 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class org_webrtc_voiceengine_test_AndroidTest */
+
+#ifndef _Included_org_webrtc_voiceengine_test_AndroidTest
+#define _Included_org_webrtc_voiceengine_test_AndroidTest
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    NativeInit
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_NativeInit
+  (JNIEnv *, jclass);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    RunAutoTest
+ * Signature: (II)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_RunAutoTest
+  (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    Create
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Create
+  (JNIEnv *, jobject);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    Delete
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Delete
+  (JNIEnv *, jobject);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    Authenticate
+ * Signature: (Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Authenticate
+  (JNIEnv *, jobject, jstring);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    Init
+ * Signature: (IIIZZ)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Init
+  (JNIEnv *, jobject, jint, jint, jint, jboolean, jboolean);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    Terminate
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Terminate
+  (JNIEnv *, jobject);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    CreateChannel
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_CreateChannel
+  (JNIEnv *, jobject);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    DeleteChannel
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_DeleteChannel
+  (JNIEnv *, jobject, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    SetLocalReceiver
+ * Signature: (II)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetLocalReceiver
+  (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    SetSendDestination
+ * Signature: (IILjava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetSendDestination
+  (JNIEnv *, jobject, jint, jint, jstring);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    StartListen
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartListen
+  (JNIEnv *, jobject, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    StartPlayout
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayout
+  (JNIEnv *, jobject, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    StartSend
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartSend
+  (JNIEnv *, jobject, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    StopListen
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopListen
+  (JNIEnv *, jobject, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    StopPlayout
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayout
+  (JNIEnv *, jobject, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    StopSend
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopSend
+  (JNIEnv *, jobject, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    StartPlayingFileLocally
+ * Signature: (ILjava/lang/String;Z)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayingFileLocally
+  (JNIEnv *, jobject, jint, jstring, jboolean);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    StopPlayingFileLocally
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayingFileLocally
+  (JNIEnv *, jobject, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    StartRecordingPlayout
+ * Signature: (ILjava/lang/String;Z)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartRecordingPlayout
+  (JNIEnv *, jobject, jint, jstring, jboolean);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    StopRecordingPlayout
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopRecordingPlayout
+  (JNIEnv *, jobject, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    StartPlayingFileAsMicrophone
+ * Signature: (ILjava/lang/String;Z)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayingFileAsMicrophone
+  (JNIEnv *, jobject, jint, jstring, jboolean);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    StopPlayingFileAsMicrophone
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayingFileAsMicrophone
+  (JNIEnv *, jobject, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    NumOfCodecs
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_NumOfCodecs
+  (JNIEnv *, jobject);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    SetSendCodec
+ * Signature: (II)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetSendCodec
+  (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    SetVADStatus
+ * Signature: (IZI)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetVADStatus
+  (JNIEnv *, jobject, jint, jboolean, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    SetNSStatus
+ * Signature: (ZI)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetNSStatus
+  (JNIEnv *, jobject, jboolean, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    SetAGCStatus
+ * Signature: (ZI)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetAGCStatus
+  (JNIEnv *, jobject, jboolean, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    SetECStatus
+ * Signature: (ZI)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetECStatus
+  (JNIEnv *, jobject, jboolean, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    SetSpeakerVolume
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetSpeakerVolume
+  (JNIEnv *, jobject, jint);
+
+/*
+ * Class:     org_webrtc_voiceengine_test_AndroidTest
+ * Method:    SetLoudspeakerStatus
+ * Signature: (Z)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetLoudspeakerStatus
+  (JNIEnv *, jobject, jboolean);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
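
The machine-generated signatures above bind to native methods on the AndroidTest activity. As an orientation aid, the call order these wrappers are written for, mirroring SetupVoE/startCall/stopCall/ShutdownVoE in AndroidTest.java below, is sketched here; the shared-library name and the standalone class are illustrative assumptions and not part of this patch:

    // Sketch only: intended call order of the native methods declared above.
    public class VoiceEngineCallOrder {
        static { System.loadLibrary("webrtc-voice-demo"); } // assumed library name

        native boolean Create();
        native int Init(int a0, int a1, int a2, boolean a3, boolean a4);
        native int CreateChannel();
        native int SetLocalReceiver(int channel, int port);
        native int SetSendDestination(int channel, int port, String ipAddr);
        native int StartListen(int channel);
        native int StartPlayout(int channel);
        native int StartSend(int channel);
        native int StopSend(int channel);
        native int StopListen(int channel);
        native int StopPlayout(int channel);
        native int DeleteChannel(int channel);
        native int Terminate();
        native boolean Delete();

        void runOnce(String peerIp, int port) {
            Create();                      // creates the engine and acquires the sub-APIs
            Init(0, 0, 0, false, false);   // same arguments the test activity passes
            int channel = CreateChannel();
            SetLocalReceiver(channel, port);
            StartListen(channel);
            StartPlayout(channel);
            SetSendDestination(channel, port, peerIp);
            StartSend(channel);
            // ... call is active ...
            StopSend(channel);
            StopListen(channel);
            StopPlayout(channel);
            DeleteChannel(channel);
            Terminate();
            Delete();                      // releases the sub-APIs and deletes the engine
        }
    }
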
diff --git a/voice_engine/main/test/Android/android_test/res/drawable/icon.png b/voice_engine/main/test/Android/android_test/res/drawable/icon.png
new file mode 100644
index 0000000..7502484
--- /dev/null
+++ b/voice_engine/main/test/Android/android_test/res/drawable/icon.png
Binary files differ
diff --git a/voice_engine/main/test/Android/android_test/res/layout/main.xml b/voice_engine/main/test/Android/android_test/res/layout/main.xml
new file mode 100644
index 0000000..c2423c8
--- /dev/null
+++ b/voice_engine/main/test/Android/android_test/res/layout/main.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. -->
+<!--                                                                     -->
+<!-- Use of this source code is governed by a BSD-style license          -->
+<!-- that can be found in the LICENSE file in the root of the source     -->
+<!-- tree. An additional intellectual property rights grant can be found -->
+<!-- in the file PATENTS.  All contributing project authors may          -->
+<!-- be found in the AUTHORS file in the root of the source tree.        -->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+    android:orientation="vertical"
+    android:layout_width="fill_parent"
+    android:layout_height="fill_parent"
+    >
+
+<TextView android:text="@+id/TextView01" android:id="@+id/TextView01" android:layout_width="wrap_content" android:layout_height="wrap_content"></TextView>
+<EditText android:text="@+id/EditText01" android:id="@+id/EditText01" android:layout_width="wrap_content" android:layout_height="wrap_content"></EditText>
+<Button android:text="@+id/Button01" android:id="@+id/Button01" android:layout_width="wrap_content" android:layout_height="wrap_content"></Button>
+
+
+<Spinner android:id="@+id/Spinner01" android:layout_width="wrap_content" android:layout_height="wrap_content"></Spinner>
+<Spinner android:id="@+id/Spinner02" android:layout_width="wrap_content" android:layout_height="wrap_content"></Spinner>
+<Button android:text="@+id/Button02" android:id="@+id/Button02" android:layout_width="wrap_content" android:layout_height="wrap_content"></Button>
+</LinearLayout>
diff --git a/voice_engine/main/test/Android/android_test/res/values/strings.xml b/voice_engine/main/test/Android/android_test/res/values/strings.xml
new file mode 100644
index 0000000..9367d1f
--- /dev/null
+++ b/voice_engine/main/test/Android/android_test/res/values/strings.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. -->
+<!--                                                                     -->
+<!-- Use of this source code is governed by a BSD-style license          -->
+<!-- that can be found in the LICENSE file in the root of the source     -->
+<!-- tree. An additional intellectual property rights grant can be found -->
+<!-- in the file PATENTS.  All contributing project authors may          -->
+<!-- be found in the AUTHORS file in the root of the source tree.        -->
+
+<resources>
+    
+    <string name="app_name">WebRtc VoiceEngine Android Test</string>
+</resources>
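
One detail worth calling out before the Java file: the test activity keeps the speaker volume as a VoE level in the 0-255 range but snaps it to the nearest Android voice-call stream step, so that each "Volume Up/Down" selection moves exactly one platform step. A small self-contained sketch of that arithmetic (the max-volume value of 5 is only a typical example, not taken from this patch):

    // Sketch of the VoE (0-255) <-> Android stream-volume mapping used below.
    public final class VolumeMappingSketch {
        public static void main(String[] args) {
            int maxVolume = 5;    // getStreamMaxVolume(STREAM_VOICE_CALL), commonly 5
            int voeLevel  = 204;  // activity default, i.e. Android step 4 of 5

            int androidStep = (voeLevel * maxVolume) / 255;  // 204 * 5 / 255 = 4
            voeLevel = (androidStep * 255) / maxVolume;       // 4 * 255 / 5 = 204 (snapped)

            // "Volume Up": advance one Android step, then convert back to a VoE level.
            if (androidStep < maxVolume) {
                voeLevel = ((androidStep + 1) * 255) / maxVolume;  // 5 * 255 / 5 = 255
            }
            System.out.println("VoE level after one volume-up step: " + voeLevel);
        }
    }
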
diff --git a/voice_engine/main/test/Android/android_test/src/org/webrtc/voiceengine/test/AndroidTest.java b/voice_engine/main/test/Android/android_test/src/org/webrtc/voiceengine/test/AndroidTest.java
new file mode 100644
index 0000000..ef5a8c9
--- /dev/null
+++ b/voice_engine/main/test/Android/android_test/src/org/webrtc/voiceengine/test/AndroidTest.java
@@ -0,0 +1,1208 @@
+/*

+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.

+ * 

+ * Use of this source code is governed by a BSD-style license that can be found

+ * in the LICENSE file in the root of the source tree. An additional

+ * intellectual property rights grant can be found in the file PATENTS. All

+ * contributing project authors may be found in the AUTHORS file in the root of

+ * the source tree.

+ */

+

+/*

+ * VoiceEngine Android test application. It either runs the auto test or acts
+ * as a GUI test.

+ */

+

+package org.webrtc.voiceengine.test;

+

+import java.io.File;

+import java.io.FileInputStream;

+import java.io.FileNotFoundException;

+import java.io.FileOutputStream;

+import java.io.FileReader;

+import java.io.IOException;

+

+import android.app.Activity;

+import android.content.Context;

+import android.media.AudioFormat;

+import android.media.AudioManager;

+import android.media.AudioRecord;

+import android.media.AudioTrack;

+import android.media.MediaRecorder;

+import android.os.Bundle;

+import android.util.Log;

+import android.view.View;

+import android.widget.AdapterView;

+import android.widget.ArrayAdapter;

+import android.widget.Button;

+import android.widget.EditText;

+import android.widget.Spinner;

+import android.widget.TextView;

+

+public class AndroidTest extends Activity {

+    private byte[] _playBuffer = null;

+    private short[] _circBuffer = new short[8000]; // can hold 50 frames

+

+    private int _recIndex = 0;

+    private int _playIndex = 0;

+    // private int _streamVolume = 4;

+    private int _maxVolume = 0; // Android max level (commonly 5)

+    // VoE level (0-255), corresponds to level 4 out of 5

+    private int _volumeLevel = 204;

+

+    private Thread _playThread;

+    private Thread _recThread;

+    private Thread _autotestThread;

+

+    private static AudioTrack _at;

+    private static AudioRecord _ar;

+

+    private File _fr = null;

+    private FileInputStream _in = null;

+

+    private boolean _isRunningPlay = false;

+    private boolean _isRunningRec = false;

+    private boolean _settingSet = true;

+    private boolean _isCallActive = false;

+    private boolean _runAutotest = false; // ENABLE AUTOTEST HERE!

+

+    private int _channel = -1;

+    private int _codecIndex = 0;

+    private int _ecIndex = 0;

+    private int _nsIndex = 0;

+    private int _agcIndex = 0;

+    private int _vadIndex = 0;

+    private int _audioIndex = 3;

+    private int _settingMenu = 0;

+    private int _receivePort = 1234;

+    private int _destinationPort = 1234;

+    private String _destinationIP = "127.0.0.1";

+

+    // "Build" settings

+    private final boolean _playFromFile = false;

+    // Set to true to send data to native code and back

+    private final boolean _runThroughNativeLayer = true;

+    private final boolean enableSend = true;

+    private final boolean enableReceive = true;

+    private final boolean useNativeThread = false;

+

+    /** Called when the activity is first created. */

+    public void onCreate(Bundle savedInstanceState) {

+        super.onCreate(savedInstanceState);

+        setContentView(R.layout.main);

+

+        TextView tv = (TextView) findViewById(R.id.TextView01);

+        tv.setText("");

+

+        final EditText ed = (EditText) findViewById(R.id.EditText01);

+        ed.setWidth(200);

+        ed.setText(_destinationIP);

+

+        final Button buttonStart = (Button) findViewById(R.id.Button01);

+        buttonStart.setWidth(200);

+        if (_runAutotest) {

+            buttonStart.setText("Run test");

+        } else {

+            buttonStart.setText("Start Call");

+        }

+        // button.layout(50, 50, 100, 40);

+        buttonStart.setOnClickListener(new View.OnClickListener() {

+            public void onClick(View v) {

+

+                if (_runAutotest) {

+                    startAutoTest();

+                } else {

+                    if (_isCallActive) {

+

+                        if (stopCall() != -1) {

+                            _isCallActive = false;

+                            buttonStart.setText("Start Call");

+                        }

+                    } else {

+

+                        _destinationIP = ed.getText().toString();

+                        if (startCall() != -1) {

+                            _isCallActive = true;

+                            buttonStart.setText("Stop Call");

+                        }

+                    }

+                }

+

+                // displayTextFromFile();

+                // recordAudioToFile();

+                // if(!_playFromFile)

+                // {

+                // recAudioInThread();

+                // }

+                // playAudioInThread();

+            }

+        });

+

+        final Button buttonStop = (Button) findViewById(R.id.Button02);

+        buttonStop.setWidth(200);

+        buttonStop.setText("Close app");

+        buttonStop.setOnClickListener(new View.OnClickListener() {

+            public void onClick(View v) {

+

+                if (!_runAutotest) {

+                    ShutdownVoE();

+                }

+

+                // This call terminates and should close the activity

+                finish();

+

+                // playAudioFromFile();

+                // if(!_playFromFile)

+                // {

+                // stopRecAudio();

+                // }

+                // stopPlayAudio();

+            }

+        });

+

+

+        String ap1[] = {"EC off", "AECM"};

+        final ArrayAdapter<String> adapterAp1 = new ArrayAdapter<String>(

+                        this,

+                        android.R.layout.simple_spinner_dropdown_item,

+                        ap1);

+        String ap2[] =

+                        {"NS off", "NS low", "NS moderate", "NS high",

+                                        "NS very high"};

+        final ArrayAdapter<String> adapterAp2 = new ArrayAdapter<String>(

+                        this,

+                        android.R.layout.simple_spinner_dropdown_item,

+                        ap2);

+        String ap3[] = {"AGC off", "AGC adaptive", "AGC fixed"};

+        final ArrayAdapter<String> adapterAp3 = new ArrayAdapter<String>(

+                        this,

+                        android.R.layout.simple_spinner_dropdown_item,

+                        ap3);

+        String ap4[] =

+                        {"VAD off", "VAD conventional", "VAD high rate",

+                                        "VAD mid rate", "VAD low rate"};

+        final ArrayAdapter<String> adapterAp4 = new ArrayAdapter<String>(

+                        this,

+                        android.R.layout.simple_spinner_dropdown_item,

+                        ap4);

+        String codecs[] = {"iSAC", "PCMU", "PCMA", "iLBC", "G.729"};

+        final ArrayAdapter<String> adapterCodecs = new ArrayAdapter<String>(

+                        this,

+                        android.R.layout.simple_spinner_dropdown_item,

+                        codecs);

+        String audio[] =

+                        {"Volume Up", "Volume Down", "Loudspeaker", "Earpiece"};

+        final ArrayAdapter<String> adapterAudio = new ArrayAdapter<String>(

+                        this,

+                        android.R.layout.simple_spinner_dropdown_item,

+                        audio);

+

+        final Spinner spinnerSettings1 = (Spinner) findViewById(R.id.Spinner01);

+        final Spinner spinnerSettings2 = (Spinner) findViewById(R.id.Spinner02);

+        spinnerSettings1.setMinimumWidth(200);

+        String settings[] =

+                        {"Audio", "Codec", "Echo Control", "Noise Suppression",

+                                        "Automatic Gain Control",

+                                        "Voice Activity Detection"};

+        ArrayAdapter<String> adapterSettings1 = new ArrayAdapter<String>(

+                        this,

+                        android.R.layout.simple_spinner_dropdown_item,

+                        settings);

+        spinnerSettings1.setAdapter(adapterSettings1);

+        spinnerSettings1.setOnItemSelectedListener(

+                        new AdapterView.OnItemSelectedListener() {

+            public void onItemSelected(AdapterView adapterView, View view,

+                            int position, long id) {

+

+                _settingMenu = position;

+                _settingSet = false;

+                if (position == 0) {

+                    spinnerSettings2.setAdapter(adapterAudio);

+                    spinnerSettings2.setSelection(_audioIndex);

+                }

+                if (position == 1) {

+                    spinnerSettings2.setAdapter(adapterCodecs);

+                    spinnerSettings2.setSelection(_codecIndex);

+                }

+                if (position == 2) {

+                    spinnerSettings2.setAdapter(adapterAp1);

+                    spinnerSettings2.setSelection(_ecIndex);

+                }

+                if (position == 3) {

+                    spinnerSettings2.setAdapter(adapterAp2);

+                    spinnerSettings2.setSelection(_nsIndex);

+                }

+                if (position == 4) {

+                    spinnerSettings2.setAdapter(adapterAp3);

+                    spinnerSettings2.setSelection(_agcIndex);

+                }

+                if (position == 5) {

+                    spinnerSettings2.setAdapter(adapterAp4);

+                    spinnerSettings2.setSelection(_vadIndex);

+                }

+            }

+

+            public void onNothingSelected(AdapterView adapterView) {

+                WebrtcLog("No setting1 selected");

+            }

+        });

+

+        spinnerSettings2.setMinimumWidth(200);

+        ArrayAdapter<String> adapterSettings2 = new ArrayAdapter<String>(

+                        this,

+                        android.R.layout.simple_spinner_dropdown_item,

+                        codecs);

+        spinnerSettings2.setAdapter(adapterSettings2);

+        spinnerSettings2.setOnItemSelectedListener(

+                        new AdapterView.OnItemSelectedListener() {

+            public void onItemSelected(AdapterView adapterView, View view,

+                            int position, long id) {

+

+                // avoid unintentional setting

+                if (_settingSet == false) {

+                    _settingSet = true;

+                    return;

+                }

+

+                // Change volume

+                if (_settingMenu == 0) {

+                    WebrtcLog("Selected audio " + position);

+                    setAudioProperties(position);

+                    spinnerSettings2.setSelection(_audioIndex);

+                }

+

+                // Change codec

+                if (_settingMenu == 1) {

+                    _codecIndex = position;

+                    WebrtcLog("Selected codec " + position);

+                    if (0 != SetSendCodec(_channel, _codecIndex)) {

+                        WebrtcLog("VoE set send codec failed");

+                    }

+                }

+

+                // Change EC

+                if (_settingMenu == 2) {

+                    boolean enable = true;

+                    int ECmode = 4; // AECM (matches the native SetECStatus mapping)

+                    int AESmode = 0;

+

+                    _ecIndex = position;

+                    WebrtcLog("Selected EC " + position);

+

+                    if (position == 0) {

+                        enable = false;

+                    }

+                    if (position > 1) {

+                        ECmode = 4; // AES

+                        AESmode = position - 1;

+                    }

+

+                    if (0 != SetECStatus(enable, ECmode)) {

+                        WebrtcLog("VoE set EC status failed");

+                    }

+                }

+

+                // Change NS

+                if (_settingMenu == 3) {

+                    boolean enable = true;

+

+                    _nsIndex = position;

+                    WebrtcLog("Selected NS " + position);

+

+                    if (position == 0) {

+                        enable = false;

+                    }

+                    if (0 != SetNSStatus(enable, position + 2)) {

+                        WebrtcLog("VoE set NS status failed");

+                    }

+                }

+

+                // Change AGC

+                if (_settingMenu == 4) {

+                    boolean enable = true;

+

+                    _agcIndex = position;

+                    WebrtcLog("Selected AGC " + position);

+

+                    if (position == 0) {

+                        enable = false;

+                        position = 1; // default

+                    }

+                    if (0 != SetAGCStatus(enable, position + 2)) {

+                        WebrtcLog("VoE set AGC status failed");

+                    }

+                }

+

+                // Change VAD

+                if (_settingMenu == 5) {

+                    boolean enable = true;

+

+                    _vadIndex = position;

+                    WebrtcLog("Selected VAD " + position);

+

+                    if (position == 0) {

+                        enable = false;

+                        position++;

+                    }

+                    if (0 != SetVADStatus(_channel, enable, position - 1)) {

+                        WebrtcLog("VoE set VAD status failed");

+                    }

+                }

+            }

+

+            public void onNothingSelected(AdapterView adapterView) {

+            }

+        });

+

+        // Setup VoiceEngine

+        if (!_runAutotest && !useNativeThread) SetupVoE();

+

+        // Suggest using the voice call audio stream for hardware volume

+        // controls

+        setVolumeControlStream(AudioManager.STREAM_VOICE_CALL);

+

+        // Get max Android volume and adjust default volume to map exactly to an

+        // Android level

+        AudioManager am =

+                        (AudioManager) getSystemService(Context.AUDIO_SERVICE);

+        _maxVolume = am.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);

+        if (_maxVolume <= 0) {

+            WebrtcLog("Could not get max volume!");

+        } else {

+            int androidVolumeLevel = (_volumeLevel * _maxVolume) / 255;

+            _volumeLevel = (androidVolumeLevel * 255) / _maxVolume;

+        }

+

+        WebrtcLog("Started Webrtc Android Test");

+    }

+

+    // Will be called when the activity is shut down.

+    // NOTE: Activity may be killed without this function being called,

+    // but then we should not need to clean up.

+    protected void onDestroy() {

+        super.onDestroy();

+        // ShutdownVoE();

+    }

+

+    private void SetupVoE() {

+        // Create VoiceEngine

+        Create(); // Error logging is done in native API wrapper

+

+        // Initialize

+        if (0 != Init(0, 0, 0, false, false)) {

+            WebrtcLog("VoE init failed");

+        }

+

+        // Create channel

+        _channel = CreateChannel();

+        if (_channel < 0) {

+            WebrtcLog("VoE create channel failed");

+        }

+

+    }

+

+    private void ShutdownVoE() {

+        // Delete channel

+        if (0 != DeleteChannel(_channel)) {

+            WebrtcLog("VoE delete channel failed");

+        }

+

+        // Terminate

+        if (0 != Terminate()) {

+            WebrtcLog("VoE terminate failed");

+        }

+

+        // Delete VoiceEngine

+        Delete(); // Error logging is done in native API wrapper

+    }

+

+    int startCall() {

+

+        if (useNativeThread == true) {

+

+            Create();

+            return 0;

+        }

+

+        if (enableReceive == true) {

+            // Set local receiver

+            if (0 != SetLocalReceiver(_channel, _receivePort)) {

+                WebrtcLog("VoE set local receiver failed");

+            }

+

+            if (0 != StartListen(_channel)) {

+                WebrtcLog("VoE start listen failed");

+                return -1;

+            }

+

+            // Route audio to earpiece

+            if (0 != SetLoudspeakerStatus(false)) {

+                WebrtcLog("VoE set loudspeaker status failed");

+                return -1;

+            }

+

+            // set volume to default value

+            if (0 != SetSpeakerVolume(_volumeLevel)) {

+                WebrtcLog("VoE set speaker volume failed");

+                return -1;

+            }

+

+            /*

+             * WebrtcLog("VoE start record now"); if (0 !=

+             * StartRecordingPlayout(_channel, "/sdcard/singleUserDemoOut.pcm",

+             * false)) { WebrtcLog("VoE Recording Playout failed"); }

+             * WebrtcLog("VoE start Recording Playout end");

+             */

+            // Start playout

+            if (0 != StartPlayout(_channel)) {

+                WebrtcLog("VoE start playout failed");

+                return -1;

+            }

+

+            // Start playout file

+            // if (0 != StartPlayingFileLocally(_channel,

+            // "/sdcard/singleUserDemo.pcm", true)) {

+            // WebrtcLog("VoE start playout file failed");

+            // return -1;

+            // }

+        }

+

+        if (enableSend == true) {

+            if (0 != SetSendDestination(_channel, _destinationPort,

+                            _destinationIP)) {

+                WebrtcLog("VoE set send destination failed");

+                return -1;

+            }

+

+            if (0 != SetSendCodec(_channel, _codecIndex)) {

+                WebrtcLog("VoE set send codec failed");

+                return -1;

+            }

+

+            /*

+             * if (0 != StartPlayingFileAsMicrophone(_channel,

+             * "/sdcard/singleUserDemo.pcm", true)) {

+             * WebrtcLog("VoE start playing file as microphone failed"); }

+             */

+            if (0 != StartSend(_channel)) {

+                WebrtcLog("VoE start send failed");

+                return -1;

+            }

+

+            // if (0 != StartPlayingFileAsMicrophone(_channel,

+            // "/sdcard/singleUserDemo.pcm", true)) {

+            // WebrtcLog("VoE start playing file as microphone failed");

+            // return -1;

+            // }

+        }

+

+        return 0;

+    }

+

+    int stopCall() {

+

+        if (useNativeThread == true) {

+

+            Delete();

+            return 0;

+        }

+

+        if (enableSend == true) {

+            // Stop playing file as microphone

+            /*

+             * if (0 != StopPlayingFileAsMicrophone(_channel)) {

+             * WebrtcLog("VoE stop playing file as microphone failed"); return

+             * -1; }

+             */

+            // Stop send

+            if (0 != StopSend(_channel)) {

+                WebrtcLog("VoE stop send failed");

+                return -1;

+            }

+        }

+

+        if (enableReceive == true) {

+            // if (0 != StopRecordingPlayout(_channel)) {

+            // WebrtcLog("VoE stop Recording Playout failed");

+            // }

+            // WebrtcLog("VoE stop Recording Playout ended");

+

+            // Stop listen

+            if (0 != StopListen(_channel)) {

+                WebrtcLog("VoE stop listen failed");

+                return -1;

+            }

+

+            // Stop playout file

+            // if (0 != StopPlayingFileLocally(_channel)) {

+            // WebrtcLog("VoE stop playout file failed");

+            // return -1;

+            // }

+

+            // Stop playout

+            if (0 != StopPlayout(_channel)) {

+                WebrtcLog("VoE stop playout failed");

+                return -1;

+            }

+

+            // Route audio to loudspeaker

+            if (0 != SetLoudspeakerStatus(true)) {

+                WebrtcLog("VoE set loudspeaker status failed");

+                return -1;

+            }

+        }

+

+        return 0;

+    }

+

+    int startAutoTest() {

+

+        _autotestThread = new Thread(_autotestProc);

+        _autotestThread.start();

+

+        return 0;

+    }

+

+    private Runnable _autotestProc = new Runnable() {

+        public void run() {

+            // TODO(xians): choose test from GUI

+            // 1 = standard, not used

+            // 2 = extended, 2 = base

+            RunAutoTest(1, 2);

+        }

+    };

+

+    int setAudioProperties(int val) {

+

+        // AudioManager am = (AudioManager)

+        // getSystemService(Context.AUDIO_SERVICE);

+

+        if (val == 0) {

+            // _streamVolume =

+            // am.getStreamVolume(AudioManager.STREAM_VOICE_CALL);

+            // am.setStreamVolume(AudioManager.STREAM_VOICE_CALL,

+            // (_streamVolume+1), 0);

+

+            int androidVolumeLevel = (_volumeLevel * _maxVolume) / 255;

+            if (androidVolumeLevel < _maxVolume) {

+                _volumeLevel = ((androidVolumeLevel + 1) * 255) / _maxVolume;

+                if (0 != SetSpeakerVolume(_volumeLevel)) {

+                    WebrtcLog("VoE set speaker volume failed");

+                }

+            }

+        } else if (val == 1) {

+            // _streamVolume =

+            // am.getStreamVolume(AudioManager.STREAM_VOICE_CALL);

+            // am.setStreamVolume(AudioManager.STREAM_VOICE_CALL,

+            // (_streamVolume-1), 0);

+

+            int androidVolumeLevel = (_volumeLevel * _maxVolume) / 255;

+            if (androidVolumeLevel > 0) {

+                _volumeLevel = ((androidVolumeLevel - 1) * 255) / _maxVolume;

+                if (0 != SetSpeakerVolume(_volumeLevel)) {

+                    WebrtcLog("VoE set speaker volume failed");

+                }

+            }

+        } else if (val == 2) {

+            // route audio to back speaker

+            if (0 != SetLoudspeakerStatus(true)) {

+                WebrtcLog("VoE set loudspeaker status failed");

+            }

+            _audioIndex = 2;

+        } else if (val == 3) {

+            // route audio to earpiece

+            if (0 != SetLoudspeakerStatus(false)) {

+                WebrtcLog("VoE set loudspeaker status failed");

+            }

+            _audioIndex = 3;

+        }

+

+        return 0;

+    }

+

+    int displayTextFromFile() {

+

+        TextView tv = (TextView) findViewById(R.id.TextView01);

+        FileReader fr = null;

+        char[] fileBuffer = new char[64];

+

+        try {

+            fr = new FileReader("/sdcard/test.txt");

+        } catch (FileNotFoundException e) {

+            e.printStackTrace();

+            tv.setText("File not found!");

+        }

+

+        try {

+            fr.read(fileBuffer);

+        } catch (IOException e) {

+            e.printStackTrace();

+        }

+

+        String readString = new String(fileBuffer);

+        tv.setText(readString);

+        // setContentView(tv);

+

+        return 0;

+    }

+

+    int recordAudioToFile() {

+        File fr = null;

+        // final to be reachable within onPeriodicNotification

+        byte[] recBuffer = new byte[320];

+

+        int recBufSize =

+                        AudioRecord.getMinBufferSize(16000,

+                                        AudioFormat.CHANNEL_CONFIGURATION_MONO,

+                                        AudioFormat.ENCODING_PCM_16BIT);

+        AudioRecord rec =

+                        new AudioRecord(MediaRecorder.AudioSource.MIC, 16000,

+                                        AudioFormat.CHANNEL_CONFIGURATION_MONO,

+                                        AudioFormat.ENCODING_PCM_16BIT,

+                                        recBufSize);

+

+        fr = new File("/sdcard/record.pcm");

+        FileOutputStream out = null;

+        try {

+            out = new FileOutputStream(fr);

+        } catch (FileNotFoundException e1) {

+            e1.printStackTrace();

+        }

+

+        // start recording

+        try {

+            rec.startRecording();

+        } catch (IllegalStateException e) {

+            e.printStackTrace();

+        }

+

+        for (int i = 0; i < 550; i++) {

+            // note: there is a short[] version of read as well
+            int rdBytes = rec.read(recBuffer, 0, 320);

+

+            try {

+                out.write(recBuffer);

+            } catch (IOException e) {

+                e.printStackTrace();

+            }

+        }

+

+        // stop recording

+        try {

+            rec.stop();

+        } catch (IllegalStateException e) {

+            e.printStackTrace();

+        }

+

+        return 0;

+    }

+

+    int playAudioFromFile() {

+

+        File fr = null;

+        // final to be reachable within onPeriodicNotification

+        // final byte[] playBuffer = new byte [320000];

+        // final to be reachable within onPeriodicNotification

+        final byte[] playBuffer = new byte[320];

+

+        final int playBufSize =

+                        AudioTrack.getMinBufferSize(16000,

+                                        AudioFormat.CHANNEL_CONFIGURATION_MONO,

+                                        AudioFormat.ENCODING_PCM_16BIT);

+        // final int playBufSize = 1920; // 100 ms buffer

+        // byte[] playBuffer = new byte [playBufSize];

+        final AudioTrack play =

+                        new AudioTrack(AudioManager.STREAM_VOICE_CALL, 16000,

+                                        AudioFormat.CHANNEL_CONFIGURATION_MONO,

+                                        AudioFormat.ENCODING_PCM_16BIT,

+                                        playBufSize, AudioTrack.MODE_STREAM);

+

+        // implementation of the playpos callback functions

+        play.setPlaybackPositionUpdateListener(

+                        new AudioTrack.OnPlaybackPositionUpdateListener() {

+

+            int count = 0;

+

+            public void onPeriodicNotification(AudioTrack track) {

+                // int wrBytes = play.write(playBuffer, count, 320);

+                count += 320;

+            }

+

+            public void onMarkerReached(AudioTrack track) {

+

+            }

+        });

+

+        // set the notification period = 160 samples

+        // int ret = play.setPositionNotificationPeriod(160);

+

+        fr = new File("/sdcard/record.pcm");

+        FileInputStream in = null;

+        try {

+            in = new FileInputStream(fr);

+        } catch (FileNotFoundException e1) {

+            e1.printStackTrace();

+        }

+

+        // try {

+        // in.read(playBuffer);

+        // } catch (IOException e) {

+        // e.printStackTrace();

+        // }

+

+        // play all at once

+        // int wrBytes = play.write(playBuffer, 0, 320000);

+

+

+        // start playout

+        try {

+            play.play();

+        } catch (IllegalStateException e) {

+            e.printStackTrace();

+        }

+

+        // returns the number of frames that have been played

+        // int headPos = play.getPlaybackHeadPosition();

+

+        // play with multiple writes

+        for (int i = 0; i < 500; i++) {

+            try {

+                in.read(playBuffer);

+            } catch (IOException e) {

+                e.printStackTrace();

+            }

+

+

+            // note, there is a short version of write as well!

+            int wrBytes = play.write(playBuffer, 0, 320);

+

+            Log.d("testWrite", "wrote");

+        }

+

+        // stop playout

+        try {

+            play.stop();

+        } catch (IllegalStateException e) {

+            e.printStackTrace();

+        }

+

+        return 0;

+    }

+

+    int playAudioInThread() {

+

+        if (_isRunningPlay) {

+            return 0;

+        }

+

+        // File fr = null;

+        // final byte[] playBuffer = new byte[320];

+        if (_playFromFile) {

+            _playBuffer = new byte[320];

+        } else {

+            // reset index

+            _playIndex = 0;

+        }

+        // within

+        // onPeriodicNotification

+

+        // Log some info (static)

+        WebrtcLog("Creating AudioTrack object");

+        final int minPlayBufSize =

+                        AudioTrack.getMinBufferSize(16000,

+                                        AudioFormat.CHANNEL_CONFIGURATION_MONO,

+                                        AudioFormat.ENCODING_PCM_16BIT);

+        WebrtcLog("Min play buf size = " + minPlayBufSize);

+        WebrtcLog("Min volume = " + AudioTrack.getMinVolume());

+        WebrtcLog("Max volume = " + AudioTrack.getMaxVolume());

+        WebrtcLog("Native sample rate = "

+                        + AudioTrack.getNativeOutputSampleRate(

+                                        AudioManager.STREAM_VOICE_CALL));

+

+        final int playBufSize = minPlayBufSize; // 3200; // 100 ms buffer

+        // byte[] playBuffer = new byte [playBufSize];

+        try {

+            _at = new AudioTrack(

+                            AudioManager.STREAM_VOICE_CALL,

+                            16000,

+                            AudioFormat.CHANNEL_CONFIGURATION_MONO,

+                            AudioFormat.ENCODING_PCM_16BIT,

+                            playBufSize, AudioTrack.MODE_STREAM);

+        } catch (Exception e) {

+            WebrtcLog(e.getMessage());

+        }

+

+        // Log some info (non-static)

+        WebrtcLog("Notification marker pos = "

+                        + _at.getNotificationMarkerPosition());

+        WebrtcLog("Play head pos = " + _at.getPlaybackHeadPosition());

+        WebrtcLog("Pos notification dt = "

+                        + _at.getPositionNotificationPeriod());

+        WebrtcLog("Playback rate = " + _at.getPlaybackRate());

+        WebrtcLog("Sample rate = " + _at.getSampleRate());

+

+        // implementation of the playpos callback functions

+        // _at.setPlaybackPositionUpdateListener(

+        // new AudioTrack.OnPlaybackPositionUpdateListener() {

+        //

+        // int count = 3200;

+        //

+        // public void onPeriodicNotification(AudioTrack track) {

+        // // int wrBytes = play.write(playBuffer, count, 320);

+        // count += 320;

+        // }

+        //

+        // public void onMarkerReached(AudioTrack track) {

+        // }

+        // });

+

+        // set the notification period = 160 samples

+        // int ret = _at.setPositionNotificationPeriod(160);

+

+        if (_playFromFile) {

+            _fr = new File("/sdcard/singleUserDemo.pcm");

+            try {

+                _in = new FileInputStream(_fr);

+            } catch (FileNotFoundException e1) {

+                e1.printStackTrace();

+            }

+        }

+

+        // try {

+        // in.read(playBuffer);

+        // } catch (IOException e) {

+        // e.printStackTrace();

+        // }

+

+        _isRunningPlay = true;

+

+        // buffer = new byte[3200];

+        _playThread = new Thread(_playProc);

+        // ar.startRecording();

+        // bytesRead = 3200;

+        // recording = true;

+        _playThread.start();

+

+        return 0;

+    }

+

+    int stopPlayAudio() {

+        if (!_isRunningPlay) {

+            return 0;

+        }

+

+        _isRunningPlay = false;

+

+        return 0;

+    }

+

+    private Runnable _playProc = new Runnable() {

+        public void run() {

+

+            // set high thread priority

+            android.os.Process.setThreadPriority(

+                            android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);

+

+            // play all at once

+            // int wrBytes = play.write(playBuffer, 0, 320000);

+

+            // fill the buffer

+            // play.write(playBuffer, 0, 3200);

+

+            // play.flush();

+

+            // start playout

+            try {

+                _at.play();

+            } catch (IllegalStateException e) {

+                e.printStackTrace();

+            }

+

+            // play with multiple writes

+            int i = 0;

+            for (; i < 3000 && _isRunningPlay; i++) {

+

+                if (_playFromFile) {

+                    try {

+                        _in.read(_playBuffer);

+                    } catch (IOException e) {

+                        e.printStackTrace();

+                    }

+

+                    int wrBytes = _at.write(_playBuffer, 0 /* i * 320 */, 320);

+                } else {

+                    int wrSamples =

+                                    _at.write(_circBuffer, _playIndex * 160,

+                                                    160);

+

+                    // WebrtcLog("Played 10 ms from buffer, _playIndex = " +

+                    // _playIndex);

+                    // WebrtcLog("Diff = " + (_recIndex - _playIndex));

+

+                    if (_playIndex == 49) {

+                        _playIndex = 0;

+                    } else {

+                        _playIndex += 1;

+                    }

+                }

+

+                // WebrtcLog("Wrote 10 ms to buffer, head = "

+                // + _at.getPlaybackHeadPosition());

+            }

+

+            // stop playout

+            try {

+                _at.stop();

+            } catch (IllegalStateException e) {

+                e.printStackTrace();

+            }

+

+            // returns the number of frames that have been played

+            WebrtcLog("Test stopped, i = " + i + ", head = "

+                            + _at.getPlaybackHeadPosition());

+            int headPos = _at.getPlaybackHeadPosition();

+

+            // flush the buffers

+            _at.flush();

+

+            // release the object

+            _at.release();

+            _at = null;

+

+            // try {

+            // Thread.sleep() must be within a try - catch block

+            // Thread.sleep(3000);

+            // }catch (Exception e){

+            // System.out.println(e.getMessage());

+            // }

+

+            _isRunningPlay = false;

+

+        }

+    };

+

+    int recAudioInThread() {

+

+        if (_isRunningRec) {

+            return 0;

+        }

+

+        // within

+        // onPeriodicNotification

+

+        // reset index

+        _recIndex = 20;

+

+        // Log some info (static)

+        WebrtcLog("Creating AudioRecord object");

+        final int minRecBufSize = AudioRecord.getMinBufferSize(16000,

+                        AudioFormat.CHANNEL_CONFIGURATION_MONO,

+                        AudioFormat.ENCODING_PCM_16BIT);

+        WebrtcLog("Min rec buf size = " + minRecBufSize);

+        // WebrtcLog("Min volume = " + AudioTrack.getMinVolume());

+        // WebrtcLog("Max volume = " + AudioTrack.getMaxVolume());

+        // WebrtcLog("Native sample rate = "

+        // + AudioRecord

+        // .getNativeInputSampleRate(AudioManager.STREAM_VOICE_CALL));

+

+        final int recBufSize = minRecBufSize; // 3200; // 100 ms buffer
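+        // 3200 bytes corresponds to 100 ms of 16-bit mono audio at 16 kHz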

+        try {

+            _ar = new AudioRecord(

+                            MediaRecorder.AudioSource.MIC,

+                            16000,

+                            AudioFormat.CHANNEL_CONFIGURATION_MONO,

+                            AudioFormat.ENCODING_PCM_16BIT,

+                            recBufSize);

+        } catch (Exception e) {

+            WebrtcLog(e.getMessage());

+        }

+

+        // Log some info (non-static)

+        WebrtcLog("Notification marker pos = "

+                        + _ar.getNotificationMarkerPosition());

+        // WebrtcLog("Play head pos = " + _ar.getRecordHeadPosition());

+        WebrtcLog("Pos notification dt rec= "

+                        + _ar.getPositionNotificationPeriod());

+        // WebrtcLog("Playback rate = " + _ar.getRecordRate());

+        // WebrtcLog("Playback rate = " + _ar.getPlaybackRate());

+        WebrtcLog("Sample rate = " + _ar.getSampleRate());

+        // WebrtcLog("Playback rate = " + _ar.getPlaybackRate());

+        // WebrtcLog("Playback rate = " + _ar.getPlaybackRate());

+

+        _isRunningRec = true;

+

+        _recThread = new Thread(_recProc);

+

+        _recThread.start();

+

+        return 0;

+    }

+

+    int stopRecAudio() {

+        if (!_isRunningRec) {

+            return 0;

+        }

+

+        _isRunningRec = false;

+

+        return 0;

+    }

+
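+    // Recording worker: reads 10 ms chunks from the AudioRecord into the
+    // circular buffer (50 slots of 160 samples) consumed by _playProc.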

+    private Runnable _recProc = new Runnable() {

+        public void run() {

+

+            // set high thread priority

+            android.os.Process.setThreadPriority(

+                            android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);

+

+            // start recording

+            try {

+                _ar.startRecording();

+            } catch (IllegalStateException e) {

+                e.printStackTrace();

+            }

+

+            // keep recording to circular buffer

+            // for a while

+            int i = 0;

+            int rdSamples = 0;

+            short[] tempBuffer = new short[160]; // Only used for native case

+

+            for (; i < 3000 && _isRunningRec; i++) {

+                if (_runThroughNativeLayer) {

+                    rdSamples = _ar.read(tempBuffer, 0, 160);

+                    // audioLoop(tempBuffer, 160); // Insert into native layer

+                } else {

+                    rdSamples = _ar.read(_circBuffer, _recIndex * 160, 160);

+

+                    // WebrtcLog("Recorded 10 ms to buffer, _recIndex = " +

+                    // _recIndex);

+                    // WebrtcLog("rdSamples = " + rdSamples);

+

+                    if (_recIndex == 49) {

+                        _recIndex = 0;

+                    } else {

+                        _recIndex += 1;

+                    }

+                }

+            }

+

+            // stop recording

+            try {

+                _ar.stop();

+            } catch (IllegalStateException e) {

+                e.printStackTrace();

+            }

+

+            // release the object

+            _ar.release();

+            _ar = null;

+

+            // try {

+            // Thread.sleep() must be within a try-catch block

+            // Thread.sleep(3000);

+            // }catch (Exception e){

+            // System.out.println(e.getMessage());

+            // }

+

+            _isRunningRec = false;

+

+            // returns the number of frames that have been played

+            // WebrtcLog("Test stopped, i = " + i + ", head = "

+            // + _at.getPlaybackHeadPosition());

+            // int headPos = _at.getPlaybackHeadPosition();

+        }

+    };

+

+    private void WebrtcLog(String msg) {

+        Log.d("*Webrtc*", msg);

+    }

+

+    // //////////////// Native function prototypes ////////////////////

+
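+    // JNI entry points implemented in libAndroidJavaAPI.so (loaded in the
+    // static initializer below); they wrap the native VoiceEngine APIs.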

+    private static native boolean NativeInit();

+

+    private native int RunAutoTest(int testType, int extendedSel);

+

+    private native boolean Create();

+

+    private native boolean Delete();

+

+    private native int Authenticate(String key);

+

+    private native int Init(int month, int day, int year,

+                    boolean enableTrace, boolean useExtTrans);

+

+    private native int Terminate();

+

+    private native int CreateChannel();

+

+    private native int DeleteChannel(int channel);

+

+    private native int SetLocalReceiver(int channel, int port);

+

+    private native int SetSendDestination(int channel, int port,

+                    String ipaddr);

+

+    private native int StartListen(int channel);

+

+    private native int StartPlayout(int channel);

+

+    private native int StartSend(int channel);

+

+    private native int StopListen(int channel);

+

+    private native int StopPlayout(int channel);

+

+    private native int StopSend(int channel);

+

+    private native int StartPlayingFileLocally(int channel, String fileName,

+                    boolean loop);

+

+    private native int StopPlayingFileLocally(int channel);

+

+    private native int StartRecordingPlayout(int channel, String fileName,

+                    boolean loop);

+

+    private native int StopRecordingPlayout(int channel);

+

+    private native int StartPlayingFileAsMicrophone(int channel,

+                    String fileName, boolean loop);

+

+    private native int StopPlayingFileAsMicrophone(int channel);

+

+    private native int NumOfCodecs();

+

+    private native int SetSendCodec(int channel, int index);

+

+    private native int SetVADStatus(int channel, boolean enable, int mode);

+

+    private native int SetNSStatus(boolean enable, int mode);

+

+    private native int SetAGCStatus(boolean enable, int mode);

+

+    private native int SetECStatus(boolean enable, int mode);

+

+    private native int SetSpeakerVolume(int volume);

+

+    private native int SetLoudspeakerStatus(boolean enable);

+

+    /*

+     * This static initializer loads the 'AndroidJavaAPI' library on application startup.

+     * The library has already been unpacked into

+     * /data/data/webrtc.android.AndroidTest/lib/libAndroidJavaAPI.so at

+     * installation time by the package manager.

+     */

+    static {

+        Log.d("*Webrtc*", "Loading AndroidJavaAPI...");

+        System.loadLibrary("AndroidJavaAPI");

+

+        Log.d("*Webrtc*", "Calling native init...");

+        if (!NativeInit()) {

+            Log.e("*Webrtc*", "Native init failed");

+            throw new RuntimeException("Native init failed");

+        } else {

+            Log.d("*Webrtc*", "Native init successful");

+        }

+    }

+}

diff --git a/voice_engine/main/test/auto_test/TraceScan.exe b/voice_engine/main/test/auto_test/TraceScan.exe
new file mode 100755
index 0000000..b960f89
--- /dev/null
+++ b/voice_engine/main/test/auto_test/TraceScan.exe
Binary files differ
diff --git a/voice_engine/main/test/auto_test/audio_long16.pcm b/voice_engine/main/test/auto_test/audio_long16.pcm
new file mode 100644
index 0000000..853e0df
--- /dev/null
+++ b/voice_engine/main/test/auto_test/audio_long16.pcm
Binary files differ
diff --git a/voice_engine/main/test/auto_test/audio_long16.wav b/voice_engine/main/test/auto_test/audio_long16.wav
new file mode 100644
index 0000000..ebe91c4
--- /dev/null
+++ b/voice_engine/main/test/auto_test/audio_long16.wav
Binary files differ
diff --git a/voice_engine/main/test/auto_test/audio_long16big_endian.pcm b/voice_engine/main/test/auto_test/audio_long16big_endian.pcm
new file mode 100644
index 0000000..563e4e9
--- /dev/null
+++ b/voice_engine/main/test/auto_test/audio_long16big_endian.pcm
Binary files differ
diff --git a/voice_engine/main/test/auto_test/audio_long8.pcm b/voice_engine/main/test/auto_test/audio_long8.pcm
new file mode 100644
index 0000000..85d17e5
--- /dev/null
+++ b/voice_engine/main/test/auto_test/audio_long8.pcm
Binary files differ
diff --git a/voice_engine/main/test/auto_test/audio_long8mulaw.wav b/voice_engine/main/test/auto_test/audio_long8mulaw.wav
new file mode 100644
index 0000000..2d3d8b3
--- /dev/null
+++ b/voice_engine/main/test/auto_test/audio_long8mulaw.wav
Binary files differ
diff --git a/voice_engine/main/test/auto_test/audio_short16.pcm b/voice_engine/main/test/auto_test/audio_short16.pcm
new file mode 100644
index 0000000..15a0f18
--- /dev/null
+++ b/voice_engine/main/test/auto_test/audio_short16.pcm
Binary files differ
diff --git a/voice_engine/main/test/auto_test/audio_tiny11.wav b/voice_engine/main/test/auto_test/audio_tiny11.wav
new file mode 100644
index 0000000..6db80d5
--- /dev/null
+++ b/voice_engine/main/test/auto_test/audio_tiny11.wav
Binary files differ
diff --git a/voice_engine/main/test/auto_test/audio_tiny16.wav b/voice_engine/main/test/auto_test/audio_tiny16.wav
new file mode 100644
index 0000000..baab0ac
--- /dev/null
+++ b/voice_engine/main/test/auto_test/audio_tiny16.wav
Binary files differ
diff --git a/voice_engine/main/test/auto_test/audio_tiny22.wav b/voice_engine/main/test/auto_test/audio_tiny22.wav
new file mode 100644
index 0000000..b421867
--- /dev/null
+++ b/voice_engine/main/test/auto_test/audio_tiny22.wav
Binary files differ
diff --git a/voice_engine/main/test/auto_test/audio_tiny32.wav b/voice_engine/main/test/auto_test/audio_tiny32.wav
new file mode 100644
index 0000000..773ac23
--- /dev/null
+++ b/voice_engine/main/test/auto_test/audio_tiny32.wav
Binary files differ
diff --git a/voice_engine/main/test/auto_test/audio_tiny44.wav b/voice_engine/main/test/auto_test/audio_tiny44.wav
new file mode 100644
index 0000000..c9faa45
--- /dev/null
+++ b/voice_engine/main/test/auto_test/audio_tiny44.wav
Binary files differ
diff --git a/voice_engine/main/test/auto_test/audio_tiny48.wav b/voice_engine/main/test/auto_test/audio_tiny48.wav
new file mode 100644
index 0000000..8ebf11a
--- /dev/null
+++ b/voice_engine/main/test/auto_test/audio_tiny48.wav
Binary files differ
diff --git a/voice_engine/main/test/auto_test/audio_tiny8.wav b/voice_engine/main/test/auto_test/audio_tiny8.wav
new file mode 100644
index 0000000..d71c65e
--- /dev/null
+++ b/voice_engine/main/test/auto_test/audio_tiny8.wav
Binary files differ
diff --git a/voice_engine/main/test/auto_test/voe_cpu_test.cc b/voice_engine/main/test/auto_test/voe_cpu_test.cc
new file mode 100644
index 0000000..fb14d36
--- /dev/null
+++ b/voice_engine/main/test/auto_test/voe_cpu_test.cc
@@ -0,0 +1,106 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <cassert>
+#if defined(_WIN32)
+ #include <conio.h>
+#endif
+
+#include "voe_cpu_test.h"
+
+using namespace webrtc;
+
+namespace voetest {
+
+#ifdef MAC_IPHONE
+extern char micFile[256];
+#else
+extern const char* micFile;
+#endif
+
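+// Logs the failing expression, line number and LastError(), pauses and
+// aborts the test by returning -1 from the enclosing function.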
+#define CHECK(expr)                                             \
+    if (expr)                                                   \
+    {                                                           \
+        printf("Error at line: %i, %s \n", __LINE__, #expr);    \
+        printf("Error code: %i \n", base->LastError());         \
+        PAUSE                                                   \
+        return -1;                                              \
+    }
+
+extern char* GetFilename(char* filename);
+extern const char* GetFilename(const char* filename);
+extern int GetResource(char* resource, char* dest, int destLen);
+extern char* GetResource(char* resource);
+extern const char* GetResource(const char* resource);
+
+VoECpuTest::VoECpuTest(VoETestManager& mgr) :
+    _mgr(mgr)
+{
+}
+
+int VoECpuTest::DoTest()
+{
+    printf("------------------------------------------------\n");
+    printf(" CPU Reference Test\n");
+    printf("------------------------------------------------\n");
+
+    VoEBase* base = _mgr.BasePtr();
+    VoEFile* file = _mgr.FilePtr();
+    VoECodec* codec = _mgr.CodecPtr();
+    VoEAudioProcessing* apm = _mgr.APMPtr();
+
+    int channel(-1);
+    CodecInst isac;
+
+    isac.pltype = 104;
+    strcpy(isac.plname, "ISAC");
+    isac.pacsize = 960;
+    isac.plfreq = 32000;
+    isac.channels = 1;
+    isac.rate = -1;
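+    // 960 samples at 32 kHz = 30 ms frames; rate -1 leaves the bit rate to
+    // the codec default.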
+
+    CHECK(base->Init());
+    channel = base->CreateChannel();
+
+    CHECK(base->SetLocalReceiver(channel, 5566));
+    CHECK(base->SetSendDestination(channel, 5566, "127.0.0.1"));
+    CHECK(codec->SetRecPayloadType(channel, isac));
+    CHECK(codec->SetSendCodec(channel, isac));
+
+    CHECK(base->StartReceive(channel));
+    CHECK(base->StartPlayout(channel));
+    CHECK(base->StartSend(channel));
+    CHECK(file->StartPlayingFileAsMicrophone(channel, micFile, true, true));
+
+    CHECK(codec->SetVADStatus(channel, true));
+    CHECK(apm->SetAgcStatus(true, kAgcAdaptiveAnalog));
+    CHECK(apm->SetNsStatus(true, kNsModerateSuppression));
+    CHECK(apm->SetEcStatus(true, kEcAec));
+
+    TEST_LOG("\nMeasure CPU and memory while running a full-duplex"
+        " iSAC-swb call.\n\n");
+
+    PAUSE
+
+    CHECK(base->StopSend(channel));
+    CHECK(base->StopPlayout(channel));
+    CHECK(base->StopReceive(channel));
+
+    base->DeleteChannel(channel);
+    CHECK(base->Terminate());
+
+    return 0;
+}
+
+}  //  namespace voetest
diff --git a/voice_engine/main/test/auto_test/voe_cpu_test.h b/voice_engine/main/test/auto_test/voe_cpu_test.h
new file mode 100644
index 0000000..4c1ef96
--- /dev/null
+++ b/voice_engine/main/test/auto_test/voe_cpu_test.h
@@ -0,0 +1,32 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_CPU_TEST_H
+#define WEBRTC_VOICE_ENGINE_VOE_CPU_TEST_H
+
+#include "voe_standard_test.h"
+
+namespace voetest {
+
+class VoETestManager;
+
+class VoECpuTest
+{
+public:
+    VoECpuTest(VoETestManager& mgr);
+    ~VoECpuTest() {};
+    int DoTest();
+private:
+    VoETestManager& _mgr;
+};
+
+}  // namespace voetest
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_CPU_TEST_H
diff --git a/voice_engine/main/test/auto_test/voe_extended_test.cc b/voice_engine/main/test/auto_test/voe_extended_test.cc
new file mode 100644
index 0000000..a9b65be
--- /dev/null
+++ b/voice_engine/main/test/auto_test/voe_extended_test.cc
@@ -0,0 +1,8300 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "critical_section_wrapper.h"
+#include "event_wrapper.h"
+#include "thread_wrapper.h"
+#include "voe_extended_test.h"
+#include "../../source/voice_engine_defines.h"  // defines build macros
+
+#if defined(_WIN32)
+#include <conio.h>
+#include <winsock2.h>
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#include <netdb.h>
+#endif
+
+using namespace webrtc;
+
+namespace voetest
+{
+
+// Set this flag to ensure that test packets are transmitted to
+// RemoteIP::RemotePort during tests of SetSendToS and SetSendGQos. Requires a
+// receiver at the remote side and Wireshark with a proper ip.src filter.
+#define _SEND_TO_REMOTE_IP_
+
+#ifdef _SEND_TO_REMOTE_IP_
+const int RemotePort = 12345; // transmit to this UDP port
+const char* RemoteIP = "192.168.200.1"; // transmit to this IP address
+#endif
+
+#ifdef MAC_IPHONE
+#define SLEEP_IF_IPHONE(x) SLEEP(x)
+extern char micFile[256];
+#else
+#define SLEEP_IF_IPHONE(x)
+extern const char* micFile;
+#endif
+
+#ifdef ANDROID
+// Global pointers
+extern void* globalJavaVM;
+extern void* globalContext;
+#endif
+
+extern char* GetFilename(char* filename);
+extern const char* GetFilename(const char* filename);
+extern int GetResource(char* resource, char* dest, int destLen);
+extern char* GetResource(char* resource);
+extern const char* GetResource(const char* resource);
+
+// ----------------------------------------------------------------------------
+//  External transport (Transport) implementations:
+// ----------------------------------------------------------------------------
+
+ExtendedTestTransport::ExtendedTestTransport(VoENetwork* ptr) :
+    myNetw(ptr),
+    _thread(NULL),
+    _lock(NULL),
+    _event(NULL),
+    _length(0),
+    _channel(0)
+{
+    const char* threadName = "voe_extended_test_external_thread";
+    _lock = CriticalSectionWrapper::CreateCriticalSection();
+    _event = EventWrapper::Create();
+    _thread = ThreadWrapper::CreateThread(Run, this, kHighPriority, threadName);
+    if (_thread)
+    {
+        unsigned int id;
+        _thread->Start(id);
+    }
+}
+
+ExtendedTestTransport::~ExtendedTestTransport()
+{
+    if (_thread)
+    {
+        _thread->SetNotAlive();
+        _event->Set();
+        if (_thread->Stop())
+        {
+            delete _thread;
+            _thread = NULL;
+            delete _event;
+            _event = NULL;
+            delete _lock;
+            _lock = NULL;
+        }
+    }
+}
+
+bool ExtendedTestTransport::Run(void* ptr)
+{
+    return static_cast<ExtendedTestTransport*> (ptr)->Process();
+}
+
+bool ExtendedTestTransport::Process()
+{
+    switch (_event->Wait(500))
+    {
+        case kEventSignaled:
+            _lock->Enter();
+            myNetw->ReceivedRTPPacket(_channel, _packetBuffer, _length);
+            _lock->Leave();
+            return true;
+        case kEventTimeout:
+            return true;
+        case kEventError:
+            break;
+    }
+    return true;
+}
+
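+// Buffers the RTP packet under the lock and signals the worker thread, which
+// feeds it back into VoENetwork::ReceivedRTPPacket() (loopback transport).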
+int ExtendedTestTransport::SendPacket(int channel, const void *data, int len)
+{
+    _lock->Enter();
+    if (len < 1612)
+    {
+        memcpy(_packetBuffer, (const unsigned char*) data, len);
+        _length = len;
+        _channel = channel;
+    }
+    _lock->Leave();
+    _event->Set(); // triggers ReceivedRTPPacket() from worker thread
+    return len;
+}
+
+int ExtendedTestTransport::SendRTCPPacket(int channel, const void *data,
+                                          int len)
+{
+    myNetw->ReceivedRTCPPacket(channel, data, len);
+    return len;
+}
+
+XTransport::XTransport(VoENetwork* netw, VoEFile* file) :
+    _netw(netw), _file(file)
+{
+}
+
+int XTransport::SendPacket(int channel, const void *data, int len)
+{
+    // loopback
+    // _netw->ReceivedRTPPacket(channel, data, len);
+
+    return 0;
+}
+
+int XTransport::SendRTCPPacket(int, const void *, int)
+{
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  VoERTPObserver
+// ----------------------------------------------------------------------------
+
+XRTPObserver::XRTPObserver() :
+    _SSRC(0)
+{
+}
+
+XRTPObserver::~XRTPObserver()
+{
+}
+
+void XRTPObserver::OnIncomingCSRCChanged(
+    const int /*channel*/,
+    const unsigned int /*CSRC*/, const bool /*added*/)
+{
+}
+
+void XRTPObserver::OnIncomingSSRCChanged(const int /*channel*/,
+                                         const unsigned int SSRC)
+{
+    // char msg[128];
+    // sprintf(msg, "OnIncomingSSRCChanged(channel=%d, SSRC=%lu)\n",
+    //        channel, SSRC);
+    // TEST_LOG(msg);
+
+    _SSRC = SSRC; // skip channel dependency for simplicity
+
+}
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::PrepareTest(const char* str) const
+{
+    TEST_LOG("\n\n================================================\n");
+    TEST_LOG("\tExtended *%s* Test\n", str);
+    TEST_LOG("================================================\n\n");
+
+    return 0;
+}
+
+int VoEExtendedTest::TestPassed(const char* str) const
+{
+    TEST_LOG("\n\n------------------------------------------------\n");
+    TEST_LOG("\tExtended *%s* test passed!\n", str);
+    TEST_LOG("------------------------------------------------\n\n");
+
+    return 0;
+}
+
+void VoEExtendedTest::OnPeriodicDeadOrAlive(const int /*channel*/,
+                                            const bool alive)
+{
+    _alive = alive;
+    if (alive)
+    {
+        TEST_LOG("=> ALIVE ");
+    } else
+    {
+        TEST_LOG("=> DEAD ");
+    }
+    fflush(NULL);
+}
+
+void VoEExtendedTest::CallbackOnError(const int errCode, int)
+{
+    _errCode = errCode;
+    TEST_LOG("\n************************\n");
+    TEST_LOG(" RUNTIME ERROR: %d \n", errCode);
+    TEST_LOG("************************\n");
+}
+
+VoEExtendedTest::VoEExtendedTest(VoETestManager& mgr) :
+    _mgr(mgr)
+{
+    for (int i = 0; i < 32; i++)
+    {
+        _listening[i] = false;
+        _playing[i] = false;
+        _sending[i] = false;
+    }
+}
+
+VoEExtendedTest::~VoEExtendedTest()
+{
+}
+
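+// Test helper: sets up loopback on 127.0.0.1 for the given channel and starts
+// the requested directions (listen, playout, send).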
+void VoEExtendedTest::StartMedia(int channel, int rtpPort, bool listen,
+                                 bool playout, bool send)
+{
+    VoEBase* base = _mgr.BasePtr();
+
+    _listening[channel] = false;
+    _playing[channel] = false;
+    _sending[channel] = false;
+
+    base->SetLocalReceiver(channel, rtpPort);
+    base->SetSendDestination(channel, rtpPort, "127.0.0.1");
+    if (listen)
+    {
+        _listening[channel] = true;
+        base->StartReceive(channel);
+    }
+    if (playout)
+    {
+        _playing[channel] = true;
+        base->StartPlayout(channel);
+    }
+    if (send)
+    {
+        _sending[channel] = true;
+        base->StartSend(channel);
+    }
+}
+
+void VoEExtendedTest::StopMedia(int channel)
+{
+    VoEBase* base = _mgr.BasePtr();
+
+    if (_listening[channel])
+    {
+        _listening[channel] = false;
+        base->StopReceive(channel);
+    }
+    if (_playing[channel])
+    {
+        _playing[channel] = false;
+        base->StopPlayout(channel);
+    }
+    if (_sending[channel])
+    {
+        _sending[channel] = false;
+        base->StopSend(channel);
+    }
+}
+
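+// Test helper: plays out for timeMillisec, optionally using micFile as the
+// microphone signal and printing the sleep time as a time marker.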
+void VoEExtendedTest::Play(int channel,
+                           unsigned int timeMillisec,
+                           bool addFileAsMicrophone,
+                           bool addTimeMarker)
+{
+    VoEBase* base = _mgr.BasePtr();
+    VoEFile* file = _mgr.FilePtr();
+
+    base->StartPlayout(channel);
+    TEST_LOG("[playing]");
+    fflush(NULL);
+    if (addFileAsMicrophone)
+    {
+        file->StartPlayingFileAsMicrophone(channel, micFile, true, true);
+        TEST_LOG("[file as mic]");
+        fflush(NULL);
+    }
+    if (addTimeMarker)
+    {
+        float dtSec = (float) ((float) timeMillisec / 1000.0);
+        TEST_LOG("[dT=%.1f]", dtSec);
+        fflush(NULL); // print sleep time in seconds
+    }
+    SLEEP(timeMillisec);
+    base->StopPlayout(channel);
+    file->StopPlayingFileAsMicrophone(channel);
+}
+
+void VoEExtendedTest::Sleep(unsigned int timeMillisec, bool addMarker)
+{
+    if (addMarker)
+    {
+        float dtSec = (float) ((float) timeMillisec / 1000.0);
+        TEST_LOG("[dT=%.1f]", dtSec); // print sleep time in seconds
+    }
+    ::Sleep(timeMillisec);
+}
+
+int VoEExtendedTest::TestBase()
+{
+#ifndef _WIN32
+    // Sleep a bit instead when pause not supported
+#undef PAUSE
+#define PAUSE SLEEP(2000);
+#endif
+
+    PrepareTest("Base");
+
+    VoEBase* base = _mgr.BasePtr();
+    VoENetwork* netw = _mgr.NetworkPtr();
+#ifdef _TEST_RTP_RTCP_
+    VoERTP_RTCP* rtp = _mgr.RTP_RTCPPtr();
+#endif
+
+    //////////////////////////
+    // SetTraceFileName
+
+#ifdef _USE_EXTENDED_TRACE_
+    TEST(SetTraceFileName - SetDebugTraceFileName); ANL();
+
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(NULL)); MARK();
+    // don't use these files
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename(""
+        "VoEBase_trace_dont_use.txt"))); MARK();
+    // use these instead
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename(""
+        "VoEBase_trace.txt"))); MARK();
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStream |
+                                              kTraceStateInfo |
+                                              kTraceWarning |
+                                              kTraceError |
+                                              kTraceCritical |
+                                              kTraceApiCall |
+                                              kTraceMemory |
+                                              kTraceInfo)); MARK();
+
+    ANL(); AOK(); ANL(); ANL();
+#endif
+
+    ///////////////////////////////////////
+    // RegisterVoiceEngineObserver
+    // DeRegisterVoiceEngineObserver
+
+    TEST(SetObserver);
+    ANL();
+
+    TEST_MUSTPASS(base->RegisterVoiceEngineObserver(*this));
+    MARK();
+    SLEEP(100);
+    TEST_MUSTPASS(base->DeRegisterVoiceEngineObserver());
+    MARK();
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    /////////////////////
+    // GetVersion
+
+    TEST(GetVersion);
+    ANL();
+
+    char version[1024];
+    // The audio device module and AudioProcessing fail to GetVersion when
+    // they are not initialized.
+    TEST_MUSTPASS(base->GetVersion(version));
+    MARK();
+    TEST_LOG("\n-----\n%s\n-----\n", version);
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    ///////////////
+    // Init
+
+    TEST(Init);
+    ANL();
+
+    TEST_MUSTPASS(base->Init());
+    MARK();
+    TEST_MUSTPASS(base->Terminate());
+
+    TEST_MUSTPASS(base->Init());
+    MARK();
+    // ensure that no new memory is allocated at the second call (check
+    // trace file)
+    TEST_MUSTPASS(base->Init());
+    MARK();
+    TEST_MUSTPASS(base->Terminate());
+#if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    // verify AEC recording
+    TEST_MUSTPASS(base->Init());
+    MARK(); // verify output dat-files
+    TEST_MUSTPASS(base->Terminate());
+#endif
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    ////////////////////
+    // Terminate
+
+    TEST(Terminate);
+    ANL();
+    TEST_MUSTPASS(base->Terminate());
+    MARK(); // should be ignored
+    TEST_MUSTPASS(base->Init());
+    TEST_MUSTPASS(base->Terminate());
+    MARK(); // should terminate
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    ///////////////////////////
+    // MaxNumOfChannels
+
+    TEST(MaxNumOfChannels);
+    ANL();
+    TEST_MUSTPASS(base->MaxNumOfChannels() < 0);
+    MARK();
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    ////////////////////////
+    // CreateChannel
+    // DeleteChannel
+
+    int i;
+    int channel;
+    int nChannels(base->MaxNumOfChannels());
+
+    TEST(CreateChannel);
+    ANL();
+    TEST(DeleteChannel);
+    ANL();
+
+    TEST_MUSTPASS(base->Init());
+
+    channel = base->CreateChannel();
+    MARK();
+    TEST_MUSTPASS(channel != 0);
+    channel = base->CreateChannel();
+    MARK();
+    TEST_MUSTPASS(channel != 1);
+
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    MARK();
+    TEST_MUSTPASS(base->DeleteChannel(1));
+    MARK();
+
+    // create and delete one channel many times
+    for (i = 0; i < 10; i++)
+    {
+        channel = base->CreateChannel();
+        MARK();
+        TEST_MUSTPASS(channel != 0); // should be 0 each time
+        TEST_MUSTPASS(base->DeleteChannel(channel));
+        MARK();
+    }
+    // create max number of channels
+    for (i = 0; i < nChannels; i++)
+    {
+        channel = base->CreateChannel();
+        MARK();
+        TEST_MUSTPASS(channel != i);
+    }
+    channel = base->CreateChannel();
+    MARK(); // should fail since no more channels can now be created
+    TEST_MUSTPASS(channel != -1);
+
+    int aChannel = (((nChannels - 17) > 0) ? (nChannels - 17) : 0);
+    TEST_MUSTPASS(base->DeleteChannel(aChannel));
+    MARK();
+    channel = base->CreateChannel();
+    MARK(); // should reuse channel
+    TEST_MUSTPASS(channel != aChannel);
+
+    // delete all created channels
+    for (i = 0; i < nChannels; i++)
+    {
+        TEST_MUSTPASS(base->DeleteChannel(i));
+        MARK();
+    }
+
+    // try to delete a non-existing channel
+    TEST_MUSTPASS(-1 != base->DeleteChannel(aChannel));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // ------------------------------------------------------------------------
+    // >> SetLocalReceiver
+    //
+    // State: VE not initialized, no existing channels
+
+    TEST_MUSTPASS(base->Init());
+
+    int ch;
+
+    TEST(SetLocalReceiver);
+    ANL();
+
+    // no channel created yet => should fail
+    TEST_MUSTPASS(!base->SetLocalReceiver(0, 100));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    ch = base->CreateChannel();
+
+#ifdef MAC_IPHONE
+    printf("\nNOTE: Local IP must be set in source code (line %d) \n",
+           __LINE__ + 1);
+    char* localIp = "127.0.0.1";
+#else
+    char localIp[64] = { 0 };
+    TEST_MUSTPASS(netw->GetLocalIP(localIp));
+    MARK();
+    // NOTE: This API is supported on Windows, Mac and Linux; it may fail or
+    // not return the local IP on other platforms.
+#endif
+
+    // trivial invalid function calls
+    TEST_MUSTPASS(!base->SetLocalReceiver(ch+1, 12345));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+    TEST_MUSTPASS(!base->SetLocalReceiver(ch, -1));
+    MARK();
+    TEST_ERROR(VE_INVALID_PORT_NMBR);
+
+    // check conflict with ongoing receiving
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345));
+    MARK();
+    TEST_MUSTPASS(base->StartReceive(ch));
+    TEST_MUSTPASS(!base->SetLocalReceiver(ch, 12345));
+    MARK();
+    TEST_ERROR(VE_ALREADY_LISTENING);
+    TEST_MUSTPASS(base->StopReceive(ch));
+
+    // check conflict with ongoing transmission
+    TEST_MUSTPASS(base->SetSendDestination(ch, 12345, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartSend(ch));
+    TEST_MUSTPASS(!base->SetLocalReceiver(ch, 12345));
+    MARK();
+    TEST_ERROR(VE_ALREADY_SENDING);
+    TEST_MUSTPASS(base->StopSend(ch));
+
+    // valid function calls
+    // Need to sleep between, otherwise it may fail for unknown reason
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345));
+    MARK();
+    SLEEP(100);
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345));
+    MARK();
+    SLEEP(100);
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345, kVoEDefault, localIp));
+    MARK();
+    SLEEP(100);
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345, kVoEDefault, NULL,
+                                         "230.1.2.3"));
+    MARK();
+    SLEEP(100);
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345, kVoEDefault, localIp,
+                                         "230.1.2.3"));
+    MARK();
+    SLEEP(100);
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345, 5555, NULL));
+    MARK();
+    SLEEP(100);
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345));
+    MARK();
+    SLEEP(100);
+
+    // STATE: no media, but sockets exist and are bound to 12345 and 12346
+    // respectively
+
+    // Add some dynamic tests as well:
+
+    // ensure that last setting is used (cancels old settings)
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345));
+    MARK();
+    SLEEP(100);
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 44444));
+    MARK();
+    SLEEP(100);
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 54321));
+    MARK();
+    TEST_MUSTPASS(base->SetSendDestination(ch, 54321, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(ch));
+    TEST_MUSTPASS(base->StartSend(ch));
+    Play(ch, 1000, true, true);
+    TEST_MUSTPASS(base->StopSend(ch));
+    TEST_MUSTPASS(base->StopReceive(ch));
+
+    TEST_MUSTPASS(base->DeleteChannel(ch));
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // >> end of SetLocalReceiver
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> GetLocalReceiver
+    //
+    // State: VE initialized, no existing channels
+
+    TEST(GetLocalReceiver);
+    ANL();
+
+    int port;
+    char ipaddr[64];
+    int RTCPport;
+
+    ch = base->CreateChannel();
+
+    // verify non-configured (blank) local receiver
+    TEST_MUSTPASS(base->GetLocalReceiver(ch, port, RTCPport, ipaddr));
+    MARK();
+    TEST_MUSTPASS(port != 0);
+    TEST_MUSTPASS(RTCPport != 0);
+    TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+
+    // check some trivial set/get combinations
+
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345))
+    TEST_MUSTPASS(base->GetLocalReceiver(ch, port, RTCPport, ipaddr));
+    MARK();
+    TEST_MUSTPASS(port != 12345);
+    TEST_MUSTPASS(RTCPport != 12346);
+    TEST_MUSTPASS(strcmp(ipaddr, "0.0.0.0") != 0); // now bound to the "any" IP
+
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345, 55555))
+    TEST_MUSTPASS(base->GetLocalReceiver(ch, port, RTCPport, ipaddr));
+    MARK();
+    TEST_MUSTPASS(port != 12345);
+    TEST_MUSTPASS(RTCPport != 55555);
+    TEST_MUSTPASS(strcmp(ipaddr, "0.0.0.0") != 0);
+
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345, kVoEDefault, localIp))
+    TEST_MUSTPASS(base->GetLocalReceiver(ch, port, RTCPport, ipaddr));
+    MARK();
+    TEST_MUSTPASS(port != 12345);
+    TEST_MUSTPASS(RTCPport != 12346);
+    TEST_MUSTPASS(strcmp(ipaddr, localIp) != 0);
+
+    TEST_MUSTPASS(base->DeleteChannel(ch));
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // >> end of GetLocalReceiver
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> SetSendDestination
+    //
+    // State: VE initialized, no existing channels
+
+    TEST(SetSendDestination);
+    ANL();
+
+    // call without existing channel
+    TEST_MUSTPASS(!base->SetSendDestination(0, 12345, "127.0.0.1"));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    ch = base->CreateChannel();
+
+    // trivial fail tests
+    TEST_MUSTPASS(!base->SetSendDestination(ch, 65536, "127.0.0.1"));
+    MARK();
+    TEST_ERROR(VE_INVALID_PORT_NMBR); // invalid RTP port
+    TEST_MUSTPASS(!base->SetSendDestination(ch, 12345, "127.0.0.1", 65536));
+    MARK();
+    TEST_ERROR(VE_INVALID_PORT_NMBR); // invalid source port
+    TEST_MUSTPASS(!base->SetSendDestination(ch, 12345, "127.0.0.1", kVoEDefault,
+                                            65536));
+    MARK();
+    TEST_ERROR(VE_INVALID_PORT_NMBR); // invalid RTCP port
+    TEST_MUSTPASS(!base->SetSendDestination(ch, 12345, "127.0.0.300"));
+    MARK();
+    TEST_ERROR(VE_INVALID_IP_ADDRESS); // invalid IP address
+
+    // sockets must be created first to support multi-cast (not required
+    // otherwise)
+    TEST_MUSTPASS(!base->SetSendDestination(ch, 55555, "230.0.0.1"));
+    MARK();
+    TEST_ERROR(VE_SOCKET_ERROR);
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 55555)); // create sockets
+    TEST_MUSTPASS(base->SetSendDestination(ch, 55555, "230.0.0.1"));
+    MARK(); // should work now
+
+    base->DeleteChannel(0);
+    ch = base->CreateChannel();
+
+    // STATE: one channel created, no sockets exist
+
+    // valid function calls
+    TEST_MUSTPASS(base->SetSendDestination(ch, 33333, "127.0.0.1"));
+    MARK();
+    TEST_MUSTPASS(base->SetSendDestination(ch, 33333, "127.0.0.1", 44444));
+    MARK();
+    TEST_MUSTPASS(base->SetSendDestination(ch, 33333, "127.0.0.1", kVoEDefault,
+                                           55555));
+    MARK();
+    TEST_MUSTPASS(base->SetSendDestination(ch, 33333, "127.0.0.1", 44444,
+                                           55555));
+    MARK();
+
+    base->DeleteChannel(0);
+    ch = base->CreateChannel();
+
+    // create receive sockets first and then an extra pair of send sockets
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 44444));
+    TEST_MUSTPASS(base->SetSendDestination(ch, 44444, "127.0.0.1", 11111));
+    MARK(); // binds to 11111
+
+    TEST_MUSTPASS(base->DeleteChannel(ch));
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // >> end of SetSendDestination
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> GetSendDestination
+    //
+    // State: VE initialized, no existing channels
+
+    TEST(GetSendDestination);
+    ANL();
+
+    int sourcePort;
+
+    ch = base->CreateChannel();
+
+    // verify non-configured (blank) local receiver
+    TEST_MUSTPASS(base->GetSendDestination(ch, port, ipaddr, sourcePort,
+                                           RTCPport));
+    MARK();
+    TEST_MUSTPASS(port != 0);
+    TEST_MUSTPASS(sourcePort != 0);
+    TEST_MUSTPASS(RTCPport != 0);
+    TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+
+    // check some trivial set/get combinations
+
+    TEST_MUSTPASS(base->SetSendDestination(ch, 44444, "127.0.0.1"));
+    TEST_MUSTPASS(base->GetSendDestination(ch, port, ipaddr, sourcePort,
+                                           RTCPport));
+    MARK();
+    TEST_MUSTPASS(port != 44444);
+    TEST_MUSTPASS(sourcePort != 0); // should be 0 since no local receiver
+                                    // has been defined yet
+    TEST_MUSTPASS(RTCPport != 44445);
+    TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 55555));
+    TEST_MUSTPASS(base->GetSendDestination(ch, port, ipaddr, sourcePort,
+                                           RTCPport));
+    MARK();
+    TEST_MUSTPASS(port != 44444);
+    TEST_MUSTPASS(sourcePort != 55555); // should be equal to local port
+    TEST_MUSTPASS(RTCPport != 44445);
+    TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+    base->DeleteChannel(0);
+    ch = base->CreateChannel();
+
+    TEST_MUSTPASS(base->SetSendDestination(ch, 44444, "127.0.0.1"));
+    // NULL as IP-address input should work as well
+    TEST_MUSTPASS(base->GetSendDestination(ch, port, NULL, sourcePort,
+                                           RTCPport));
+    MARK();
+    TEST_MUSTPASS(port != 44444);
+    TEST_MUSTPASS(sourcePort != 0);
+    TEST_MUSTPASS(RTCPport != 44445);
+
+    TEST_MUSTPASS(base->DeleteChannel(ch));
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // >> end of GetLocalReceiver
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> StartReceive
+    // >> StopReceive
+    //
+    // State: VE initialized, no existing channels
+
+    TEST(StartReceive);
+    ANL();
+    TEST(StopReceive);
+    ANL();
+
+    // call without existing channel
+    TEST_MUSTPASS(!base->StartReceive(0));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+    TEST_MUSTPASS(!base->StopReceive(0));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    ch = base->CreateChannel();
+
+    // sockets must be created first
+    TEST_MUSTPASS(!base->StartReceive(0));
+    MARK();
+    TEST_ERROR(VE_SOCKETS_NOT_INITED);
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 55555));
+    TEST_MUSTPASS(base->StartReceive(0));
+    MARK(); // should work this time
+
+    // enable again (should work)
+    TEST_MUSTPASS(base->StartReceive(0));
+    MARK();
+
+    // Stop/Start (should work)
+    TEST_MUSTPASS(base->StopReceive(0));
+    MARK();
+    TEST_MUSTPASS(base->StartReceive(0));
+    MARK();
+
+    // Verify in loopback
+    TEST_MUSTPASS(base->SetSendDestination(ch, 55555, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartSend(ch));
+    Play(ch, 1000, true, true);
+    TEST_MUSTPASS(base->StopSend(ch));
+    TEST_MUSTPASS(base->StopReceive(0));
+    MARK();
+
+    base->DeleteChannel(0);
+    ch = base->CreateChannel();
+
+    // Ensure that it is OK to add a delay between SetLocalReceiver and StartReceive
+    TEST_LOG("\nspeak after 2 seconds and ensure that no delay is added:\n");
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 55555));
+
+    Sleep(2000, true); // adding emulated delay here
+
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->SetSendDestination(ch, 55555, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartSend(ch));
+    Play(ch, 2000, true, true);
+    TEST_MUSTPASS(base->StopSend(ch));
+    TEST_MUSTPASS(base->StopReceive(0));
+
+    TEST_MUSTPASS(base->DeleteChannel(ch));
+    ANL();
+
+    // Multi-channel tests
+
+    for (i = 0; i < base->MaxNumOfChannels(); i++)
+    {
+        ch = base->CreateChannel();
+        TEST_MUSTPASS(base->SetLocalReceiver(ch, 11111+2*i));
+        TEST_MUSTPASS(base->StartReceive(ch));
+        MARK();
+    }
+    for (i = 0; i < base->MaxNumOfChannels(); i++)
+    {
+        TEST_MUSTPASS(base->StopReceive(i));
+        MARK();
+        base->DeleteChannel(i);
+    }
+    for (i = 0; i < base->MaxNumOfChannels(); i++)
+    {
+        ch = base->CreateChannel();
+        TEST_MUSTPASS(base->SetLocalReceiver(ch, 11111+2*i));
+        TEST_MUSTPASS(base->StartReceive(ch));
+        MARK();
+        TEST_MUSTPASS(base->StopReceive(ch));
+        MARK();
+        base->DeleteChannel(ch);
+    }
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // >> end of StartReceive/StopReceive
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> StartPlayout
+    // >> StopPlayout
+    //
+    // State: VE initialized, no existing channels
+
+    TEST(StartPlayout);
+    ANL();
+    TEST(StopPlayout);
+    ANL();
+
+    // call without existing channel
+    TEST_MUSTPASS(!base->StartPlayout(0));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+    TEST_MUSTPASS(!base->StopPlayout(0));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    ch = base->CreateChannel();
+
+    TEST_MUSTPASS(base->StartPlayout(ch));
+    MARK();
+    TEST_MUSTPASS(base->StartPlayout(ch));
+    MARK();
+    TEST_MUSTPASS(base->StopPlayout(ch));
+    MARK();
+    TEST_MUSTPASS(base->StopPlayout(ch));
+    MARK();
+
+    base->DeleteChannel(ch);
+
+    // Multi-channel tests
+    const int MaxNumberOfPlayingChannels(kVoiceEngineMaxNumOfActiveChannels);
+
+    for (i = 0; i < MaxNumberOfPlayingChannels; i++)
+    {
+        ch = base->CreateChannel();
+        TEST_MUSTPASS(base->StartPlayout(ch));
+        MARK();
+    }
+    for (i = 0; i < MaxNumberOfPlayingChannels; i++)
+    {
+        TEST_MUSTPASS(base->StopPlayout(i));
+        MARK();
+        base->DeleteChannel(i);
+    }
+    for (i = 0; i < MaxNumberOfPlayingChannels; i++)
+    {
+        ch = base->CreateChannel();
+        TEST_MUSTPASS(base->StartPlayout(ch));
+        MARK();
+        TEST_MUSTPASS(base->StopPlayout(ch));
+        MARK();
+        base->DeleteChannel(ch);
+    }
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // >> end of StartPlayout/StopPlayout
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> StartSend
+    // >> StopSend
+    //
+    // State: VE initialized, no existing channels
+
+    TEST(StartSend);
+    ANL();
+    TEST(StopSend);
+    ANL();
+
+    // call without existing channel
+    TEST_MUSTPASS(!base->StartSend(0));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+    TEST_MUSTPASS(!base->StopSend(0));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    ch = base->CreateChannel();
+
+    // call without initialized destination
+    TEST_MUSTPASS(!base->StartSend(ch));
+    MARK();
+    TEST_ERROR(VE_DESTINATION_NOT_INITED);
+
+    // initialize destination and try again (should work even without existing
+    // sockets)
+    TEST_MUSTPASS(base->SetSendDestination(ch, 33333, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartSend(ch));
+    MARK();
+    SLEEP(100);
+
+    // STATE: sockets should now have been created automatically at the first
+    // transmitted packet and should be bound to 33333 and "0.0.0.0"
+
+    TEST_MUSTPASS(base->StopSend(ch));
+    MARK();
+
+    base->DeleteChannel(ch);
+    ch = base->CreateChannel();
+
+    // try loopback with unique send sockets (closed when channel is deleted or
+    // new source is set)
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 33333));
+    TEST_MUSTPASS(base->SetSendDestination(ch, 33333, "127.0.0.1", 44444));
+    TEST_MUSTPASS(base->StartSend(ch));
+    MARK();
+    TEST_MUSTPASS(base->StartReceive(ch));
+    Play(ch, 2000, true, true);
+    TEST_MUSTPASS(base->StopSend(ch));
+    MARK();
+    TEST_MUSTPASS(base->StopReceive(ch));
+
+    base->DeleteChannel(ch);
+    ANL();
+
+    // Multi-channel tests
+    for (i = 0; i < base->MaxNumOfChannels(); i++)
+    {
+        ch = base->CreateChannel();
+        TEST_MUSTPASS(base->SetLocalReceiver(ch, 33333 + 2*i));
+        TEST_MUSTPASS(base->SetSendDestination(ch, 33333 + 2*i, "127.0.0.1"));
+        TEST_MUSTPASS(base->StartSend(ch));
+        MARK();
+    }
+    for (i = 0; i < base->MaxNumOfChannels(); i++)
+    {
+        TEST_MUSTPASS(base->StopSend(i));
+        MARK();
+        base->DeleteChannel(i);
+    }
+    for (i = 0; i < base->MaxNumOfChannels(); i++)
+    {
+        ch = base->CreateChannel();
+        TEST_MUSTPASS(base->SetLocalReceiver(ch, 45633 + 2*i));
+        TEST_MUSTPASS(base->SetSendDestination(ch, 45633 + 2*i, "127.0.0.1"));
+        TEST_MUSTPASS(base->StartSend(ch));
+        MARK();
+        TEST_MUSTPASS(base->StopSend(ch));
+        MARK();
+        base->DeleteChannel(ch);
+    }
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // >> end of StartSend/StopSend
+    // ------------------------------------------------------------------------
+
+    //////////////////////////////
+    // SetNetEQPlayoutMode
+    // GetNetEQPlayoutMode
+
+    TEST(SetNetEQPlayoutMode);
+    ANL();
+    TEST(GetNetEQPlayoutMode);
+    ANL();
+
+    NetEqModes mode;
+
+    ch = base->CreateChannel();
+
+    // invalid function calls (should fail)
+    TEST_MUSTPASS(!base->GetNetEQPlayoutMode(ch+1, mode));
+    MARK();
+    TEST_MUSTPASS(!base->SetNetEQPlayoutMode(ch+1, kNetEqDefault));
+    MARK();
+
+    // verify default mode (should be kNetEqDefault)
+    TEST_MUSTPASS(base->GetNetEQPlayoutMode(ch, mode));
+    MARK();
+    TEST_MUSTPASS(mode != kNetEqDefault);
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(ch, kNetEqStreaming));
+    MARK();
+    base->DeleteChannel(ch);
+
+    // ensure that default mode is set as soon as new channel is created
+    ch = base->CreateChannel();
+    TEST_MUSTPASS(base->GetNetEQPlayoutMode(ch, mode));
+    MARK();
+    TEST_MUSTPASS(mode != kNetEqDefault);
+    base->DeleteChannel(ch);
+
+    // verify Set/Get for all supported modes and max number of channels
+    for (i = 0; i < base->MaxNumOfChannels(); i++)
+    {
+        ch = base->CreateChannel();
+
+        // verify Set/Get for all supported modes
+        TEST_MUSTPASS(base->SetNetEQPlayoutMode(i, kNetEqDefault));
+        MARK();
+        TEST_MUSTPASS(base->GetNetEQPlayoutMode(i, mode));
+        MARK();
+        TEST_MUSTPASS(mode != kNetEqDefault);
+        TEST_MUSTPASS(base->SetNetEQPlayoutMode(i, kNetEqStreaming));
+        MARK();
+        TEST_MUSTPASS(base->GetNetEQPlayoutMode(i, mode));
+        MARK();
+        TEST_MUSTPASS(mode != kNetEqStreaming);
+        TEST_MUSTPASS(base->SetNetEQPlayoutMode(i, kNetEqFax));
+        MARK();
+        TEST_MUSTPASS(base->GetNetEQPlayoutMode(i, mode));
+        MARK();
+        TEST_MUSTPASS(mode != kNetEqFax);
+        SLEEP(50);
+    }
+
+    for (i = 0; i < base->MaxNumOfChannels(); i++)
+    {
+        base->DeleteChannel(i);
+    }
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    //////////////////////////////
+    // SetNetEQBGNMode
+    // GetNetEQBGNMode
+
+    TEST(SetNetEQBGNMode);
+    ANL();
+    TEST(GetNetEQBGNMode);
+    ANL();
+
+    NetEqBgnModes bgnMode;
+
+    ch = base->CreateChannel();
+
+    // invalid function calls (should fail)
+    TEST_MUSTPASS(!base->GetNetEQBGNMode(ch+1, bgnMode));
+    MARK();
+    TEST_MUSTPASS(!base->SetNetEQBGNMode(ch+1, kBgnOn));
+    MARK();
+
+    // verify default mode (should be kBgnOn)
+    TEST_MUSTPASS(base->GetNetEQBGNMode(ch, bgnMode));
+    MARK();
+    TEST_MUSTPASS(bgnMode != kBgnOn);
+    base->DeleteChannel(ch);
+
+    // ensure that default mode is set as soon as new channel is created
+    ch = base->CreateChannel();
+    TEST_MUSTPASS(base->GetNetEQBGNMode(ch, bgnMode));
+    MARK();
+    TEST_MUSTPASS(bgnMode != kBgnOn);
+    base->DeleteChannel(ch);
+
+    // verify Set/Get for all supported modes and max number of channels
+    for (i = 0; i < base->MaxNumOfChannels(); i++)
+    {
+        ch = base->CreateChannel();
+
+        // verify Set/Get for all supported modes
+        TEST_MUSTPASS(base->SetNetEQBGNMode(i, kBgnOn));
+        MARK();
+        TEST_MUSTPASS(base->GetNetEQBGNMode(i, bgnMode));
+        MARK();
+        TEST_MUSTPASS(bgnMode != kBgnOn);
+        TEST_MUSTPASS(base->SetNetEQBGNMode(i, kBgnFade));
+        MARK();
+        TEST_MUSTPASS(base->GetNetEQBGNMode(i, bgnMode));
+        MARK();
+        TEST_MUSTPASS(bgnMode != kBgnFade);
+        TEST_MUSTPASS(base->SetNetEQBGNMode(i, kBgnOff));
+        MARK();
+        TEST_MUSTPASS(base->GetNetEQBGNMode(i, bgnMode));
+        MARK();
+        TEST_MUSTPASS(bgnMode != kBgnOff);
+        SLEEP(50);
+    }
+
+    for (i = 0; i < base->MaxNumOfChannels(); i++)
+    {
+        base->DeleteChannel(i);
+    }
+
+    // Verify real-time performance for all playout modes in full duplex
+
+    ch = base->CreateChannel();
+
+    TEST_MUSTPASS(base->SetLocalReceiver(ch , 12345));
+    TEST_MUSTPASS(base->SetSendDestination(ch, 12345, "127.0.0.1"));
+
+    TEST_MUSTPASS(base->StartReceive(ch));
+    TEST_MUSTPASS(base->StartSend(ch));
+    TEST_MUSTPASS(base->StartPlayout(ch));
+
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(ch, kNetEqDefault));
+    MARK();
+    TEST_LOG("\nenjoy full duplex using kNetEqDefault playout mode...\n");
+    PAUSE
+
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(ch, kNetEqStreaming));
+    MARK();
+    TEST_LOG("\nenjoy full duplex using kNetEqStreaming playout mode...\n");
+    PAUSE
+
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(ch, kNetEqFax));
+    MARK();
+    TEST_LOG("\nenjoy full duplex using kNetEqFax playout mode...\n");
+    PAUSE
+
+    TEST_MUSTPASS(base->StopSend(ch));
+    TEST_MUSTPASS(base->StopPlayout(ch));
+    TEST_MUSTPASS(base->StopReceive(ch));
+
+    base->DeleteChannel(ch);
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    /////////////////////
+    // Full duplex tests
+
+    ch = base->CreateChannel(); // We must delete this channel first to be able
+                                // to reuse port 12345
+
+    // start with default case, also test non-default RTCP port
+#ifdef _TEST_RTP_RTCP_
+    TEST_MUSTPASS(rtp->SetRTCP_CNAME(ch, "Johnny"));
+#endif
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345, 12349));
+    TEST_MUSTPASS(base->SetSendDestination(ch, 12345, "127.0.0.1", kVoEDefault,
+                                           12349));
+
+    TEST_MUSTPASS(base->StartReceive(ch));
+    TEST_MUSTPASS(base->StartSend(ch));
+    TEST_MUSTPASS(base->StartPlayout(ch));
+
+    TEST_LOG("full duplex is now activated (1)\n");
+    TEST_LOG("waiting for RTCP packet...\n");
+
+    SLEEP(7000); // Make sure we get RTCP packet
+    PAUSE;
+
+    // Verify that we got RTCP packet from correct source port
+#ifdef _TEST_RTP_RTCP_
+    char tmpStr[64] = { 0 };
+    TEST_MUSTPASS(rtp->GetRemoteRTCP_CNAME(ch, tmpStr));
+    TEST_MUSTPASS(_stricmp("Johnny", tmpStr));
+#endif
+    int rtpPort(0), rtcpPort(0);
+    char ipAddr[64] = { 0 };
+    TEST_MUSTPASS(netw->GetSourceInfo(ch, rtpPort, rtcpPort, ipAddr));
+    TEST_MUSTPASS(12349 != rtcpPort);
+    TEST_MUSTPASS(base->StopSend(ch));
+    TEST_MUSTPASS(base->StopPlayout(ch));
+    TEST_MUSTPASS(base->StopReceive(ch));
+
+    // Call StartSend before StartReceive
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345));
+    TEST_MUSTPASS(base->SetSendDestination(ch, 12345, "127.0.0.1"));
+
+    TEST_MUSTPASS(base->StartSend(ch));
+    TEST_MUSTPASS(base->StartReceive(ch));
+    TEST_MUSTPASS(base->StartPlayout(ch));
+
+    TEST_LOG("\nfull duplex is now activated (2)\n");
+
+    PAUSE
+
+    TEST_MUSTPASS(base->StopSend(ch));
+    TEST_MUSTPASS(base->StopPlayout(ch));
+    TEST_MUSTPASS(base->StopReceive(ch));
+
+    // Try again using same ports
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345));
+    TEST_MUSTPASS(base->SetSendDestination(ch, 12345, "127.0.0.1"));
+
+    TEST_MUSTPASS(base->StartSend(ch));
+    TEST_MUSTPASS(base->StartReceive(ch));
+    TEST_MUSTPASS(base->StartPlayout(ch));
+
+    TEST_LOG("\nfull duplex is now activated (3)\n");
+    TEST_LOG("waiting for RTCP packet...\n");
+
+    SLEEP(7000); // Make sure we get RTCP packet
+    PAUSE
+
+    // Verify correct RTCP source port
+    TEST_MUSTPASS(netw->GetSourceInfo(ch, rtpPort, rtcpPort, ipAddr));
+    TEST_MUSTPASS(12345+1 != rtcpPort);
+    TEST_MUSTPASS(base->StopSend(ch));
+    TEST_MUSTPASS(base->StopPlayout(ch));
+    TEST_MUSTPASS(base->StopReceive(ch));
+
+    base->DeleteChannel(ch);
+    ch = base->CreateChannel();
+
+    // Try with extra send socket
+    TEST_MUSTPASS(base->SetLocalReceiver(ch , 22222));
+    TEST_MUSTPASS(base->SetSendDestination(ch, 22222, "127.0.0.1", 11111));
+
+    TEST_MUSTPASS(base->StartReceive(ch));
+    TEST_MUSTPASS(base->StartSend(ch));
+    TEST_MUSTPASS(base->StartPlayout(ch));
+
+    TEST_LOG("\nfull duplex is now activated (4)\n");
+
+    PAUSE
+
+    TEST_MUSTPASS(base->StopSend(ch));
+    TEST_MUSTPASS(base->StopPlayout(ch));
+    TEST_MUSTPASS(base->StopReceive(ch));
+
+    // repeat default case starting with a fresh channel
+
+    base->DeleteChannel(ch);
+    ch = base->CreateChannel();
+    TEST_MUSTPASS(base->SetLocalReceiver(ch , 12345));
+    TEST_MUSTPASS(base->SetSendDestination(ch, 12345, "127.0.0.1"));
+
+    TEST_MUSTPASS(base->StartReceive(ch));
+    TEST_MUSTPASS(base->StartSend(ch));
+    TEST_MUSTPASS(base->StartPlayout(ch));
+
+    TEST_LOG("\nfull duplex is now activated (5)\n");
+
+    PAUSE
+
+    TEST_MUSTPASS(base->StopSend(ch));
+    TEST_MUSTPASS(base->StopPlayout(ch));
+    TEST_MUSTPASS(base->StopReceive(ch));
+
+    // restart call again
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 12345));
+    TEST_MUSTPASS(base->StartReceive(ch));
+    TEST_MUSTPASS(base->StartPlayout(ch));
+    TEST_MUSTPASS(base->StartSend(ch));
+
+    TEST_LOG("\nfull duplex is now activated (6)\n");
+
+    PAUSE
+
+    TEST_MUSTPASS(base->StopSend(ch));
+    TEST_MUSTPASS(base->StopPlayout(ch));
+    TEST_MUSTPASS(base->StopReceive(ch));
+
+    // force sending from new socket
+    TEST_MUSTPASS(base->SetLocalReceiver(ch , 12345));
+    TEST_MUSTPASS(base->SetSendDestination(ch, 12345, "127.0.0.1", 12350,
+                                           12359));
+    TEST_MUSTPASS(base->StartReceive(ch));
+    TEST_MUSTPASS(base->StartPlayout(ch));
+    TEST_MUSTPASS(base->StartSend(ch));
+    TEST_LOG("\nfull duplex is now activated (7)\n");
+
+    PAUSE
+
+    // Test getting send settings
+    TEST_MUSTPASS(base->GetSendDestination(ch, rtpPort, ipAddr, sourcePort,
+                                           rtcpPort));
+    TEST_MUSTPASS(12345 != rtpPort);
+    TEST_MUSTPASS(_stricmp("127.0.0.1", ipAddr));
+    TEST_MUSTPASS(12350 != sourcePort);
+    TEST_MUSTPASS(12359 != rtcpPort);
+
+    TEST_MUSTPASS(base->StopSend(ch));
+    TEST_MUSTPASS(base->StopPlayout(ch));
+    TEST_MUSTPASS(base->StopReceive(ch));
+
+    // new channel and new port
+    ch = base->CreateChannel();
+
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 33221));
+    TEST_MUSTPASS(base->SetSendDestination(ch, 33221, "127.0.0.1"));
+
+    TEST_MUSTPASS(base->StartReceive(ch));
+    TEST_MUSTPASS(base->StartPlayout(ch));
+    TEST_MUSTPASS(base->StartSend(ch));
+
+    TEST_LOG("\nfull duplex is now activated (8)\n");
+
+    PAUSE
+
+    TEST_MUSTPASS(base->StopSend(ch));
+    TEST_MUSTPASS(base->StopPlayout(ch));
+    TEST_MUSTPASS(base->StopReceive(ch));
+
+    base->DeleteChannel(ch);
+    ch = base->CreateChannel();
+
+#ifndef MAC_IPHONE
+    // bind to local IP and try again
+    strcpy(localIp, "127.0.0.1");
+#else
+    localIp = "127.0.0.1";
+#endif
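+    // Note: on most builds localIp appears to be a writable char buffer
+    // (hence the strcpy), while the MAC_IPHONE build declares it in a way
+    // that only allows direct pointer assignment, as seen above.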
+
+    TEST_MUSTPASS(base->SetLocalReceiver(ch, 33221, 12349, localIp));
+    TEST_MUSTPASS(base->SetSendDestination(ch, 33221, localIp));
+
+    TEST_MUSTPASS(base->StartReceive(ch));
+    TEST_MUSTPASS(base->StartPlayout(ch));
+    TEST_MUSTPASS(base->StartSend(ch));
+
+    TEST_LOG("\nfull duplex is now activated (9)\n");
+
+    PAUSE
+
+    TEST_MUSTPASS(base->GetLocalReceiver(ch, rtpPort, rtcpPort, ipAddr));
+    TEST_MUSTPASS(33221 != rtpPort);
+    TEST_MUSTPASS(_stricmp(localIp, ipAddr));
+    TEST_MUSTPASS(12349 != rtcpPort);
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    //////////////////////
+    // Trace filter tests
+
+#ifdef _USE_EXTENDED_TRACE_
+    TEST(SetTraceFilter); ANL();
+
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename(""
+        "VoEBase_trace_filter.txt"))); MARK();
+    SLEEP(100);
+
+    // Test a few different filters, verify in trace file
+    // Each SetTraceFilter call should be seen exactly once, no more, no less
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceNone)); MARK();
+    SLEEP(300);
+    // API call and info should NOT be seen in log
+    TEST_MUSTPASS(base->SetOnHoldStatus(0, true)); MARK();
+    // API call and error should NOT be seen in log
+    TEST_MUSTPASS(!base->SetOnHoldStatus(999, true)); MARK();
+
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceApiCall |
+                                              kTraceCritical |
+                                              kTraceError |
+                                              kTraceWarning)); MARK();
+    SLEEP(300);
+    // API call should be seen in the log, but info should NOT
+    TEST_MUSTPASS(base->SetOnHoldStatus(0, false)); MARK();
+    // API call and error should be seen in log
+    TEST_MUSTPASS(!base->SetOnHoldStatus(999, true)); MARK();
+
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceApiCall | kTraceInfo));
+    MARK();
+    SLEEP(300);
+    // API call and info should be seen in log
+    TEST_MUSTPASS(base->SetOnHoldStatus(0, true)); MARK();
+    // API call should be seen in the log, but the error should NOT
+    TEST_MUSTPASS(!base->SetOnHoldStatus(999, true)); MARK();
+
+    // Back to default
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceAll)); MARK();
+    SLEEP(300);
+
+    AOK(); ANL();
+#endif
+
+    // ------------------------------------------------------------------------
+    // >> Multiple instance testing
+    //
+    // We should support 8 instances simultaneously, and at least one of them
+    // should be able to have a call running
+
+    // One instance is already created
+    VoiceEngine* instVE[7];
+    VoEBase* baseVE[7];
+    for (int instNum = 0; instNum < 7; instNum++)
+    {
+        instVE[instNum] = VoiceEngine::Create();
+        baseVE[instNum] = VoEBase::GetInterface(instVE[instNum]);
+        TEST_MUSTPASS(baseVE[instNum]->Init());
+        TEST_MUSTPASS(baseVE[instNum]->CreateChannel());
+    }
+
+    TEST_LOG("Created 7 more instances of VE, make sure audio is ok...\n\n");
+    PAUSE
+
+    for (int instNum = 0; instNum < 7; instNum++)
+    {
+        TEST_MUSTPASS(baseVE[instNum]->DeleteChannel(0));
+        TEST_MUSTPASS(baseVE[instNum]->Terminate());
+        TEST_MUSTPASS(baseVE[instNum]->Release());
+        VoiceEngine::Delete(instVE[instNum]);
+    }
+
+    AOK();
+    ANL();
+
+    //////////////
+    // Close down
+
+    TEST_MUSTPASS(base->StopSend(ch));
+    TEST_MUSTPASS(base->StopPlayout(ch));
+    TEST_MUSTPASS(base->StopReceive(ch));
+    TEST_MUSTPASS(base->DeleteChannel(ch));
+
+
+    base->DeleteChannel(0);
+    TEST_MUSTPASS(base->Terminate());
+
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest::TestCallReport
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestCallReport()
+{
+    // Get required sub-API pointers
+    VoEBase* base = _mgr.BasePtr();
+    VoEAudioProcessing* apm = _mgr.APMPtr();
+    VoECallReport* report = _mgr.CallReportPtr();
+    VoERTP_RTCP* rtp_rtcp = _mgr.RTP_RTCPPtr();
+    VoENetwork* netw = _mgr.NetworkPtr();
+    VoEFile* file = _mgr.FilePtr();
+
+    PrepareTest("CallReport");
+
+    // check if this interface is supported
+    if (!report)
+    {
+        TEST_LOG("VoECallReport is not supported!");
+        return -1;
+    }
+
+#ifdef _USE_EXTENDED_TRACE_
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename("VoECallReport_trace.txt")));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+                                              kTraceWarning |
+                                              kTraceError |
+                                              kTraceCritical |
+                                              kTraceApiCall |
+                                              kTraceMemory |
+                                              kTraceInfo));
+#endif
+
+    TEST_MUSTPASS(base->Init());
+    TEST_MUSTPASS(base->CreateChannel());
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 12345));
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, micFile, true, true));
+
+    ///////////////////////////
+    // Actual test starts here
+
+    // TODO(xians): Enable the tests when APM is ready.
+    /*
+    TEST(ResetCallReportStatistics);
+    ANL();
+    TEST_MUSTPASS(!report->ResetCallReportStatistics(-2));
+    MARK(); // not OK
+    TEST_MUSTPASS(!report->ResetCallReportStatistics(1));
+    MARK(); // not OK
+    TEST_MUSTPASS(report->ResetCallReportStatistics(0));
+    MARK(); // OK
+    TEST_MUSTPASS(report->ResetCallReportStatistics(-1));
+    MARK(); // OK
+    AOK();
+    ANL();
+
+    LevelStatistics stats;
+    bool enabled;
+    TEST(GetSpeechAndNoiseSummary);
+    ANL();
+    TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
+    TEST_MUSTPASS(enabled != false);
+    // All values should be -100 dBm0 when metrics are disabled
+    TEST_MUSTPASS(report->GetSpeechAndNoiseSummary(stats));
+    MARK();
+    TEST_MUSTPASS(stats.noise_rx.min != -100);
+    TEST_MUSTPASS(stats.noise_rx.max != -100);
+    TEST_MUSTPASS(stats.noise_rx.average != -100);
+    TEST_MUSTPASS(stats.noise_tx.min != -100);
+    TEST_MUSTPASS(stats.noise_tx.max != -100);
+    TEST_MUSTPASS(stats.noise_tx.average != -100);
+    TEST_MUSTPASS(stats.speech_rx.min != -100);
+    TEST_MUSTPASS(stats.speech_rx.max != -100);
+    TEST_MUSTPASS(stats.speech_rx.average != -100);
+    TEST_MUSTPASS(stats.speech_tx.min != -100);
+    TEST_MUSTPASS(stats.speech_tx.max != -100);
+    TEST_MUSTPASS(stats.speech_tx.average != -100);
+    // 
+    TEST_MUSTPASS(apm->SetMetricsStatus(true));
+    SLEEP(7000);
+    // All values should *not* be -100 dBm0 when metrics are enabled (check
+    // Rx side only since user might be silent)
+    TEST_MUSTPASS(report->GetSpeechAndNoiseSummary(stats));
+    MARK();
+    TEST_MUSTPASS(stats.noise_rx.min == -100);
+    TEST_MUSTPASS(stats.noise_rx.max == -100);
+    TEST_MUSTPASS(stats.noise_rx.average == -100);
+    TEST_MUSTPASS(stats.speech_rx.min == -100);
+    TEST_MUSTPASS(stats.speech_rx.max == -100);
+    TEST_MUSTPASS(stats.speech_rx.average == -100);
+    AOK();
+    ANL();
+
+    EchoStatistics echo;
+    TEST(GetEchoMetricSummary);
+    ANL();
+    TEST_MUSTPASS(report->GetEchoMetricSummary(echo)); // all outputs will be
+                                       // -100 in loopback (skip further tests)
+    AOK();
+    ANL();
+
+    StatVal delays;
+    TEST(GetRoundTripTimeSummary);
+    ANL();
+    // All values should be >=0 since RTCP is now on
+    TEST_MUSTPASS(report->GetRoundTripTimeSummary(0, delays));
+    MARK();
+    TEST_MUSTPASS(delays.min == -1);
+    TEST_MUSTPASS(delays.max == -1);
+    TEST_MUSTPASS(delays.average == -1);
+    rtp_rtcp->SetRTCPStatus(0, false);
+    // All values should be -1 since RTCP is off
+    TEST_MUSTPASS(report->GetRoundTripTimeSummary(0, delays));
+    MARK();
+    TEST_MUSTPASS(delays.min != -1);
+    TEST_MUSTPASS(delays.max != -1);
+    TEST_MUSTPASS(delays.average != -1);
+    rtp_rtcp->SetRTCPStatus(0, true);
+    AOK();
+    ANL();
+
+    int nDead(0);
+    int nAlive(0);
+    TEST(GetDeadOrAliveSummary);
+    ANL();
+    // The call should fail (return -1) since dead-or-alive is not active
+    TEST_MUSTPASS(report->GetDeadOrAliveSummary(0, nDead, nAlive) != -1);
+    MARK();
+    TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, true, 1));
+    SLEEP(2000);
+    // All results should be >= 0 since dead-or-alive is active
+    TEST_MUSTPASS(report->GetDeadOrAliveSummary(0, nDead, nAlive));
+    MARK();
+    TEST_MUSTPASS(nDead == -1);
+    TEST_MUSTPASS(nAlive == -1);
+    TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, false));
+    AOK();
+    ANL();
+
+    TEST(WriteReportToFile);
+    ANL();
+
+    // Greek and Coptic (see http://www.utf8-chartable.de/unicode-utf8-table.pl)
+    char fileNameUTF8[64];
+
+    fileNameUTF8[0] = (char) 0xce;
+    fileNameUTF8[1] = (char) 0xba;
+    fileNameUTF8[2] = (char) 0xce;
+    fileNameUTF8[3] = (char) 0xbb;
+    fileNameUTF8[4] = (char) 0xce;
+    fileNameUTF8[5] = (char) 0xbd;
+    fileNameUTF8[6] = (char) 0xce;
+    fileNameUTF8[7] = (char) 0xbe;
+    fileNameUTF8[8] = '.';
+    fileNameUTF8[9] = 't';
+    fileNameUTF8[10] = 'x';
+    fileNameUTF8[11] = 't';
+    fileNameUTF8[12] = 0;
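+    // For reference, the bytes above are simply the UTF-8 encoding of
+    // "κλνξ.txt"; an equivalent (hypothetical) declaration would be:
+    //   const char fileNameUTF8[] = "\xce\xba\xce\xbb\xce\xbd\xce\xbe.txt";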
+
+    TEST_MUSTPASS(!report->WriteReportToFile(NULL));
+    MARK();
+    TEST_MUSTPASS(report->WriteReportToFile("call_report.txt"));
+    MARK();
+    TEST_MUSTPASS(report->WriteReportToFile(fileNameUTF8));
+    MARK(); // should work with UTF-8 as well (κλνξ.txt)
+    AOK();
+    ANL();
+*/
+    TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->Terminate());
+
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest::TestCodec
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestCodec()
+{
+    PrepareTest("Codec");
+
+    VoEBase* base = _mgr.BasePtr();
+    VoECodec* codec = _mgr.CodecPtr();
+    VoEFile* file = _mgr.FilePtr();
+    VoENetwork* netw = _mgr.NetworkPtr();
+
+#ifdef _USE_EXTENDED_TRACE_
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename("VoECodec_trace.txt")));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+                                              kTraceWarning |
+                                              kTraceError |
+                                              kTraceCritical |
+                                              kTraceApiCall |
+                                              kTraceMemory |
+                                              kTraceInfo));
+#endif
+
+    TEST_MUSTPASS(base->Init());
+    TEST_MUSTPASS(base->CreateChannel());
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+    ExtendedTestTransport* ptrTransport(NULL);
+    ptrTransport = new ExtendedTestTransport(netw);
+    TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+#else
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 12345));
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, "127.0.0.1"));
+#endif
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+
+    ///////////////////////////
+    // Actual test starts here
+
+    int i;
+    int err;
+
+    CodecInst cinst;
+
+    /////////////////////////
+    // GetNumOfCodecs
+
+    int nCodecs;
+
+    TEST(GetNumOfCodecs);
+    ANL();
+    // validate #codecs
+    nCodecs = codec->NumOfCodecs();
+    MARK();
+    TEST_MUSTPASS(nCodecs < 0);
+    AOK();
+    ANL();
+
+    ///////////////////
+    // GetCodec
+
+    TEST(GetCodec);
+    ANL();
+    // scan all supported codecs 
+    nCodecs = codec->NumOfCodecs();
+    for (int index = 0; index < nCodecs; index++)
+    {
+        TEST_MUSTPASS(codec->GetCodec(index, cinst));
+        TEST_LOG("[%2d] %16s: fs=%6d, pt=%4d, rate=%7d, ch=%2d, size=%5d",
+                 index, cinst.plname, cinst.plfreq, cinst.pltype, cinst.rate,
+                 cinst.channels, cinst.pacsize);
+        if (cinst.pltype == -1)
+        {
+            TEST_LOG(" <= NOTE pt=-1\n");
+        } else
+        {
+            ANL();
+        }
+    }
+
+    // ensure that an invalid index parameter is detected
+    TEST_MUSTPASS(-1 != codec->GetCodec(-1, cinst));
+    nCodecs = codec->NumOfCodecs();
+    TEST_MUSTPASS(-1 != codec->GetCodec(nCodecs, cinst));
+    MARK();
+    // ensure that error code is VE_INVALID_LISTNR
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_INVALID_LISTNR);
+    AOK();
+    ANL();
+
+    ///////////////////////
+    // GetSendCodec
+
+    TEST(GetSendCodec);
+    ANL();
+
+    CodecInst defaultCodec;
+
+    // check the channel parameter
+    int nMaxChannels(base->MaxNumOfChannels());
+    TEST_MUSTPASS(-1 != codec->GetSendCodec(nMaxChannels-1, cinst));
+    MARK(); // not created
+    TEST_MUSTPASS(-1 != codec->GetSendCodec(nMaxChannels, cinst));
+    MARK(); // out of range
+    TEST_MUSTPASS(-1 != codec->GetSendCodec(-1, cinst));
+    MARK(); // out of range
+    TEST_MUSTPASS(codec->GetSendCodec(0, cinst));
+    MARK(); // OK
+
+    nCodecs = codec->NumOfCodecs();
+    for (int index = 0; index < nCodecs; index++)
+    {
+        TEST_MUSTPASS(codec->GetCodec(index, defaultCodec));
+        if (codec->SetSendCodec(0, defaultCodec) == 0)
+        {
+            TEST_MUSTPASS(codec->GetSendCodec(0, cinst));
+            MARK();
+            //TEST_LOG("[%2d] %s: fs=%d, pt=%d, rate=%d, ch=%d, size=%d\n", 
+            // index, cinst.plname, cinst.plfreq, cinst.pltype, cinst.rate,
+            // cinst.channels, cinst.pacsize);
+            TEST_MUSTPASS(cinst.pacsize != defaultCodec.pacsize);
+            TEST_MUSTPASS(cinst.plfreq != defaultCodec.plfreq);
+            TEST_MUSTPASS(cinst.pltype != defaultCodec.pltype);
+            TEST_MUSTPASS(cinst.rate != defaultCodec.rate);
+            TEST_MUSTPASS(cinst.channels != defaultCodec.channels);
+        }
+    }
+
+    ANL();
+    AOK();
+    ANL();
+
+    ///////////////////////
+    // SetSendCodec
+
+    TEST(SetSendCodec);
+    ANL();
+
+    // --- Scan all supported codecs and set default parameters
+
+    nCodecs = codec->NumOfCodecs();
+    for (int index = 0; index < nCodecs; index++)
+    {
+        // Get default (ACM) settings
+        TEST_MUSTPASS(codec->GetCodec(index, cinst));
+        defaultCodec = cinst;
+        TEST_LOG("[%2d] %s (default): fs=%d, pt=%d, rate=%d, ch=%d, size=%d\n",
+                 index, cinst.plname, cinst.plfreq, cinst.pltype, cinst.rate,
+                 cinst.channels, cinst.pacsize);
+
+        // Verify codecs that are not valid as send codecs
+        if (!_stricmp("CN", cinst.plname) || !_stricmp("telephone-event",
+                                                       cinst.plname)
+            || !_stricmp("red", cinst.plname))
+        {
+            // default settings for these payload names should be rejected
+            // (should give VE_INVALID_ARGUMENT)
+            TEST_MUSTPASS(!codec->SetSendCodec(0, cinst));
+            err = base->LastError();
+            TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+            continue;
+        }
+
+        // If the ACM is built with more codecs than there are payload types,
+        // some codecs will be given -1 as default payload type. This fix
+        // ensures that we can complete these tests in that case as well.
+        if (cinst.pltype == -1)
+        {
+            cinst.pltype = 97;
+        }
+
+        // --- Default settings
+
+        TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+        // --- Packet size
+
+        TEST_LOG("\npacsize : ");
+
+        for (int pacsize = 80; pacsize < 1440; pacsize += 80)
+        {
+            cinst.pacsize = pacsize;
+            if (-1 != codec->SetSendCodec(0, cinst))
+            {
+                // log valid packet size
+                TEST_LOG("%d ", pacsize);
+            } else
+            {
+                err = base->LastError();
+                TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+            }
+        }
+        cinst.pacsize = defaultCodec.pacsize;
+
+        // --- Audio channels (1/mono or 2/stereo)
+
+        TEST_LOG("\nchannels: ");
+        for (int channels = 1; channels < 4; channels++)
+        {
+            cinst.channels = channels;
+            if (-1 != codec->SetSendCodec(0, cinst))
+            {
+                // valid channels (only 1 should be OK)
+                TEST_LOG("%d ", channels);
+            } else
+            {
+                err = base->LastError();
+                TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+            }
+        }
+        cinst.channels = defaultCodec.channels;
+
+        // --- Payload frequency
+
+        TEST_LOG("\nplfreq  : ");
+        cinst.plfreq = defaultCodec.plfreq;
+        TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+        TEST_LOG("%d ", cinst.plfreq);
+
+        // --- Payload name
+
+        strcpy(cinst.plname, "INVALID");
+        TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst))
+        {
+            // ensure that error code is VE_INVALID_ARGUMENT
+            err = base->LastError();
+            TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+        }
+
+        // restore default plname
+        strcpy(cinst.plname, defaultCodec.plname);
+
+        // --- Payload type (dynamic range is 96-127)
+
+        TEST_LOG("\npltype  : ");
+        // All PTs should be OK; test a few different ones
+        cinst.pltype = defaultCodec.pltype;
+        TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+        TEST_LOG("%d ", cinst.pltype);
+        cinst.pltype = defaultCodec.pltype + 1;
+        TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+        TEST_LOG("%d ", cinst.pltype);
+        const int valid_pltypes[4] = { 0, 96, 117, 127 };
+        for (i = 0;
+            i < static_cast<int> (sizeof(valid_pltypes) / sizeof(int));
+            i++)
+        {
+            cinst.pltype = valid_pltypes[i];
+            TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+            TEST_LOG("%d ", cinst.pltype);
+        }
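+        // The sizeof(valid_pltypes) / sizeof(int) bound used here (and in the
+        // rate loops below) could also be written with a small helper defined
+        // at file scope (a sketch only, not part of this test harness):
+        //   template <typename T, size_t N>
+        //   size_t arraysize(const T (&)[N]) { return N; }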
+        // Restore default
+        cinst.pltype = defaultCodec.pltype;
+
+        // --- Codec rate
+
+        TEST_LOG("\nrate    : ");
+        if (_stricmp("isac", cinst.plname) == 0)
+        {
+            // ISAC
+            if (cinst.plfreq == 16000)
+            {
+                int valid_rates[3] = { -1, 10000, 32000 };
+                // failed in RegisterPayload when rate is 32000
+                for (i = 0; i < static_cast<int> (sizeof(valid_rates)
+                    / sizeof(int)); i++)
+                {
+                    cinst.rate = valid_rates[i];
+                    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+                    TEST_LOG("%d ", cinst.rate);
+                }
+                cinst.rate = 0; // invalid
+                TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst))
+                {
+                    // ensure that error code is VE_INVALID_ARGUMENT
+                    err = base->LastError();
+                    TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+                }
+                ANL();
+            } else //ISACSWB
+            {
+                // rate changing fails in RegisterPayload
+                int valid_rates[8] = { -1, 10000, 25000, 32000, 35000, 45000,
+                        50000, 52000 };
+                for (i = 0; i < static_cast<int> (sizeof(valid_rates)
+                    / sizeof(int)); i++)
+                {
+                    cinst.rate = valid_rates[i];
+                    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+                    TEST_LOG("%d ", cinst.rate);
+                }
+                int invalid_rates[3] = { 0, 5000, 57000 }; // invalid
+                for (i = 0; i < static_cast<int> (sizeof(invalid_rates)
+                    / sizeof(int)); i++)
+                {
+                    cinst.rate = invalid_rates[i];
+                    TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst))
+                    {
+                        // ensure that error code is VE_INVALID_ARGUMENT
+                        err = base->LastError();
+                        TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+                    }
+                }
+                ANL();
+            }
+        }
+        else if (_stricmp("amr", cinst.plname) == 0)
+        {
+            int valid_rates[8] = { 4750, 5150, 5900, 6700, 7400, 7950, 10200,
+                    12200 };
+            for (i = 0; i
+                < static_cast<int> (sizeof(valid_rates) / sizeof(int)); i++)
+            {
+                cinst.rate = valid_rates[i];
+                TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+                TEST_LOG("%d ", cinst.rate);
+            }
+            ANL();
+        } else if (_stricmp("g7291", cinst.plname) == 0)
+        {
+            int valid_rates[12] = { 8000, 12000, 14000, 16000, 18000, 20000,
+                    22000, 24000, 26000, 28000, 30000, 32000 };
+            for (i = 0; i
+                < static_cast<int> (sizeof(valid_rates) / sizeof(int)); i++)
+            {
+                cinst.rate = valid_rates[i];
+                TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+                TEST_LOG("%d ", cinst.rate);
+            }
+            ANL();
+        } else if (_stricmp("amr-wb", cinst.plname) == 0)
+        {
+            int valid_rates[9] = { 7000, 9000, 12000, 14000, 16000, 18000,
+                    20000, 23000, 24000 };
+            for (i = 0; i
+                < static_cast<int> (sizeof(valid_rates) / sizeof(int)); i++)
+            {
+                cinst.rate = valid_rates[i];
+                TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+                TEST_LOG("%d ", cinst.rate);
+            }
+            TEST_LOG(" <=> ");
+            ANL();
+        } else if (_stricmp("speex", cinst.plname) == 0)
+        {
+            // Valid speex rates are > 2000, testing some of them here
+            int valid_rates[9] = { 2001, 4000, 7000, 11000, 15000, 20000,
+                    25000, 33000, 46000 };
+            for (i = 0; i
+                < static_cast<int> (sizeof(valid_rates) / sizeof(int)); i++)
+            {
+                cinst.rate = valid_rates[i];
+                TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+                TEST_LOG("%d ", cinst.rate);
+            }
+            cinst.rate = 2000; // invalid
+            TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst))
+            {
+                err = base->LastError();
+                TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+            }
+            ANL();
+        } else if (_stricmp("silk", cinst.plname) == 0)
+        {
+            // Valid Silk rates are 6000 - 40000, listing some of them here
+            int valid_rates[7] = { 6000, 10000, 15000, 20000, 25000, 32000,
+                    40000 };
+            for (i = 0; i
+                < static_cast<int> (sizeof(valid_rates) / sizeof(int)); i++)
+            {
+                cinst.rate = valid_rates[i];
+                TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+                TEST_LOG("%d ", cinst.rate);
+            }
+            cinst.rate = 5999; // invalid
+            TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst))
+            {
+                err = base->LastError();
+                TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+            }
+            cinst.rate = 40001; // invalid
+            TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst))
+            {
+                err = base->LastError();
+                TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+            }
+            ANL();
+        } else
+        {
+            // Use default rate for all other codecs.
+            cinst.rate = defaultCodec.rate;
+            TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+            TEST_LOG("%d ", cinst.rate);
+            cinst.rate = defaultCodec.rate + 17;
+            TEST_MUSTPASS(!codec->SetSendCodec(0, cinst));
+            err = base->LastError();
+            TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+            ANL();
+        }
+        cinst.rate = defaultCodec.rate;
+
+        // run some extra tests for L16
+        if (_stricmp("l16", cinst.plname) == 0)
+        {
+            if (8000 == cinst.plfreq)
+            {
+                // valid pacsizes: 80, 160, 240, 320
+                cinst.pacsize = 480; // only supported in combination with 16kHz
+                TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst));
+                err = base->LastError();
+                TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+                cinst.pacsize = 640; // only supported in combination with 16kHz
+                TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst));
+                err = base->LastError();
+                TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+            } else
+            {
+                // valid pacsizes: 160, 320, 480, 640
+                cinst.pacsize = 80; // only supported in combination with 8kHz
+                TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst));
+                err = base->LastError();
+                TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+                cinst.pacsize = 240; // only supported in combination with 8kHz
+                TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst));
+                err = base->LastError();
+                TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+            }
+        }
+        ANL();
+    } // for (int index = 0; index < nCodecs; index++)
+
+    // restore PCMU
+    const CodecInst tmp = { 0, "PCMU", 8000, 160, 1, 64000 };
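+    // (Aggregate order assumed from the field assignments elsewhere in this
+    // file: pltype, plname, plfreq, pacsize, channels, rate; i.e. PCMU,
+    // pt 0, 8 kHz, 20 ms frames, mono, 64 kbps.)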
+    TEST_MUSTPASS(codec->SetSendCodec(0, tmp));
+
+    ANL();
+    AOK();
+    ANL();
+
+    ///////
+    // VAD
+
+    const int VADSleep = 0;
+
+    bool disabledDTX;
+    VadModes mode;
+    bool enabled;
+
+    // verify default settings (should be OFF, kVadConventional and DTX enabled)
+    TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+    TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+             disabledDTX);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(mode != kVadConventional);
+    TEST_MUSTPASS(disabledDTX != true);
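+    // (The VAD calls below follow the pattern
+    //  SetVADStatus(channel, enable, mode, disableDTX); note that the last
+    //  flag, when true, disables DTX even though VAD itself stays enabled.)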
+
+    // enable default VAD settings
+    TEST_MUSTPASS(codec->SetVADStatus(0, true));
+    TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+    TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+             disabledDTX);
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(mode != kVadConventional);
+    TEST_MUSTPASS(disabledDTX != false);
+    SLEEP(VADSleep);
+
+    // set kVadConventional mode
+    TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadConventional));
+    TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+    TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+             disabledDTX);
+    TEST_MUSTPASS(mode != kVadConventional);
+    SLEEP(VADSleep);
+
+    // set kVadAggressiveLow mode
+    TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadAggressiveLow));
+    TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+    TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+             disabledDTX);
+    TEST_MUSTPASS(mode != kVadAggressiveLow);
+    SLEEP(VADSleep);
+
+    // set kVadAggressiveMid mode
+    TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadAggressiveMid));
+    TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+    TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+             disabledDTX);
+    TEST_MUSTPASS(mode != kVadAggressiveMid);
+    SLEEP(VADSleep);
+
+    // set kVadAggressiveHigh mode
+    TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadAggressiveHigh));
+    TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+    TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+             disabledDTX);
+    TEST_MUSTPASS(mode != kVadAggressiveHigh);
+    SLEEP(VADSleep);
+
+    // turn DTX OFF (audio should not be affected by VAD decisions)
+    TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadConventional, true));
+    TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+    TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+             disabledDTX);
+    TEST_MUSTPASS(disabledDTX != true);
+    SLEEP(VADSleep);
+
+    // try to enable DTX again (should fail since VAD is disabled)
+    TEST_MUSTPASS(codec->SetVADStatus(0, false, kVadConventional, false));
+    TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+    TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+             disabledDTX);
+    TEST_MUSTPASS(disabledDTX == false);
+    SLEEP(VADSleep);
+
+    // disable VAD 
+    TEST_MUSTPASS(codec->SetVADStatus(0, false));
+    TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+    TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+             disabledDTX);
+    TEST_MUSTPASS(enabled != false);
+    SLEEP(VADSleep);
+
+    // restore default VAD 
+    TEST_MUSTPASS(codec->SetVADStatus(0, true));
+    TEST_MUSTPASS(codec->SetVADStatus(0, false));
+    TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+    TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+             disabledDTX);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(mode != kVadConventional);
+    TEST_MUSTPASS(disabledDTX != true);
+    SLEEP(VADSleep);
+
+    AOK();
+    ANL();
+    ANL();
+
+    //////////////////////
+    // GetRecCodec
+
+    TEST(GetRecCodec);
+    ANL();
+
+    // stop all streaming first
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+
+    // start loopback streaming (PCMU is default)
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    TEST_MUSTPASS(base->SetSendDestination(0, 8000, "127.0.0.1"));
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 8000));
+#endif
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(100); // ensure that at least one packet is received
+
+    // scan all supported and valid codecs
+    CodecInst newCodec;
+    for (i = 0; i < codec->NumOfCodecs(); i++)
+    {
+        TEST_MUSTPASS(codec->GetCodec(i, newCodec));
+        // test all valid send codecs
+        if (!_stricmp("red", newCodec.plname) || !_stricmp("cn",
+                                                           newCodec.plname)
+            || !_stricmp("telephone-event", newCodec.plname))
+        {
+            continue; // Ignore these
+        }
+        if (-1 != codec->SetSendCodec(0, newCodec))
+        {
+            SLEEP(150);
+            // verify correct detection
+            TEST_MUSTPASS(codec->GetRecCodec(0, cinst));
+            TEST_LOG("%s %s ", newCodec.plname, cinst.plname);
+            TEST_MUSTPASS(_stricmp(newCodec.plname, cinst.plname) != 0);
+            TEST_MUSTPASS(cinst.pltype != newCodec.pltype);
+            TEST_MUSTPASS(cinst.plfreq != newCodec.plfreq);
+        }
+    }
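+    // Note: the comparisons above rely on the stream being looped back
+    // locally, so the codec detected by GetRecCodec mirrors whatever send
+    // codec was just set.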
+
+    // stop streaming
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+#ifdef WEBRTC_CODEC_GSMAMR
+    //////////////////////////
+    // SetAMREncFormat
+
+    // Fresh channel
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->CreateChannel());
+
+    TEST(SetAMREncFormat); ANL();
+
+    //set another codec which is not AMR
+    TEST_MUSTPASS(codec->GetCodec(0, cinst));
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    //try to change the encode format, tests should fail
+    TEST_MUSTPASS(-1 != codec->SetAMREncFormat(0)); MARK();
+    TEST_MUSTPASS(-1 != codec->SetAMREncFormat(0, kRfc3267BwEfficient));
+    MARK();
+    TEST_MUSTPASS(-1 != codec->SetAMREncFormat(0, kRfc3267OctetAligned));
+    MARK();
+    TEST_MUSTPASS(-1 != codec->SetAMREncFormat(0, kRfc3267FileStorage));
+    MARK();
+
+    //set AMR as encoder
+    strcpy(cinst.plname,"AMR");
+    cinst.channels=1; cinst.plfreq=8000; cinst.rate=12200; cinst.pltype=112;
+    cinst.pacsize=160;
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    //try to change the encode format, tests should pass
+    TEST_MUSTPASS(codec->SetAMREncFormat(0)); MARK();
+    TEST_MUSTPASS(codec->SetAMREncFormat(0, kRfc3267BwEfficient)); MARK();
+    TEST_MUSTPASS(codec->SetAMREncFormat(0, kRfc3267OctetAligned)); MARK();
+    TEST_MUSTPASS(codec->SetAMREncFormat(0, kRfc3267FileStorage)); MARK();
+    TEST_MUSTPASS(-1 != codec->SetAMREncFormat(-1)); MARK();
+    TEST_MUSTPASS(codec->SetAMREncFormat(0)); MARK(); // restore default
+
+    ANL();
+    AOK();
+    ANL();
+
+    //////////////////////////
+    // SetAMRDecFormat
+
+    TEST(SetAMRDecFormat); ANL();
+
+    // It should not be possible to set the AMR dec format before a valid AMR
+    // decoder is registered
+    TEST_MUSTPASS(!codec->SetAMRDecFormat(0)); MARK();
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_AUDIO_CODING_MODULE_ERROR);
+
+    // Ensure that ACM::RegisterReceiveCodec(AMR) is called
+    TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+
+    // All these tests should now pass
+    TEST_MUSTPASS(codec->SetAMRDecFormat(0)); MARK();
+    TEST_MUSTPASS(codec->SetAMRDecFormat(0, kRfc3267BwEfficient)); MARK();
+    TEST_MUSTPASS(codec->SetAMRDecFormat(0, kRfc3267OctetAligned)); MARK();
+    TEST_MUSTPASS(codec->SetAMRDecFormat(0, kRfc3267FileStorage)); MARK();
+    TEST_MUSTPASS(-1 != codec->SetAMRDecFormat(-1)); MARK();
+    TEST_MUSTPASS(codec->SetAMRDecFormat(0)); MARK(); // restore default
+
+    ANL();
+    AOK();
+    ANL();
+#endif // #ifdef WEBRTC_CODEC_GSMAMR
+
+#ifdef WEBRTC_CODEC_GSMAMRWB
+    //////////////////////////
+    // SetAMRWbEncFormat
+
+    // Fresh channel
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->CreateChannel());
+
+    TEST(SetAMRWbEncFormat); ANL();
+
+    //set another codec which is not AMR-wb
+    TEST_MUSTPASS(codec->GetCodec(0, cinst));
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    //try to change the encode format, tests should fail
+    TEST_MUSTPASS(-1 != codec->SetAMRWbEncFormat(0)); MARK();
+    TEST_MUSTPASS(-1 != codec->SetAMRWbEncFormat(0, kRfc3267BwEfficient));
+    MARK();
+    TEST_MUSTPASS(-1 != codec->SetAMRWbEncFormat(0, kRfc3267OctetAligned));
+    MARK();
+    TEST_MUSTPASS(-1 != codec->SetAMRWbEncFormat(0, kRfc3267FileStorage));
+    MARK();
+
+    //set AMR-wb as encoder
+    strcpy(cinst.plname,"AMR-WB");
+    cinst.channels=1; cinst.plfreq=16000; cinst.rate=20000;
+    cinst.pltype=112; cinst.pacsize=320;
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    //try to change the encode format, tests should pass
+    TEST_MUSTPASS(codec->SetAMRWbEncFormat(0)); MARK();
+    TEST_MUSTPASS(codec->SetAMRWbEncFormat(0, kRfc3267BwEfficient)); MARK();
+    TEST_MUSTPASS(codec->SetAMRWbEncFormat(0, kRfc3267OctetAligned)); MARK();
+    TEST_MUSTPASS(codec->SetAMRWbEncFormat(0, kRfc3267FileStorage)); MARK();
+    TEST_MUSTPASS(-1 != codec->SetAMRWbEncFormat(-1)); MARK();
+    TEST_MUSTPASS(codec->SetAMRWbEncFormat(0)); MARK(); // restore default
+
+    ANL();
+    AOK();
+    ANL();
+
+    //////////////////////////
+    // SetAMRWbDecFormat
+
+    TEST(SetAMRWbDecFormat); ANL();
+
+    // It should not be possible to set the AMR-WB dec format before a valid
+    // AMR-WB decoder is registered
+    TEST_MUSTPASS(!codec->SetAMRWbDecFormat(0)); MARK();
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_AUDIO_CODING_MODULE_ERROR);
+
+    // Ensure that ACM::RegisterReceiveCodec(AMR-WB) is called
+    TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+
+    // All these tests should now pass
+    TEST_MUSTPASS(codec->SetAMRWbDecFormat(0)); MARK();
+    TEST_MUSTPASS(codec->SetAMRWbDecFormat(0, kRfc3267BwEfficient)); MARK();
+    TEST_MUSTPASS(codec->SetAMRWbDecFormat(0, kRfc3267OctetAligned)); MARK();
+    TEST_MUSTPASS(codec->SetAMRWbDecFormat(0, kRfc3267FileStorage)); MARK();
+    TEST_MUSTPASS(-1 != codec->SetAMRWbDecFormat(-1)); MARK();
+    TEST_MUSTPASS(codec->SetAMRWbDecFormat(0)); MARK(); // restore default
+
+    ANL();
+    AOK();
+    ANL();
+#endif // #ifdef WEBRTC_CODEC_GSMAMRWB
+
+    ///////////////////////////////
+    // SetSendCNPayloadType
+
+    TEST(SetSendCNPayloadType);
+    ANL();
+
+    TEST_MUSTPASS(-1 != codec->SetSendCNPayloadType(-1, 0));
+    MARK(); // invalid channel
+
+    // Invalid payload types (only the dynamic range [96,127] is valid)
+    TEST_MUSTPASS(-1 != codec->SetSendCNPayloadType(0, 0));
+    MARK(); // invalid PT
+    TEST_MUSTPASS(-1 != codec->SetSendCNPayloadType(0, 95));
+    MARK(); // invalid PT
+    TEST_MUSTPASS(-1 != codec->SetSendCNPayloadType(0, 128));
+    MARK(); // invalid PT
+    TEST_MUSTPASS(-1 != codec->SetSendCNPayloadType(0, -1));
+    MARK(); // invalid PT
+
+    // Not possible to change PT for 8000
+    TEST_MUSTPASS(!codec->SetSendCNPayloadType(0, 96, kFreq8000Hz));
+    MARK();
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_INVALID_PLFREQ);
+
+    // Try some dynamic payload types for 16000 and 32000 Hz as well
+    TEST_MUSTPASS(codec->SetSendCNPayloadType(0, 96, kFreq16000Hz));
+    MARK();
+    TEST_MUSTPASS(codec->SetSendCNPayloadType(0, 96, kFreq32000Hz));
+    MARK(); // same should work
+    TEST_MUSTPASS(codec->SetSendCNPayloadType(0, 127, kFreq16000Hz));
+    MARK();
+    TEST_MUSTPASS(codec->SetSendCNPayloadType(0, 127, kFreq32000Hz));
+    MARK();
+    TEST_MUSTPASS(codec->SetSendCNPayloadType(0, 100, kFreq32000Hz));
+    MARK();
+
+    ANL();
+    AOK();
+    ANL();
+
+    /////////////////////////////
+    // SetRecPayloadType
+
+    TEST(SetRecPayloadType);
+    ANL();
+
+    // scan all supported and valid codecs without changing payloads
+    nCodecs = codec->NumOfCodecs();
+    for (i = 0; i < nCodecs; i++)
+    {
+        TEST_MUSTPASS(codec->GetCodec(i, newCodec));
+        // If no default payload type is defined, we use 127
+        if (-1 == newCodec.pltype)
+        {
+            newCodec.pltype = 127;
+        }
+        TEST_MUSTPASS(codec->SetRecPayloadType(0, newCodec));
+        MARK(); // use default
+        newCodec.pltype = 99;
+        TEST_MUSTPASS(codec->SetRecPayloadType(0, newCodec));
+        MARK(); // use same PT on all
+        newCodec.pltype = -1;
+        TEST_MUSTPASS(codec->SetRecPayloadType(0, newCodec));
+        MARK(); // deregister all PTs
+    }
+
+    ANL();
+    AOK();
+    ANL();
+
+    /////////////////////////////
+    // GetRecPayloadType
+
+    TEST(GetRecPayloadType);
+    ANL();
+
+    CodecInst extraCodec;
+    for (i = 0; i < nCodecs; i++)
+    {
+        // Set defaults
+        TEST_MUSTPASS(codec->GetCodec(i, newCodec));
+        // If no default payload type is defined, we use 127
+        if (-1 == newCodec.pltype)
+        {
+            newCodec.pltype = 127;
+        }
+        TEST_MUSTPASS(codec->SetRecPayloadType(0, newCodec));
+        //TEST_LOG("[%2d] %s (SetRec): fs=%d, pt=%d, rate=%d, ch=%d, size=%d\n", 
+        //  i, newCodec.plname, newCodec.plfreq, newCodec.pltype, newCodec.rate,
+        // newCodec.channels, newCodec.pacsize);
+        extraCodec.pltype = -1; // don't know this yet
+        extraCodec.plfreq = newCodec.plfreq;
+        extraCodec.rate = newCodec.rate;
+        extraCodec.channels = newCodec.channels;
+        strcpy(extraCodec.plname, newCodec.plname);
+        // Verify that the setting is OK
+        TEST_MUSTPASS(codec->GetRecPayloadType(0, extraCodec));
+        //TEST_LOG("[%2d] %s (GetRec): fs=%d, pt=%d, rate=%d, ch=%d, size=%d\n", 
+        //  i, extraCodec.plname, extraCodec.plfreq, extraCodec.pltype,
+        // extraCodec.rate, extraCodec.channels, extraCodec.pacsize);
+        TEST_MUSTPASS(newCodec.pltype != extraCodec.pltype);
+        TEST_MUSTPASS(newCodec.plfreq != extraCodec.plfreq);
+        TEST_MUSTPASS(newCodec.channels != extraCodec.channels);
+    }
+
+    AOK();
+    ANL();
+
+    ////////////////////////////////////////////////////
+    // SetRecPayloadType - remove receive codecs
+
+    TEST(SetRecPayloadType - removing receive codecs);
+    ANL();
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    TEST_MUSTPASS(base->SetSendDestination(0, 8000, "127.0.0.1"));
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 8000));
+#endif
+    TEST_MUSTPASS(base->StartSend(0));
+    if (file)
+    {
+        TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, micFile,
+                                                         true, true));
+    }
+
+    // Scan all supported and valid codecs and remove from receiving db, then
+    // restore
+    nCodecs = codec->NumOfCodecs();
+    for (i = 0; i < nCodecs; i++)
+    {
+        TEST_MUSTPASS(codec->GetCodec(i, cinst));
+        if (!_stricmp("red", cinst.plname) || !_stricmp("cn", cinst.plname)
+            || !_stricmp("telephone-event", cinst.plname))
+        {
+            continue; // Ignore these
+        }
+        TEST_LOG("Testing codec: %s", cinst.plname);
+        fflush(NULL);
+
+        if (-1 == cinst.pltype)
+        {
+            // If no default payload type is defined, we use 127; the codec
+            // is then not registered for receiving
+            cinst.pltype = 127;
+        } else
+        {
+            // Remove codec
+            memcpy(&extraCodec, &cinst, sizeof(CodecInst));
+            extraCodec.pltype = -1;
+            TEST_MUSTPASS(codec->SetRecPayloadType(0, extraCodec));
+        }
+
+        // Set send codec
+        TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+        // Verify no audio
+        TEST_MUSTPASS(base->StartReceive(0));
+        TEST_MUSTPASS(base->StartPlayout(0));
+        TEST_LOG("  silence");
+        fflush(NULL);
+        SLEEP(800);
+        TEST_MUSTPASS(base->StopPlayout(0));
+        TEST_MUSTPASS(base->StopReceive(0));
+
+        // Restore codec
+        TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+
+        // Verify audio
+        TEST_MUSTPASS(base->StartReceive(0));
+        TEST_MUSTPASS(base->StartPlayout(0));
+        TEST_LOG("  audio");
+        fflush(NULL);
+        SLEEP(800);
+        TEST_MUSTPASS(base->StopPlayout(0));
+        TEST_MUSTPASS(base->StopReceive(0));
+
+        if (127 == cinst.pltype)
+        {
+            // If no default payload type is defined (i.e., we set pt to 127
+            // above), make sure we remove the codec from the receiving side
+            cinst.pltype = -1;
+            TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+        }
+
+        ANL();
+    }
+
+    // Remove certain codecs
+    TEST_LOG("Removing receive codecs:");
+    for (i = 0; i < nCodecs; i++)
+    {
+        TEST_MUSTPASS(codec->GetCodec(i, cinst));
+        if (!_stricmp("ipcmwb", cinst.plname)
+            || !_stricmp("pcmu", cinst.plname) || !_stricmp("eg711a",
+                                                            cinst.plname))
+        {
+            TEST_LOG(" %s", cinst.plname);
+            memcpy(&extraCodec, &cinst, sizeof(CodecInst));
+            extraCodec.pltype = -1;
+            TEST_MUSTPASS(codec->SetRecPayloadType(0, extraCodec));
+        }
+    }
+    ANL();
+
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+
+    // Test sending all codecs - verify audio/no audio depending on codec
+    TEST_LOG("Looping through send codecs \n");
+    TEST_LOG("Verify that removed codecs are not audible and the other are \n");
+    for (i = 0; i < nCodecs; i++)
+    {
+        TEST_MUSTPASS(codec->GetCodec(i, cinst));
+        if (!_stricmp("red", cinst.plname) || !_stricmp("cn", cinst.plname)
+            || !_stricmp("telephone-event", cinst.plname))
+        {
+            continue; // Ignore these
+        }
+        TEST_LOG("Testing codec: %s \n", cinst.plname);
+
+        // If no default payload type is defined, we use 127 and set receive
+        // payload type
+        if (-1 == cinst.pltype)
+        {
+            cinst.pltype = 127;
+            TEST_MUSTPASS(base->StopPlayout(0));
+            TEST_MUSTPASS(base->StopReceive(0));
+            TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+            TEST_MUSTPASS(base->StartReceive(0));
+            TEST_MUSTPASS(base->StartPlayout(0));
+        }
+
+        // Set send codec
+        TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+        // Verify audio/no audio
+        SLEEP(800);
+    }
+
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+
+    // Restore codecs
+    TEST_LOG("Restoring receive codecs:");
+    for (i = 0; i < nCodecs; i++)
+    {
+        TEST_MUSTPASS(codec->GetCodec(i, cinst));
+        if (!_stricmp("ipcmwb", cinst.plname) ||
+            !_stricmp("pcmu", cinst.plname) ||
+            !_stricmp("eg711a", cinst.plname))
+        {
+            TEST_LOG(" %s", cinst.plname);
+            memcpy(&extraCodec, &cinst, sizeof(CodecInst));
+            TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+        }
+    }
+    ANL();
+
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+
+    // Test sending all codecs - verify audio
+    TEST_LOG("Looping through send codecs \n");
+    TEST_LOG("Verify that all codecs are audible \n");
+    for (i = 0; i < nCodecs; i++)
+    {
+        TEST_MUSTPASS(codec->GetCodec(i, cinst));
+        if (!_stricmp("red", cinst.plname) || !_stricmp("cn", cinst.plname)
+            || !_stricmp("telephone-event", cinst.plname))
+        {
+            continue; // Ignore these
+        }
+        TEST_LOG("Testing codec: %s \n", cinst.plname);
+
+        // If no default payload type is defined, we use 127 and set receive
+        // payload type
+        if (-1 == cinst.pltype)
+        {
+            cinst.pltype = 127;
+            TEST_MUSTPASS(base->StopPlayout(0));
+            TEST_MUSTPASS(base->StopReceive(0));
+            TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+            TEST_MUSTPASS(base->StartReceive(0));
+            TEST_MUSTPASS(base->StartPlayout(0));
+        }
+
+        // Set send codec
+        TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+        // Verify audio/no audio
+        SLEEP(800);
+    }
+
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+
+    // Fresh channel
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->CreateChannel());
+
+#if defined(WEBRTC_CODEC_ISAC)
+
+    /////////////////////////////////////
+    // SetISACInitTargetRate - wb
+
+    TEST(SetISACInitTargetRate);
+    ANL();
+
+    // set PCMU as sending codec
+    cinst.channels = 1;
+    cinst.pacsize = 160;
+    cinst.plfreq = 8000;
+    strcpy(cinst.plname, "PCMU");
+    cinst.pltype = 0;
+    cinst.rate = 64000;
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+    TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, 10000));
+    MARK(); // should fail since iSAC is not active
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_CODEC_ERROR);
+
+    // set iSAC as sending codec (16kHz)
+    cinst.channels = 1;
+    cinst.plfreq = 16000;
+    strcpy(cinst.plname, "ISAC");
+    cinst.pltype = 103;
+    cinst.rate = -1; // adaptive rate
+    cinst.pacsize = 480; // 30ms
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+    TEST_MUSTPASS(!codec->SetISACInitTargetRate(1, 10000));
+    MARK(); // invalid channel
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, 500));
+    MARK(); // invalid target rates (too small)
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, 33000));
+    MARK(); // invalid target rates (too large)
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 10000));
+    MARK(); // life is good now
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 0));
+    MARK(); // 0 is a valid rate
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 32000));
+    MARK(); // try max as well
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 32000, true));
+    MARK();
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 32000, false));
+    MARK();
+
+    cinst.pacsize = 960; // 60ms
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 32000, false));
+    MARK();
+
+    cinst.rate = 20000;
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, 32000));
+    MARK(); // only works in adaptive mode
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_AUDIO_CODING_MODULE_ERROR);
+
+    cinst.rate = -1;
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 32000));
+    MARK(); // back to adaptive mode
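+    // (iSAC convention used in these tests: rate == -1 selects adaptive mode,
+    //  while a positive rate such as 20000 or 45000 gives a fixed,
+    //  "instantaneous" rate; SetISACInitTargetRate only applies in adaptive
+    //  mode, as verified above.)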
+
+    ANL();
+    AOK();
+    ANL();
+
+    /////////////////////////////////////
+    // SetISACInitTargetRate - swb
+
+    TEST(ISACSWB SetISACInitTargetRate);
+    ANL();
+
+    // set iSAC as sending codec
+    cinst.channels = 1;
+    cinst.plfreq = 32000;
+    strcpy(cinst.plname, "ISAC");
+    cinst.pltype = 104;
+    cinst.rate = -1; // default rate
+    cinst.pacsize = 960; // 30ms
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+    TEST_MUSTPASS(!codec->SetISACInitTargetRate(1, 10000));
+    MARK(); // invalid channel
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, -1));
+    MARK(); // invalid target rates (too small)
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, 500));
+    MARK(); // invalid target rates (too small)
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, 57000));
+    MARK(); // invalid target rates (valid range is [10000, 56000])
+
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 10000));
+    MARK();
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 0));
+    MARK();
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 56000));
+    MARK(); // try max as well
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 56000, true));
+    MARK();
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 56000, false));
+    MARK();
+
+    ANL();
+    AOK();
+    ANL();
+
+    ////////////////////////////////
+    // SetISACMaxRate
+
+    TEST(SetISACMaxRate);
+    ANL();
+
+    // set PCMU as sending codec
+    cinst.channels = 1;
+    cinst.pacsize = 160;
+    cinst.plfreq = 8000;
+    strcpy(cinst.plname, "PCMU");
+    cinst.pltype = 0;
+    cinst.rate = 64000;
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+    TEST_MUSTPASS(!codec->SetISACMaxRate(0, 48000));
+    MARK(); // should fail since iSAC is not active
+    TEST_MUSTPASS(base->LastError() != VE_CODEC_ERROR);
+
+    // set iSAC as sending codec
+    cinst.channels = 1;
+    cinst.plfreq = 16000;
+    strcpy(cinst.plname, "ISAC");
+    cinst.pltype = 103;
+    cinst.rate = -1; // adaptive rate
+    cinst.pacsize = 480; // 30ms
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+    TEST_MUSTPASS(!codec->SetISACMaxRate(1, 48000));
+    MARK(); // invalid channel
+    TEST_MUSTPASS(base->LastError() != VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(!codec->SetISACMaxRate(0, 31900));
+    MARK(); // invalid target rates (too small)
+    TEST_MUSTPASS(base->LastError() != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(!codec->SetISACMaxRate(0, 53500));
+    MARK(); // invalid target rates (too large)
+    TEST_MUSTPASS(base->LastError() != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 32000));
+    MARK(); // life is good now
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 40000));
+    MARK();
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 48000));
+    MARK();
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 53400));
+    MARK(); // try max as well (default)
+
+    cinst.pacsize = 960; // 60ms
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 48000));
+    MARK();
+
+    cinst.rate = 20000;
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 40000));
+    MARK(); // also works in non-adaptive mode
+
+    ANL();
+    AOK();
+    ANL();
+
+    TEST(ISACSWB SetISACMaxRate);
+    ANL();
+    // set iSAC as sending codec
+    cinst.channels = 1;
+    cinst.plfreq = 32000;
+    strcpy(cinst.plname, "ISAC");
+    cinst.pltype = 104;
+    cinst.rate = 45000; // instantaneous mode
+    cinst.pacsize = 960; // 30ms
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+    TEST_MUSTPASS(!codec->SetISACMaxRate(1, 48000));
+    MARK(); // invalid channel
+    TEST_MUSTPASS(base->LastError() != VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(!codec->SetISACMaxRate(0, 31900));
+    MARK(); // invalid target rates (too small)
+    TEST_MUSTPASS(base->LastError() != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(!codec->SetISACMaxRate(0, 107500));
+    MARK(); // invalid target rates (too large)
+    TEST_MUSTPASS(base->LastError() != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 32000));
+    MARK(); // life is good now
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 40000));
+    MARK();
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 55000));
+    MARK();
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 80000));
+    MARK();
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 107000));
+    MARK(); // try max as well (default)
+
+
+    cinst.rate = -1; // adaptive mode
+    cinst.pacsize = 960; // 30ms
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+    TEST_MUSTPASS(!codec->SetISACMaxRate(1, 48000));
+    MARK(); // invalid channel
+    TEST_MUSTPASS(base->LastError() != VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(!codec->SetISACMaxRate(0, 31900));
+    MARK(); // invalid target rates (too small)
+    TEST_MUSTPASS(base->LastError() != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(!codec->SetISACMaxRate(0, 107500));
+    MARK(); // invalid target rates (too large)
+    TEST_MUSTPASS(base->LastError() != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 32000));
+    MARK(); // life is good now
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 40000));
+    MARK();
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 55000));
+    MARK();
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 80000));
+    MARK();
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 107000));
+    MARK(); // try max as well (default)
+
+    ANL();
+    AOK();
+    ANL();
+
+    ////////////////////////////////
+    // SetISACMaxPayloadSize
+
+    TEST(SetISACMaxPayloadSize);
+    ANL();
+
+    // set PCMU as sending codec
+    cinst.channels = 1;
+    cinst.pacsize = 160;
+    cinst.plfreq = 8000;
+    strcpy(cinst.plname, "PCMU");
+    cinst.pltype = 0;
+    cinst.rate = 64000;
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+    TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(0, 120));
+    MARK(); // should fail since iSAC is not active
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_CODEC_ERROR);
+
+    // set iSAC as sending codec
+    cinst.channels = 1;
+    cinst.plfreq = 16000;
+    strcpy(cinst.plname, "ISAC");
+    cinst.pltype = 103;
+    cinst.rate = -1; // adaptive rate
+    cinst.pacsize = 480; // 30ms
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+    TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(1, 120));
+    MARK(); // invalid channel
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(0, 100));
+    MARK(); // invalid size (too small)
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(0, 410));
+    MARK(); // invalid size (too large)
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(codec->SetISACMaxPayloadSize(0, 200));
+    MARK(); // life is good now
+    TEST_MUSTPASS(codec->SetISACMaxPayloadSize(0, 120));
+    MARK();
+    TEST_MUSTPASS(codec->SetISACMaxPayloadSize(0, 400));
+    MARK();
+
+    ANL();
+    AOK();
+    ANL();
+
+    TEST(ISACSWB SetISACMaxPayloadSize);
+    ANL();
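+    // For super-wideband iSAC the payload-size bounds exercised below are
+    // wider than for wideband: sizes below 120 or above 600 bytes are rejected.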
+    // set iSAC as sending codec
+    cinst.channels = 1;
+    cinst.plfreq = 32000;
+    strcpy(cinst.plname, "ISAC");
+    cinst.pltype = 104;
+    cinst.rate = 45000; // default rate
+    cinst.pacsize = 960; // 30ms
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+    TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(1, 100));
+    MARK(); // invalid channel
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(0, 100));
+    MARK(); // invalid size (too small)
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(0, 610));
+    MARK(); // invalid size (too large)
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+    TEST_MUSTPASS(codec->SetISACMaxPayloadSize(0, 200));
+    MARK(); // life is good now
+    TEST_MUSTPASS(codec->SetISACMaxPayloadSize(0, 120));
+    MARK();
+    TEST_MUSTPASS(codec->SetISACMaxPayloadSize(0, 600));
+    MARK();
+
+    ANL();
+    AOK();
+    ANL();
+
+    // Stream locally (loopback) and switch the send codec between iSAC
+    // wideband and iSAC super-wideband while playing a file as microphone.
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+    TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+#else
+    TEST_MUSTPASS(base->SetSendDestination(0, 8001, "127.0.0.1"));
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 8001));
+#endif
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+        0, GetFilename("audio_long16.pcm"), true , true));
+    cinst.channels = 1;
+    TEST_LOG("Testing codec: Switch between iSAC-wb and iSAC-swb \n");
+    TEST_LOG("Testing codec: iSAC wideband \n");
+    strcpy(cinst.plname, "ISAC");
+    cinst.pltype = 103;
+    cinst.rate = -1; // default rate
+    cinst.pacsize = 480; // 30ms
+    cinst.plfreq = 16000;
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    SLEEP(2000);
+    TEST_LOG("             : iSAC superwideband \n");
+    cinst.pltype = 104;
+    cinst.rate = -1; // default rate
+    cinst.pacsize = 960; // 30ms
+    cinst.plfreq = 32000;
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    SLEEP(2000);
+    TEST_LOG("             : iSAC wideband \n");
+    strcpy(cinst.plname, "ISAC");
+    cinst.pltype = 103;
+    cinst.rate = -1; // default rate
+    cinst.pacsize = 480; // 30ms
+    cinst.plfreq = 16000;
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    SLEEP(2000);
+    TEST_LOG("             : iSAC superwideband \n");
+    cinst.pltype = 104;
+    cinst.rate = -1; // default rate
+    cinst.pacsize = 960; // 30ms
+    cinst.plfreq = 32000;
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    SLEEP(2000);
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->StopSend(0));
+#else
+    TEST_LOG("Skipping extended iSAC API tests - "
+        "WEBRTC_CODEC_ISAC not defined\n");
+#endif // #if defined(WEBRTC_CODEC_ISAC)
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+    TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+    delete ptrTransport;
+#endif
+
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->Terminate());
+
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest::TestDtmf
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestDtmf()
+{
+    PrepareTest("Dtmf");
+
+    VoEBase* base = _mgr.BasePtr();
+    VoEDtmf* dtmf = _mgr.DtmfPtr();
+    VoECodec* codec = _mgr.CodecPtr();
+    VoEVolumeControl* volume = _mgr.VolumeControlPtr();
+
+    //#ifdef _USE_EXTENDED_TRACE_
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename("VoEDtmf_trace.txt")));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+                                              kTraceWarning |
+                                              kTraceError |
+                                              kTraceCritical |
+                                              kTraceApiCall |
+                                              kTraceMemory |
+                                              kTraceInfo));
+    //#endif
+
+    TEST_MUSTPASS(base->Init());
+    TEST_MUSTPASS(base->CreateChannel());
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 12345));
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+
+    ///////////////////////////
+    // Actual test starts here
+
+    // SetDtmfFeedbackStatus
+    TEST(SetDtmfFeedbackStatus & GetDtmfFeedbackStatus);
+    ANL();
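+    // Walk through every (feedback, directFeedback) combination, read it back
+    // with the getter, and send a tone each time so the local feedback (or its
+    // absence) can be verified by listening.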
+    bool dtmfFeedback = false, dtmfDirectFeedback = true;
+    TEST_MUSTPASS(dtmf->GetDtmfFeedbackStatus(dtmfFeedback,
+                                              dtmfDirectFeedback));
+    TEST_MUSTPASS(!dtmfFeedback);
+    TEST_MUSTPASS(dtmfDirectFeedback);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0));
+    MARK();
+    SLEEP(500);
+
+    TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(false, false));
+    TEST_MUSTPASS(dtmf->GetDtmfFeedbackStatus(dtmfFeedback,
+                                              dtmfDirectFeedback));
+    TEST_MUSTPASS(dtmfFeedback);
+    TEST_MUSTPASS(dtmfDirectFeedback);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0));
+    MARK();
+    SLEEP(500);
+
+    TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(false, true));
+    TEST_MUSTPASS(dtmf->GetDtmfFeedbackStatus(dtmfFeedback,
+                                              dtmfDirectFeedback));
+    TEST_MUSTPASS(dtmfFeedback);
+    TEST_MUSTPASS(!dtmfDirectFeedback);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0));
+    MARK();
+    SLEEP(500);
+
+    TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(true, false));
+    TEST_MUSTPASS(dtmf->GetDtmfFeedbackStatus(dtmfFeedback,
+                                              dtmfDirectFeedback));
+    TEST_MUSTPASS(!dtmfFeedback);
+    TEST_MUSTPASS(dtmfDirectFeedback);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0));
+    MARK();
+    SLEEP(500);
+
+    TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(true, true));
+    TEST_MUSTPASS(dtmf->GetDtmfFeedbackStatus(dtmfFeedback,
+                                              dtmfDirectFeedback));
+    TEST_MUSTPASS(!dtmfFeedback);
+    TEST_MUSTPASS(!dtmfDirectFeedback);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0));
+    MARK();
+    SLEEP(500);
+    TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(false, false));
+
+    AOK();
+    ANL();
+
+    // SendDtmf
+    TEST(SendDtmf);
+    ANL();
+
+    // Fail tests
+    // Event
+    // The event code is cast to unsigned char, so -1 is interpreted as 255
+    // and 256 wraps to 0.
+    TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, -1, false, 160, 10));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, 16, false, 160, 10));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    // Length
+    TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, 0, true, 99, 10));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, 0, true, 60001, 10));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, 20, true, -1, 10));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    // Volume
+    TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, 0, true, 160, -1));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, 0, true, 160, 37));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    // Without sending
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, 0, true));
+    MARK();
+    TEST_MUSTPASS(VE_NOT_SENDING != base->LastError());
+    TEST_MUSTPASS(base->StartSend(0));
+
+    // Testing Dtmf out-of-band: event, length and volume
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true));
+    MARK();
+    SLEEP(500);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 16, true));
+    MARK();
+    SLEEP(500); // Flash, not audible
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 100, 10));
+    MARK();
+    SLEEP(500);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 400, 10));
+    MARK();
+    SLEEP(500);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 160, 0));
+    MARK();
+    SLEEP(500);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 160, 36));
+    MARK();
+    SLEEP(500);
+
+    // Testing Dtmf inband: event, length and volume
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, false));
+    MARK();
+    SLEEP(500);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 15, false));
+    MARK();
+    SLEEP(500);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, false, 100, 10));
+    MARK();
+    SLEEP(500);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, false, 400, 10));
+    MARK();
+    SLEEP(500);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, false, 160, 0));
+    MARK();
+    SLEEP(500);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, false, 160, 36));
+    MARK();
+    SLEEP(500);
+
+    // Testing other events out-of-band: event and length
+    // These are not audible
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 17, true, 100, 10));
+    MARK();
+    SLEEP(200);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 32, true, 100, 10));
+    MARK();
+    SLEEP(200);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 78, true, 100, 10));
+    MARK();
+    SLEEP(200);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 255, true, 100, 10));
+    MARK();
+    SLEEP(200);
+    // the minimum telephone-event length is 100 ms
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 32, true, 100, 10));
+    MARK();
+    SLEEP(200);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 32, true, 1000, 10));
+    MARK();
+    SLEEP(1200);
+
+    AOK();
+    ANL();
+
+    // PlayDtmfTone
+    TEST(PlayDtmfTone);
+    ANL();
+    TEST_MUSTPASS(!dtmf->PlayDtmfTone(-1, 200, 10));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(!dtmf->PlayDtmfTone(16, 200, 10));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(!dtmf->PlayDtmfTone(0, 9, 10));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(!dtmf->PlayDtmfTone(0, 200, -1));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(!dtmf->PlayDtmfTone(0, 200, 37));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+
+    TEST_MUSTPASS(dtmf->PlayDtmfTone(0));
+    MARK();
+    SLEEP(500);
+    // the minimum DTMF tone length is 100 ms
+    TEST_MUSTPASS(dtmf->PlayDtmfTone(0, 100, 10));
+    MARK();
+    SLEEP(500);
+    TEST_MUSTPASS(dtmf->PlayDtmfTone(0, 2000, 10));
+    MARK();
+    SLEEP(2300);
+    TEST_MUSTPASS(dtmf->PlayDtmfTone(0, 200, 0));
+    MARK();
+    SLEEP(500);
+    TEST_MUSTPASS(dtmf->PlayDtmfTone(0, 200, 36));
+    MARK();
+    SLEEP(500);
+
+    AOK();
+    ANL();
+
+    // SetTelephoneEventDetection
+    TEST(SetTelephoneEventDetection);
+    ANL();
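+    // No checks are made here; detection itself is exercised below in the
+    // WEBRTC_DTMF_DETECTION section via RegisterTelephoneEventDetection.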
+    AOK();
+    ANL();
+
+    // Testing sending Dtmf under VAD/CN
+    TEST(SendDtmf - with VAD enabled);
+    ANL();
+    // Mute mic
+    TEST_MUSTPASS(volume->SetInputMute(0, true));
+    MARK();
+    // Enable VAD
+    TEST_MUSTPASS(codec->SetVADStatus(0, true));
+    MARK();
+    // Send Dtmf
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 400));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 9, true, 400));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 400));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 9, true, 400));
+    MARK();
+    SLEEP(1000);
+    // Switch codec
+    CodecInst ci;
+#if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    ci.channels = 1;
+    ci.pacsize = 480;
+    ci.plfreq = 16000;
+    strcpy(ci.plname, "ISAC");
+    ci.pltype = 103;
+    ci.rate = -1;
+#else
+    ci.pltype = 119;
+    strcpy(ci.plname, "isaclc");
+    ci.plfreq = 16000;
+    ci.pacsize = 320;
+    ci.channels = 1;
+    ci.rate = 40000;
+#endif
+    TEST_MUSTPASS(codec->SetSendCodec(0, ci));
+    MARK();
+    // Send Dtmf
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 400));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 9, true, 400));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 400));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 9, true, 400));
+    MARK();
+    SLEEP(1000);
+    SLEEP(4000);
+    // Disable VAD
+    TEST_MUSTPASS(codec->SetVADStatus(0, false));
+    MARK();
+    // Unmute
+    TEST_MUSTPASS(volume->SetInputMute(0, false));
+    MARK();
+
+    AOK();
+    ANL();
+
+    // SetSendTelephoneEventPayloadType
+    TEST(SetSendTelephoneEventPayloadType);
+    ANL();
+    TEST_MUSTPASS(!dtmf->SetSendTelephoneEventPayloadType(0, 128));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+
+    TEST_MUSTPASS(dtmf->SetSendTelephoneEventPayloadType(0, 96));
+    MARK();
+    TEST_MUSTPASS(dtmf->SetSendTelephoneEventPayloadType(0, 127));
+    MARK();
+    TEST_MUSTPASS(dtmf->SetSendTelephoneEventPayloadType(0, 106));
+    MARK(); // restore default
+
+    AOK();
+    ANL();
+
+#ifdef WEBRTC_DTMF_DETECTION
+    TEST(RegisterTelephoneEventDetection - several channels); ANL();
+
+    ci.channels = 1;
+    ci.pacsize = 160;
+    ci.plfreq = 8000;
+    ci.pltype = 0;
+    ci.rate = 64000;
+    strcpy(ci.plname, "PCMU");
+    TEST_MUSTPASS(codec->SetSendCodec(0, ci));
+
+    int ch2 = base->CreateChannel();
+    TEST_MUSTPASS(base->SetSendDestination(ch2, 8002, "127.0.0.1"));
+    TEST_MUSTPASS(base->SetLocalReceiver(ch2, 8002));
+    TEST_MUSTPASS(base->StartReceive(ch2));
+    TEST_MUSTPASS(codec->SetSendCodec(ch2, ci));
+    TEST_MUSTPASS(base->StartPlayout(ch2));
+    TEST_MUSTPASS(base->StartSend(ch2));
+    MARK();
+
+    DtmfCallback *d = new DtmfCallback();
+    TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(false));
+
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
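+    // Events 0-15 are sent on both channels; the callback counter is expected
+    // to reach 32 (16 per channel) for in-band as well as out-of-band detection.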
+
+    // In-band
+    TEST_MUSTPASS(dtmf->RegisterTelephoneEventDetection(0, kInBand, *d));
+    TEST_MUSTPASS(dtmf->RegisterTelephoneEventDetection(ch2, kInBand, *d));
+    TEST_LOG("\nSending in-band telephone events:");
+    for(int i = 0; i < 16; i++)
+    {
+        TEST_LOG("\n  %d ", i); fflush(NULL);
+        TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, i, false, 160, 10));
+        TEST_MUSTPASS(dtmf->SendTelephoneEvent(ch2, i, false, 160, 10));
+        SLEEP(500);
+    }
+    TEST_LOG("\nDetected %d events \n", d->counter);
+    TEST_MUSTPASS(d->counter != 32);
+    TEST_MUSTPASS(dtmf->DeRegisterTelephoneEventDetection(0));
+    TEST_MUSTPASS(dtmf->DeRegisterTelephoneEventDetection(ch2));
+
+    // Out-of-band
+    d->counter = 0;
+    TEST_MUSTPASS(dtmf->RegisterTelephoneEventDetection(0, kOutOfBand, *d));
+    TEST_MUSTPASS(dtmf->RegisterTelephoneEventDetection(ch2, kOutOfBand, *d));
+    TEST_LOG("\nSending out-band telephone events:");
+    for(int i = 0; i < 16; i++)
+    {
+        TEST_LOG("\n  %d ", i); fflush(NULL);
+        TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, i, true, 160, 10));
+        TEST_MUSTPASS(dtmf->SendTelephoneEvent(ch2, i, true, 160, 10));
+        SLEEP(500);
+    }
+    TEST_LOG("\nDetected %d events \n", d->counter);
+    TEST_MUSTPASS(d->counter != 32);
+    TEST_MUSTPASS(dtmf->DeRegisterTelephoneEventDetection(0));
+    TEST_MUSTPASS(dtmf->DeRegisterTelephoneEventDetection(ch2));
+    delete d;
+
+    AOK(); ANL();
+#endif
+
+    TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(true, false));
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->Terminate());
+
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest::TestEncryption
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestEncryption()
+{
+    PrepareTest("Encryption");
+
+    VoEBase* base = _mgr.BasePtr();
+    VoEFile* file = _mgr.FilePtr();
+    VoEEncryption* encrypt = _mgr.EncryptionPtr();
+
+#ifdef _USE_EXTENDED_TRACE_
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(
+        GetFilename("VoEEncryption_trace.txt")));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+                                              kTraceWarning |
+                                              kTraceError |
+                                              kTraceCritical |
+                                              kTraceApiCall |
+                                              kTraceMemory |
+                                              kTraceInfo));
+#endif
+    TEST_MUSTPASS(base->Init());
+    TEST_MUSTPASS(base->CreateChannel());
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 12345));
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, micFile, true ,true));
+
+    ///////////////////////////
+    // Actual test starts here
+
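+    // key1 is the 30-byte baseline SRTP master key; the keys used in the
+    // WEBRTC_SRTP-only tests below (key2-key4) are copies with small changes.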
+    unsigned char key1[30] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5,
+            6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
+
+#ifdef WEBRTC_SRTP
+    unsigned char key2[30]; // Differs from key1 in the first byte
+    memcpy(key2, key1, 30);
+    key2[0] = 99;
+    unsigned char key3[30]; // Differs from key1 in the last byte
+    memcpy(key3, key1, 30);
+    key3[29] = 99;
+    unsigned char key4[29]; // Same as key1 but shorter
+    memcpy(key4, key1, 29);
+
+    TEST(SRTP - Fail tests); ANL();
+
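+    // Each call below passes deliberately invalid SRTP parameters; the negated
+    // TEST_MUSTPASS asserts that the call fails and that LastError() reports
+    // the expected error code.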
+    // Send
+    // Incorrect parameters when not all protection is enabled
+    TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherNull, 30, kAuthHmacSha1,
+                                           20, 4, kNoProtection, key1));
+    TEST_MUSTPASS(VE_SRTP_ERROR != base->LastError());
+    MARK();
+    TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherNull, 30, kAuthHmacSha1,
+                                           20, 4, kEncryption, key1));
+    TEST_MUSTPASS(VE_SRTP_ERROR != base->LastError());
+    MARK();
+    TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherNull, 30, kAuthHmacSha1,
+                                           20, 4, kAuthentication, key1));
+    TEST_MUSTPASS(VE_SRTP_ERROR != base->LastError());
+    MARK();
+    // Incorrect cipher key length
+    TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 15,
+                                           kAuthHmacSha1, 20, 4,
+                                           kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 257,
+                                           kAuthHmacSha1, 20, 4,
+                                           kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherNull, 15, kAuthHmacSha1,
+                                           20, 4, kEncryptionAndAuthentication,
+                                           key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherNull, 257, kAuthHmacSha1,
+                                           20, 4, kEncryptionAndAuthentication,
+                                           key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    // Incorrect auth key length
+    TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                           kAuthHmacSha1, 21, 4,
+                                           kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                           kAuthNull, 257, 4,
+                                           kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    // Incorrect auth tag length
+    TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                           kAuthHmacSha1, 20, 21,
+                                           kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                           kAuthNull, 20, 13,
+                                           kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+
+    // key NULL pointer
+    TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                           kAuthHmacSha1, 20, 4,
+                                           kEncryptionAndAuthentication, NULL));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+
+    // Same for receive
+    // Incorrect parameters when not all protection is enabled
+    TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherNull, 30, kAuthHmacSha1,
+                                              20, 4, kNoProtection, key1));
+    TEST_MUSTPASS(VE_SRTP_ERROR != base->LastError());
+    MARK();
+    TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherNull, 30, kAuthHmacSha1,
+                                              20, 4, kEncryption, key1));
+    TEST_MUSTPASS(VE_SRTP_ERROR != base->LastError());
+    MARK();
+    TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherNull, 30, kAuthHmacSha1,
+                                              20, 4, kAuthentication, key1));
+    TEST_MUSTPASS(VE_SRTP_ERROR != base->LastError());
+    MARK();
+    // Incorrect cipher key length
+    TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 15,
+                                              kAuthHmacSha1, 20, 4,
+                                              kEncryptionAndAuthentication,
+                                              key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 257,
+                                              kAuthHmacSha1, 20, 4,
+                                              kEncryptionAndAuthentication,
+                                              key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherNull, 15,
+                                              kAuthHmacSha1, 20, 4,
+                                              kEncryptionAndAuthentication,
+                                              key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherNull, 257,
+                                              kAuthHmacSha1, 20, 4,
+                                              kEncryptionAndAuthentication,
+                                              key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    // Incorrect auth key length
+    TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode,
+                                              30, kAuthHmacSha1, 21, 4,
+                                              kEncryptionAndAuthentication,
+                                              key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    // note: this call previously crashed the application
+    TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                              kAuthNull, 257, 4,
+                                              kEncryptionAndAuthentication,
+                                              key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    // Incorrect auth tag length
+    TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                              kAuthHmacSha1, 20, 21,
+                                              kEncryptionAndAuthentication,
+                                              key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    // note: this call previously crashed the application
+    TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                              kAuthNull, 20, 13,
+                                              kEncryptionAndAuthentication,
+                                              key1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    // key NULL pointer
+    TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                              kAuthHmacSha1, 20, 4,
+                                              kEncryptionAndAuthentication,
+                                              NULL));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    ANL();
+
+    TEST(SRTP - Should hear audio at all times); ANL();
+
+    // Authentication only
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherNull, 0, kAuthHmacSha1, 20,
+                                          4, kAuthentication, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherNull, 0, kAuthHmacSha1,
+                                             20, 4, kAuthentication, key1));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    MARK(); SLEEP(2000);
+    ANL();
+
+    // No protection
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherNull, 0, kAuthNull, 0, 0,
+                                          kNoProtection, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherNull, 0, kAuthNull, 0, 0,
+                                             kNoProtection, key1));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    MARK(); SLEEP(2000);
+
+    // Encryption only
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthNull, 0, 0, kEncryption, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthNull, 0, 0,
+                                             kEncryption, key1));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    MARK(); SLEEP(2000);
+
+    // Authentication only
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherNull, 0, kAuthHmacSha1, 20,
+                                          4, kAuthentication, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherNull, 0, kAuthHmacSha1,
+                                             20, 4, kAuthentication, key1));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    MARK(); SLEEP(2000);
+    ANL();
+
+    // Switching between keys
+    TEST(SRTP - Different keys - should hear audio at all times); ANL();
+
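+    // Sender and receiver are always re-keyed together (key2, then key1, then
+    // key2 again after a full restart on port 8000), so both sides share the
+    // same key at every step and audio should stay intact throughout.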
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthHmacSha1, 20, 4,
+                                          kEncryptionAndAuthentication, key2));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthHmacSha1, 20, 4,
+                                             kEncryptionAndAuthentication,
+                                             key2));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthHmacSha1, 20, 4,
+                                          kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthHmacSha1, 20, 4,
+                                             kEncryptionAndAuthentication,
+                                             key1));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthHmacSha1, 20, 4,
+                                          kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthHmacSha1, 20, 4,
+                                             kEncryptionAndAuthentication,
+                                             key1));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthHmacSha1, 20, 4,
+                                          kEncryptionAndAuthentication, key2));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthHmacSha1, 20, 4,
+                                             kEncryptionAndAuthentication,
+                                             key2));
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 8000));
+    TEST_MUSTPASS(base->SetSendDestination(0, 8000, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, micFile, true ,true));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    MARK(); SLEEP(2000);
+    ANL();
+
+    // Testing different keys that should be silent
+    TEST(SRTP - Should be silent or garbage); ANL();
+
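+    // From here on the sender and receiver are configured with mismatching
+    // keys (or key lengths), so the receive side cannot authenticate/decrypt
+    // and playout is expected to be silent or garbled.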
+    // key1 and key2
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthHmacSha1, 20, 4,
+                                          kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthHmacSha1, 20, 4,
+                                             kEncryptionAndAuthentication,
+                                             key2));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthHmacSha1, 20, 4,
+                                          kEncryptionAndAuthentication, key2));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthHmacSha1, 20, 4,
+                                             kEncryptionAndAuthentication,
+                                             key1));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthNull, 0, 0, kEncryption, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthNull, 0, 0,
+                                             kEncryption, key2));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherNull, 0, kAuthHmacSha1,
+                                          20, 4, kAuthentication, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherNull, 0, kAuthHmacSha1,
+                                             20, 4, kAuthentication, key2));
+    MARK(); SLEEP(2000);
+
+    // key1 and key3
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthHmacSha1, 20, 4,
+                                          kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthHmacSha1, 20, 4,
+                                             kEncryptionAndAuthentication,
+                                             key3));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthHmacSha1, 20, 4,
+                                          kEncryptionAndAuthentication, key3));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthHmacSha1, 20, 4,
+                                             kEncryptionAndAuthentication,
+                                             key1));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthNull, 0, 0, kEncryption, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthNull, 0, 0,
+                                             kEncryption, key3));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherNull, 0, kAuthHmacSha1, 20,
+                                          4, kAuthentication, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherNull, 0, kAuthHmacSha1,
+                                             20, 4, kAuthentication, key3));
+    MARK(); SLEEP(2000);
+
+    // key1 and key4
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthHmacSha1, 20, 4,
+                                          kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthHmacSha1, 20, 4,
+                                             kEncryptionAndAuthentication,
+                                             key4));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthHmacSha1, 20, 4,
+                                          kEncryptionAndAuthentication, key4));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthHmacSha1, 20, 4,
+                                             kEncryptionAndAuthentication,
+                                             key1));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthNull, 0, 0, kEncryption, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthNull, 0, 0,
+                                             kEncryption, key4));
+    MARK(); SLEEP(2000);
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherNull, 0, kAuthHmacSha1, 20,
+                                          4, kAuthentication, key1));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherNull, 0, kAuthHmacSha1,
+                                             20, 4, kAuthentication, key4));
+    MARK(); SLEEP(2000);
+    ANL();
+
+    // Back to normal
+    TEST(SRTP - Back to normal - should hear audio); ANL();
+
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    MARK(); SLEEP(2000);
+    ANL();
+
+    // SRTCP tests
+    TEST(SRTCP - Ignore voice or not); ANL();
+    VoERTP_RTCP* rtp_rtcp = _mgr.RTP_RTCPPtr();
+    char tmpStr[32];
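+    // The RTCP CNAME is used as a probe: a newly set CNAME should only be
+    // observed at the receiving side when SRTCP protection is configured
+    // consistently on both sender and receiver.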
+
+    // First test that RTCP packet is received and OK without encryption
+
+    TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Henrik1"));
+    MARK(); SLEEP(8000);
+    TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+    TEST_MUSTPASS(_stricmp("Henrik1", tmpStr));
+
+    // Enable SRTP and SRTCP send and receive
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthHmacSha1,
+            20, 4, kEncryptionAndAuthentication, key1, true));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthHmacSha1,
+            20, 4, kEncryptionAndAuthentication, key1, true));
+    TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Henrik2"));
+    MARK(); SLEEP(8000);
+    TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+    TEST_MUSTPASS(_stricmp("Henrik2", tmpStr));
+
+    // Disable SRTP and SRTCP send
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Henrik3"));
+    MARK(); SLEEP(8000);
+    TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+    TEST_MUSTPASS(_stricmp("Henrik2", tmpStr)); // Should not have changed
+
+    // Enable SRTP send, but disable SRTCP send
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthHmacSha1,
+            20, 4, kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Henrik4"));
+    MARK(); SLEEP(8000);
+    TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+    TEST_MUSTPASS(_stricmp("Henrik2", tmpStr)); // Should not have changed
+
+    // Enable SRTP and SRTCP send, disable SRTP and SRTCP receive
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+                                          kAuthHmacSha1,
+            20, 4, kEncryptionAndAuthentication, key1, true));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Henrik5"));
+    MARK(); SLEEP(8000);
+    TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+    TEST_MUSTPASS(_stricmp("Henrik2", tmpStr)); // Should not have changed
+
+    // Enable SRTP receive, but disable SRTCP receive
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+                                             kAuthHmacSha1,
+            20, 4, kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Henrik6"));
+    MARK(); SLEEP(8000);
+    TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+    TEST_MUSTPASS(_stricmp("Henrik2", tmpStr)); // Should not have changed
+
+    // Disable all
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Henrik7"));
+    MARK(); SLEEP(8000);
+    TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+    TEST_MUSTPASS(_stricmp("Henrik7", tmpStr));
+    ANL();
+
+#else
+    TEST(SRTP disabled - Fail tests);
+    ANL();
+
+    TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherNull, 30, kAuthHmacSha1,
+            20, 4, kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != base->LastError());
+    TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherNull, 30, kAuthHmacSha1,
+            20, 4, kEncryptionAndAuthentication, key1));
+    TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != base->LastError());
+    TEST_MUSTPASS(!encrypt->DisableSRTPSend(0));
+    TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != base->LastError());
+    TEST_MUSTPASS(!encrypt->DisableSRTPReceive(0));
+    TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != base->LastError());
+    ANL();
+#endif
+    AOK();
+
+    TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->Terminate());
+
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest::TestExternalMedia
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestExternalMedia()
+{
+    PrepareTest("VoEExternalMedia");
+
+    VoEBase* base = _mgr.BasePtr();
+    VoEExternalMedia* xmedia = _mgr.ExternalMediaPtr();
+
+    // check if this interface is supported
+    if (!xmedia)
+    {
+        TEST_LOG("VoEExternalMedia is not supported!");
+        return -1;
+    }
+
+#ifdef _USE_EXTENDED_TRACE_
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(
+            GetFilename("VoEExternalMedia_trace.txt")));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(
+            kTraceStateInfo | kTraceWarning |
+            kTraceError | kTraceCritical | kTraceApiCall |
+            kTraceMemory | kTraceInfo));
+#endif
+    TEST_MUSTPASS(base->Init());
+    TEST_MUSTPASS(base->CreateChannel());
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 12345));
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+
+    int getLen = 0;
+    WebRtc_Word16 vector[32000];
+    memset(vector, 0, 32000 * sizeof(short));
+
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+
+    // ExternalPlayoutGetData
+    TEST(ExternalPlayoutGetData);
+    ANL();
+
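+    // Enabling external playout while the channel is already streaming fails,
+    // and ExternalPlayoutGetData fails while the feature is disabled; it is
+    // therefore enabled below with playout stopped.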
+    TEST_MUSTPASS(!xmedia->SetExternalPlayoutStatus(true));
+    TEST_MUSTPASS(VE_ALREADY_SENDING != base->LastError());
+    TEST_MUSTPASS(!xmedia->ExternalPlayoutGetData(vector, 16000, 100, getLen));
+    TEST_MUSTPASS(VE_INVALID_OPERATION != base->LastError());
+
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(xmedia->SetExternalPlayoutStatus(true));
+    TEST_MUSTPASS(base->StartPlayout(0));
+
+    TEST_MUSTPASS(xmedia->ExternalPlayoutGetData(vector, 48000, 0, getLen));
+    TEST_MUSTPASS(480 != getLen);
+    SLEEP(10);
+    TEST_MUSTPASS(xmedia->ExternalPlayoutGetData(vector, 16000, 3000, getLen));
+    TEST_MUSTPASS(160 != getLen);
+    SLEEP(10);
+
+    TEST_MUSTPASS(!xmedia->ExternalPlayoutGetData(vector, 8000, 100, getLen));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(!xmedia->ExternalPlayoutGetData(vector, 16000, -1, getLen));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(xmedia->SetExternalPlayoutStatus(false));
+    TEST_MUSTPASS(base->StartPlayout(0));
+
+    // SetExternalRecording
+    TEST(SetExternalRecording);
+    ANL();
+
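+    // Same pattern for external recording: toggling it requires the send
+    // stream to be stopped first, and ExternalRecordingInsertData is rejected
+    // while the feature is disabled or when given invalid frame parameters.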
+    TEST_MUSTPASS(!xmedia->SetExternalRecordingStatus(true));
+    TEST_MUSTPASS(VE_ALREADY_SENDING != base->LastError());
+    TEST_MUSTPASS(!xmedia->ExternalRecordingInsertData(vector, 160, 16000, 20));
+    TEST_MUSTPASS(VE_INVALID_OPERATION != base->LastError());
+
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(xmedia->SetExternalRecordingStatus(true));
+    TEST_MUSTPASS(base->StartSend(0));
+
+    TEST_MUSTPASS(xmedia->ExternalRecordingInsertData(vector, 480, 48000, 0));
+    SLEEP(10);
+    TEST_MUSTPASS(xmedia->ExternalRecordingInsertData(vector, 640, 16000, 0));
+    SLEEP(40);
+
+    TEST_MUSTPASS(!xmedia->ExternalRecordingInsertData(vector, 160, 16000, -1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(!xmedia->ExternalRecordingInsertData(vector, 80, 8000, 20));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(!xmedia->ExternalRecordingInsertData(vector, 0, 16000, 20));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(!xmedia->ExternalRecordingInsertData(vector, 80, 16000, 20));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(!xmedia->ExternalRecordingInsertData(vector, 500, 16000, 20));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(xmedia->SetExternalRecordingStatus(false));
+    TEST_MUSTPASS(base->StartSend(0));
+
+#else // #ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    TEST_MUSTPASS(!xmedia->SetExternalPlayoutStatus(true));
+    TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != base->LastError());
+    TEST_MUSTPASS(!xmedia->ExternalPlayoutGetData(vector, 16000, 100, getLen));
+    TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != base->LastError());
+    TEST_MUSTPASS(!xmedia->SetExternalRecordingStatus(true));
+    TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != base->LastError());
+    TEST_MUSTPASS(!xmedia->ExternalRecordingInsertData(vector, 160, 16000, 20));
+    TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != base->LastError());
+
+#endif // #ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->Terminate());
+
+    ANL();
+    AOK();
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest::TestFile
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestFile()
+{
+    PrepareTest("File");
+
+    VoEBase* base = _mgr.BasePtr();
+    VoEFile* file = _mgr.FilePtr();
+    VoECodec* codec = _mgr.CodecPtr();
+
+#ifdef _USE_EXTENDED_TRACE_
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(
+        GetFilename("VoEFile_trace.txt"))); MARK();
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+                                              kTraceWarning |
+                                              kTraceError |
+                                              kTraceCritical |
+                                              kTraceApiCall |
+                                              kTraceMemory |
+                                              kTraceInfo));
+#endif
+
+    TEST_MUSTPASS(base->Init());
+    TEST_MUSTPASS(base->CreateChannel());
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 12345));
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+
+    ///////////////////////////
+    // Actual test starts here
+
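+    // dT is the time in milliseconds each locally played file is given before
+    // the next operation.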
+    const int dT(100);
+
+    TEST(StartPlayingFileLocally);
+    ANL();
+    TEST(StopPlayingFileLocally);
+    ANL();
+
+    base->StopPlayout(0);
+    TEST_MUSTPASS(file->StartPlayingFileLocally(
+        0, GetResource("audio_long16.pcm"))); MARK();
+    base->StartPlayout(0);
+    MARK(); // file should be mixed in and played out
+    SLEEP(dT);
+    TEST_MUSTPASS(!file->StartPlayingFileLocally(
+        0, GetResource("audio_long16.pcm")));
+    MARK(); // should fail (must stop first)
+    TEST_MUSTPASS(base->LastError() != VE_ALREADY_PLAYING);
+    TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    MARK();
+    TEST_MUSTPASS(file->StartPlayingFileLocally(
+        0, GetResource("audio_long16.pcm")));
+    MARK(); // should work again (restarts file)
+    SLEEP(dT);
+    TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    MARK();
+    TEST_MUSTPASS(file->StartPlayingFileLocally(
+        0, GetResource("audio_long16.pcm"), false, kFileFormatPcm16kHzFile));
+    MARK();
+    SLEEP(dT);
+    TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    MARK();
+    TEST_MUSTPASS(file->StartPlayingFileLocally(
+        0, GetResource("audio_long8.pcm"), false, kFileFormatPcm8kHzFile));
+    MARK();
+    SLEEP(dT);
+    TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    MARK();
+    TEST_MUSTPASS(file->StartPlayingFileLocally(
+        0, GetResource("audio_long16.wav"), false, kFileFormatPcm8kHzFile));
+    MARK();
+    SLEEP(dT);
+    TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    MARK();
+    TEST_MUSTPASS(file->StartPlayingFileLocally(
+        0, GetResource("audio_long8mulaw.wav"), false,
+        kFileFormatPcm8kHzFile));
+    MARK();
+    SLEEP(dT);
+
+    // add compressed tests here...
+
+    // TEST_MUSTPASS(file->StopPlayingFileLocally(0)); MARK();
+    // TEST_MUSTPASS(file->StartPlayingFileLocally(
+    //   0, GetResource("audio_short16.pcm"), true,
+    //   kFileFormatPcm16kHzFile)); MARK(); // loop
+    TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    MARK();
+    TEST_MUSTPASS(file->StartPlayingFileLocally(
+        0, GetResource("audio_short16.pcm"), false,
+        kFileFormatPcm16kHzFile, 1.0, 0, 2000));
+    MARK(); // play segment
+    SLEEP(2500);
+    TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    MARK();
+    TEST_MUSTPASS(!file->StartPlayingFileLocally(
+        0, GetResource("audio_short16.pcm"), false,
+        kFileFormatPcm16kHzFile, 1.0, 2000, 1000));
+    MARK(); // invalid segment
+    TEST_MUSTPASS(base->LastError() != VE_BAD_FILE);
+    TEST_MUSTPASS(!file->StartPlayingFileLocally(
+        0, GetResource("audio_short16.pcm"), false,
+        kFileFormatPcm16kHzFile, 1.0, 21000, 30000));
+    MARK(); // start > file size
+    TEST_MUSTPASS(base->LastError() != VE_BAD_FILE);
+    TEST_MUSTPASS(!file->StartPlayingFileLocally(
+        0, GetResource("audio_short16.pcm"), false,
+        kFileFormatPcm16kHzFile, 1.0, 100, 100));
+    MARK(); // invalid segment
+    TEST_MUSTPASS(base->LastError() != VE_BAD_FILE);
+    TEST_MUSTPASS(file->StartPlayingFileLocally(
+        0, GetResource("audio_long16.pcm")));
+    MARK(); // should work again (restarts file)
+    TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    MARK();
+    TEST_MUSTPASS(!file->StartPlayingFileLocally(0, (InStream*)NULL));
+    MARK(); // NULL stream pointer should be rejected
+    TEST_MUSTPASS(base->LastError() != VE_BAD_FILE);
+
+    AOK();
+    ANL();
+
+    TEST(IsPlayingFileLocally);
+    ANL();
+
+    TEST_MUSTPASS(0 != file->IsPlayingFileLocally(0));
+    MARK(); // inactive
+    TEST_MUSTPASS(file->StartPlayingFileLocally(
+        0, GetResource("audio_long16.pcm")));
+    MARK();
+    TEST_MUSTPASS(1 != file->IsPlayingFileLocally(0));
+    MARK(); // active
+    AOK();
+    ANL();
+
+    TEST(ScaleLocalFilePlayout);
+    ANL();
+    TEST_MUSTPASS(file->ScaleLocalFilePlayout(0, 1.0));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(file->ScaleLocalFilePlayout(0, 0.0));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(file->ScaleLocalFilePlayout(0, 0.5));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(file->ScaleLocalFilePlayout(0, 0.25));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    MARK();
+    AOK();
+    ANL();
+
+    // Replace microphone with file and play out on remote side
+    // All channels, per channel
+    // Different mixing frequencies
+    TEST(StartPlayingFileAsMicrophone);
+    ANL();
+    TEST(IsPlayingFileAsMicrophone);
+    ANL();
+    TEST(ScaleFileAsMicrophonePlayout);
+    ANL();
+    CodecInst tempCodec;
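+    // Loop over channel id -1 and 0 (-1 presumably addresses all channels in
+    // the file API) and over narrowband, wideband and super-wideband codecs.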
+    for (int ch = -1; ch < 1; ++ch) // Channel -1 and 0
+    {
+        TEST_LOG("Testing channel = %d \n", ch);
+        for (int fs = 1; fs < 4; ++fs) // nb, wb and swb codecs
+        {
+            switch (fs)
+            {
+                case 1: // nb
+                    TEST_LOG("Testing with nb codec \n");
+                    tempCodec.channels = 1;
+                    tempCodec.pacsize = 160;
+                    tempCodec.plfreq = 8000;
+                    strcpy(tempCodec.plname, "PCMU");
+                    tempCodec.pltype = 0;
+                    tempCodec.rate = 64000;
+                    break;
+                case 2: // wb
+#ifdef WEBRTC_CODEC_ISAC
+                    TEST_LOG("Testing with wb codec \n");
+                    tempCodec.channels = 1;
+                    tempCodec.pacsize = 480;
+                    tempCodec.plfreq = 16000;
+                    strcpy(tempCodec.plname, "ISAC");
+                    tempCodec.pltype = 103;
+                    tempCodec.rate = 32000;
+                    break;
+#else
+                    TEST_LOG("NOT testing with wb codec - "
+                        "WEBRTC_CODEC_ISAC not defined \n");
+                    continue;
+#endif
+                case 3: // swb
+#ifdef WEBRTC_CODEC_PCM16
+                    TEST_LOG("Testing with swb codec \n");
+                    tempCodec.channels = 1;
+                    tempCodec.pacsize = 640;
+                    tempCodec.plfreq = 32000;
+                    strcpy(tempCodec.plname, "L16");
+                    tempCodec.pltype = 125;
+                    tempCodec.rate = 512000;
+                    break;
+#else
+                    TEST_LOG("NOT testing with swb codec -"
+                        " WEBRTC_CODEC_PCM16 not defined \n");
+                    continue;
+#endif
+            }
+            TEST_MUSTPASS(base->StopSend(0));
+            TEST_MUSTPASS(base->StopPlayout(0));
+            TEST_MUSTPASS(base->StopReceive(0));
+            TEST_MUSTPASS(codec->SetRecPayloadType(0, tempCodec));
+            TEST_MUSTPASS(base->StartReceive(0));
+            TEST_MUSTPASS(base->StartPlayout(0));
+            TEST_MUSTPASS(base->StartSend(0));
+            TEST_MUSTPASS(codec->SetSendCodec(0, tempCodec));
+
+            TEST_LOG("File 1 in 16 kHz no mix, 2 in 16 kHz mix,"
+                " 3 in 8 kHz no mix, 4 in 8 kHz mix \n");
+
+            TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+                ch, GetResource("audio_long16.pcm")));
+            MARK(); // don't mix
+            SLEEP(2000);
+            TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(ch));
+            MARK();
+            TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+                ch, GetResource("audio_long16.wav"), false, true,
+                kFileFormatWavFile));
+            MARK(); // mix
+            SLEEP(2000);
+            TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(ch));
+            MARK();
+            TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+                ch, GetResource("audio_long8.pcm"), false, false,
+                kFileFormatPcm8kHzFile));
+            MARK(); // don't mix
+            SLEEP(2000);
+            TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(ch));
+            MARK();
+            TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+                ch, GetResource("audio_long8.pcm"), false, true,
+                kFileFormatPcm8kHzFile));
+            MARK(); // mix
+            SLEEP(2000);
+            TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(ch));
+            MARK();
+            TEST_MUSTPASS(!file->StartPlayingFileAsMicrophone(
+                ch, (InStream*)NULL));
+            MARK(); // force error
+            AOK();
+            ANL();
+
+            TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+                ch, GetResource("audio_long16.pcm")));
+            TEST_MUSTPASS(1 != file->IsPlayingFileAsMicrophone(ch));
+            TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(ch));
+            TEST_MUSTPASS(0 != file->IsPlayingFileAsMicrophone(ch));
+            AOK();
+            ANL();
+
+            TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+                ch, GetResource("audio_long16.pcm")));
+            TEST_MUSTPASS(file->ScaleFileAsMicrophonePlayout(ch, 1.0));
+            MARK();
+            SLEEP(1000);
+            TEST_MUSTPASS(file->ScaleFileAsMicrophonePlayout(ch, 0.5));
+            MARK();
+            SLEEP(1000);
+            TEST_MUSTPASS(file->ScaleFileAsMicrophonePlayout(ch, 0.25));
+            MARK();
+            SLEEP(1000);
+            TEST_MUSTPASS(file->ScaleFileAsMicrophonePlayout(ch, 0.0));
+            MARK();
+            SLEEP(1000);
+            TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(ch));
+            MARK();
+            AOK();
+            ANL();
+        }
+    }
+
+    // Record speaker signal to file
+
+    CodecInst fcomp = { 0, "L16", 8000, 80, 1, 128000 };
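+    // fcomp holds the target format for the recording tests below (field
+    // order presumably pltype, plname, plfreq, pacsize, channels, rate).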
+
+    TEST(StartRecordingPlayout);
+    ANL();
+    TEST(StopRecordingPlayout);
+    ANL();
+
+    TEST_MUSTPASS(file->StartRecordingPlayout(0,
+                                              GetFilename("rec_play16.pcm")));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(file->StopRecordingPlayout(0));
+    MARK();
+
+    fcomp.plfreq = 8000;
+    strcpy(fcomp.plname, "L16");
+    TEST_MUSTPASS(file->StartRecordingPlayout(0, GetFilename("rec_play8.wav"),
+                                              &fcomp));
+    SLEEP(1000);
+    TEST_MUSTPASS(file->StopRecordingPlayout(0));
+    MARK();
+
+    fcomp.plfreq = 16000;
+    strcpy(fcomp.plname, "L16");
+    TEST_MUSTPASS(file->StartRecordingPlayout(0, GetFilename("rec_play16.wav"),
+                                              &fcomp));
+    SLEEP(1000);
+    TEST_MUSTPASS(file->StopRecordingPlayout(0));
+    MARK();
+
+    fcomp.pltype = 0;
+    fcomp.plfreq = 8000;
+    strcpy(fcomp.plname, "PCMU");
+    fcomp.rate = 64000;
+    fcomp.pacsize = 160;
+    fcomp.channels = 1;
+
+    TEST_MUSTPASS(file->StartRecordingPlayout(0,
+                                              GetFilename("rec_play_pcmu.wav"),
+                                              &fcomp));
+    SLEEP(1000);
+    TEST_MUSTPASS(file->StopRecordingPlayout(0));
+    MARK();
+
+    fcomp.pltype = 8;
+    fcomp.plfreq = 8000;
+    strcpy(fcomp.plname, "PCMA");
+    TEST_MUSTPASS(file->StartRecordingPlayout(0,
+                                              GetFilename("rec_play_pcma.wav"),
+                                              &fcomp));
+    SLEEP(1000);
+    TEST_MUSTPASS(file->StopRecordingPlayout(0));
+    MARK();
+
+    fcomp.pltype = 97;
+    fcomp.pacsize = 240;
+    fcomp.rate = 13300;
+    fcomp.plfreq = 8000;
+    strcpy(fcomp.plname, "ILBC");
+    TEST_MUSTPASS(file->StartRecordingPlayout(0,
+                                              GetFilename("rec_play.ilbc"),
+                                              &fcomp));
+    SLEEP(1000);
+    TEST_MUSTPASS(file->StopRecordingPlayout(0));
+    MARK();
+
+    TEST_MUSTPASS(file->StartRecordingPlayout(
+        -1, GetFilename("rec_play16_mixed.pcm")));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(file->StopRecordingPlayout(-1));
+    MARK();
+
+    // TEST_MUSTPASS(file->StopPlayingFileLocally(0)); // Why should this work?
+
+    TEST_LOG("\nplaying out...\n");
+    TEST_MUSTPASS(file->StartPlayingFileLocally(
+        0, GetFilename("rec_play.ilbc"), false, kFileFormatCompressedFile));
+    MARK();
+    SLEEP(2000);
+
+    AOK();
+    ANL();
+
+    // Record microphone signal to file
+
+    TEST(StartRecordingMicrophone);
+    ANL();
+    TEST(StopRecordingMicrophone);
+    ANL();
+
+    TEST_MUSTPASS(file->StartRecordingMicrophone(GetFilename("rec_mic16.pcm")));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(file->StopRecordingMicrophone());
+    MARK();
+
+    base->StopSend(0);
+    TEST_MUSTPASS(file->StartRecordingMicrophone(GetFilename("rec_mic16.pcm")));
+    MARK(); // record without sending as well
+    SLEEP(1000);
+    TEST_MUSTPASS(file->StopRecordingMicrophone());
+    MARK();
+    base->StartSend(0); // restore sending
+
+    fcomp.plfreq = 8000;
+    strcpy(fcomp.plname, "L16");
+    TEST_MUSTPASS(file->StartRecordingMicrophone(
+        GetFilename("rec_play8.wav"), &fcomp));
+    SLEEP(1000);
+    TEST_MUSTPASS(file->StopRecordingMicrophone());
+    MARK();
+
+    fcomp.plfreq = 16000;
+    strcpy(fcomp.plname, "L16");
+    TEST_MUSTPASS(file->StartRecordingMicrophone(
+        GetFilename("rec_play16.wav"), &fcomp));
+    SLEEP(1000);
+    TEST_MUSTPASS(file->StopRecordingMicrophone());
+    MARK();
+
+    // FT#1810, the following test is to make sure StartRecordingCall will
+    // record both mic and file
+    TEST_LOG("StartRecordingCall, record both mic and file in specific"
+        " channels \n");
+    TEST_LOG("Create maxnumofchannels \n");
+    for (int i = 1; i < base->MaxNumOfChannels(); i++)
+    {
+        int ch = base->CreateChannel();
+        TEST_MUSTPASS(ch == -1);
+        TEST_MUSTPASS(base->StopPlayout(ch));
+    }
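+    // Channel 1 is looped back to itself on port 12356 below, while channel 0
+    // plays a file as microphone and the microphone signal is recorded in
+    // three variants: channel 0 only, all channels (-1), and iLBC.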
+
+    TEST_MUSTPASS(base->SetSendDestination(1, 12356, "127.0.0.1"));
+    TEST_MUSTPASS(base->SetLocalReceiver(1, 12356));
+    TEST_MUSTPASS(base->StartReceive(1));
+    TEST_MUSTPASS(base->StopPlayout(1));
+    TEST_MUSTPASS(base->StartSend(1));
+    TEST_MUSTPASS(base->StartPlayout(1));
+
+    TEST_LOG("ALways playing audio_long16.pcm for "
+        "channel 0 in background \n");
+    fcomp.plfreq = 16000;
+    strcpy(fcomp.plname, "L16");
+    TEST_LOG("Recording microphone to L16, please speak \n");
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+        0, GetResource("audio_long16.pcm"), true , true));
+    TEST_MUSTPASS(file->StartRecordingMicrophone(
+        GetFilename("rec_play_ch.wav"), &fcomp));
+    MARK();
+    SLEEP(3000);
+    TEST_MUSTPASS(file->StopRecordingMicrophone());
+    MARK();
+    TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+    TEST_LOG("Playing recording file, you should only hear what you said \n");
+    TEST_MUSTPASS(file->StartPlayingFileLocally(
+        0, GetFilename("rec_play_ch.wav"), false, kFileFormatWavFile));
+    SLEEP(2500);
+    TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    TEST_LOG("Recording microphone 0 to L16, please speak \n");
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+        -1, GetResource("audio_long16.pcm"), true , true));
+    TEST_MUSTPASS(file->StartRecordingMicrophone(
+        GetFilename("rec_play_ch_0.wav"), &fcomp));
+    MARK();
+    SLEEP(3000);
+    TEST_MUSTPASS(file->StopRecordingMicrophone());
+    MARK();
+    TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(-1));
+    TEST_LOG("Playing recording file, you should hear what you said and"
+        " audio_long16.pcm \n");
+    TEST_MUSTPASS(file->StartPlayingFileLocally(
+        0, GetFilename("rec_play_ch_0.wav"), false, kFileFormatWavFile));
+    SLEEP(2500);
+    TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    TEST_LOG("Recording microphone to ilbc, please speak \n");
+    strcpy(fcomp.plname, "ilbc");
+    fcomp.plfreq = 8000;
+    fcomp.pacsize = 160;
+    fcomp.rate = 15200;
+    fcomp.channels = 1;
+    fcomp.pltype = 97;
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+        0, GetResource("audio_long16.pcm"), true , true));
+    TEST_MUSTPASS(file->StartRecordingMicrophone(
+        GetFilename("rec_play_ch_0.ilbc"), &fcomp));
+    MARK();
+    SLEEP(3000);
+    TEST_MUSTPASS(file->StopRecordingMicrophone());
+    MARK();
+    TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+    TEST_LOG("Playing recording file, you should only hear what you said \n");
+    TEST_MUSTPASS(file->StartPlayingFileLocally(
+        0, GetFilename("rec_play_ch_0.ilbc"), false,
+        kFileFormatCompressedFile));
+    SLEEP(2500);
+    TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    for (int i = 1; i < base->MaxNumOfChannels(); i++)
+    {
+        TEST_MUSTPASS(base->DeleteChannel(i));
+    }
+
+    AOK();
+    ANL();
+
+    // Record mixed (speaker + microphone) signal to file
+
+
+#if !defined(MAC_IPHONE) && !defined(ANDROID)
+    TEST(StartRecordingSpeakerStereo);
+    ANL();
+    TEST(StopRecordingSpeakerStereo);
+    ANL();
+
+    VoEHardware* hardware = _mgr.HardwarePtr();
+    TEST_MUSTPASS(NULL == hardware);
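+    // No stereo recording API is exercised here; streaming is stopped, the
+    // recording/playout devices are re-selected, and streaming is restarted
+    // (twice), which exercises device switching while a call is set up.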
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopSend(0));
+#if defined(_WIN32)
+    TEST_MUSTPASS(hardware->SetRecordingDevice(-1));
+    TEST_MUSTPASS(hardware->SetPlayoutDevice(-1));
+#else
+    TEST_MUSTPASS(hardware->SetRecordingDevice(0));
+    TEST_MUSTPASS(hardware->SetPlayoutDevice(0));
+#endif
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    MARK();
+
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopSend(0));
+#if defined(_WIN32)
+    TEST_MUSTPASS(hardware->SetRecordingDevice(-1));
+    TEST_MUSTPASS(hardware->SetPlayoutDevice(-1));
+#else
+    TEST_MUSTPASS(hardware->SetRecordingDevice(0));
+    TEST_MUSTPASS(hardware->SetPlayoutDevice(0));
+#endif
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+
+    AOK();
+    ANL();
+#else
+    TEST_LOG("Skipping stereo record tests -"
+        " MAC_IPHONE or ANDROID is defined \n");
+#endif // #if !defined(MAC_IPHONE) && !defined(ANDROID)
+    // Conversion between different file formats
+
+#if defined(MAC_IPHONE) || defined(ANDROID)
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopSend(0));
+#endif
+
+    TEST(ConvertPCMToWAV);
+    ANL();
+
+    TEST_MUSTPASS(file->ConvertPCMToWAV(
+        GetResource("audio_long16.pcm"),
+        GetFilename("singleUserDemoConv.wav")));
+    MARK();
+    TEST_MUSTPASS(!file->ConvertPCMToWAV((InStream*)NULL,
+        (OutStream*)NULL)); MARK();    // invalid stream handles
+    AOK();
+    ANL();
+
+    TEST(ConvertWAVToPCM);
+    ANL();
+
+    TEST_MUSTPASS(file->ConvertWAVToPCM(
+        GetResource("audio_long16.wav"),
+        GetFilename("singleUserDemoConv.pcm")));
+    MARK();
+    TEST_MUSTPASS(!file->ConvertWAVToPCM((InStream*)NULL, (OutStream*)NULL));
+    MARK(); // invalid stream handles
+    AOK();
+    ANL();
+
+    TEST(ConvertPCMToCompressed);
+    ANL();
+
+    fcomp.plfreq = 16000;
+    strcpy(fcomp.plname, "L16");
+    TEST_MUSTPASS(!file->ConvertPCMToCompressed(
+        GetResource("audio_long16.pcm"),
+        GetFilename("singleUserDemoConv16_dummy.wav"), &fcomp));
+    MARK(); // should not be supported
+
+    fcomp.plfreq = 8000; strcpy(fcomp.plname, "ilbc"); fcomp.pacsize = 160;
+    fcomp.rate = 15200; fcomp.pltype = 97; fcomp.channels = 1;
+    TEST_MUSTPASS(file->ConvertPCMToCompressed(
+        GetResource("audio_long16.pcm"),
+        GetFilename("singleUserDemoConv.ilbc"), &fcomp)); MARK();
+    AOK(); ANL();
+
+    TEST(ConvertCompressedToPCM);
+    ANL();
+
+    TEST_MUSTPASS(file->ConvertCompressedToPCM(
+        GetFilename("singleUserDemoConv.ilbc"),
+        GetFilename("singleUserDemoConv_ilbc.pcm"))); MARK();
+    TEST_MUSTPASS(!file->ConvertCompressedToPCM(
+        GetResource("audio_long16.pcm"),
+        GetFilename("singleUserDemoConv_dummy.pcm"))); MARK();
+    AOK(); ANL();
+
+#if defined(MAC_IPHONE) || defined(ANDROID)
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+#endif
+
+    // Misc file functions
+
+    TEST(GetFileDuration);
+    ANL();
+
+    int dur;
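+    // The duration is returned through 'dur'; the file format argument tells
+    // the parser how to interpret raw PCM files (8 kHz vs 16 kHz).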
+
+    TEST_MUSTPASS(file->GetFileDuration(
+        GetResource("audio_long16.pcm"), dur));
+    TEST_MUSTPASS(file->GetFileDuration(
+        GetResource("audio_long8.pcm"), dur, kFileFormatPcm8kHzFile));
+    TEST_MUSTPASS(file->GetFileDuration(
+        GetResource("audio_long16.pcm"), dur, kFileFormatPcm16kHzFile));
+    TEST_MUSTPASS(file->GetFileDuration(
+        GetResource("audio_long16.wav"), dur, kFileFormatPcm8kHzFile));
+    TEST_MUSTPASS(file->GetFileDuration(
+        GetFilename("singleUserDemoConv.ilbc"), dur,
+        kFileFormatCompressedFile));
+
+    AOK();
+    ANL();
+
+    TEST(GetPlaybackPosition);
+    ANL();
+
+    int pos;
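+    // The playback position is reported in milliseconds (compare with the
+    // elapsed SLEEP times below).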
+
+    TEST_MUSTPASS(file->StartPlayingFileLocally(
+        0, GetResource("audio_long16.pcm")));
+    SLEEP(1000);
+    TEST_MUSTPASS(file->GetPlaybackPosition(0, pos));
+    MARK(); // position should be ~1000
+    SLEEP(1000);
+    TEST_MUSTPASS(file->GetPlaybackPosition(0, pos));
+    MARK(); // position should be ~2000
+    // SLEEP(70*1000);
+    // file is no longer playing
+    // TEST_MUSTPASS(file->GetPlaybackPosition(0, pos)); MARK();
+    TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    AOK();
+    ANL();
+
+    // These tests are related to defect 5136
+    // They play .wav files with different sample freq for 5s
+    char localFiles[7][50] = { "audio_tiny8.wav",
+            "audio_tiny11.wav", "audio_tiny16.wav",
+            "audio_tiny22.wav", "audio_tiny32.wav",
+            "audio_tiny44.wav", "audio_tiny48.wav" };
+    char freq[7][5] = { "8", "11", "16", "22", "32", "44.1", "48" };
+    TEST_MUSTPASS(base->StopReceive(0));
+    for (int i = 0; i < 7; i++)
+    {
+        TEST_LOG("Playing file %s, in %s KHz \n", localFiles[i], freq[i]);
+        TEST_MUSTPASS(file->StartPlayingFileLocally(
+            0, GetResource(localFiles[i]),false, kFileFormatWavFile, 1));
+        SLEEP(4500); // The file should not end
+        TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    }
+
+    // TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0)); // Should not work
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->Terminate());
+
+    AOK();
+    ANL();
+
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest::TestHardware
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestHardware()
+{
+    PrepareTest("Hardware");
+
+    VoEBase* base = _mgr.BasePtr();
+    VoEHardware* hardware = _mgr.HardwarePtr();
+
+#ifdef _USE_EXTENDED_TRACE_
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename(
+        "VoEHardware_trace.txt")));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+                                              kTraceWarning |
+                                              kTraceError |
+                                              kTraceCritical |
+                                              kTraceApiCall |
+                                              kTraceMemory |
+                                              kTraceInfo));
+#endif
+
+    // Set/GetAudioDeviceLayer
+    TEST(Set/GetAudioDeviceLayer);
+    ANL();
+    AudioLayers wantedLayer = kAudioPlatformDefault;
+    AudioLayers givenLayer;
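+    // The device layer must be selected before base->Init(); once the engine
+    // is initialized, SetAudioDeviceLayer() fails with VE_ALREADY_INITED
+    // (verified further down).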
+
+#if defined(_WIN32)
+    wantedLayer = kAudioWindowsCore;
+    hardware->SetAudioDeviceLayer(wantedLayer);
+    TEST_LOG("If you run on XP or below, CoreAudio "
+        "should not be able to set.\n");
+    TEST_LOG("If you run on Vista or above, CoreAudio "
+        "should be able to set.\n");
+    TEST_LOG("Verify that this is the case.\n");
+
+    TEST_MUSTPASS(base->Init());
+
+    TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+    if(givenLayer == kAudioWindowsCore)
+    {
+        TEST_LOG("CoreAudio was set\n");
+    }
+    else
+    {
+        TEST_LOG("CoreAudio was *not* set\n");
+    }
+
+    TEST_MUSTPASS(base->Terminate());
+
+    wantedLayer = kAudioWindowsWave;
+    TEST_MUSTPASS(hardware->SetAudioDeviceLayer(wantedLayer));
+    TEST_LOG("Wave audio should always be able to set.\n");
+
+    TEST_MUSTPASS(base->Init());
+
+    TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+    if(givenLayer == kAudioWindowsWave)
+    {
+        TEST_LOG("Wave audio was set\n");
+    }
+    else
+    {
+        TEST_LOG("Wave audio was not set\n");
+    }
+
+    TEST_MUSTPASS(base->Terminate());
+    // end _WIN32
+#elif defined(WEBRTC_LINUX) && !defined(ANDROID)
+    wantedLayer = kAudioLinuxPulse;
+    TEST_MUSTPASS(hardware->SetAudioDeviceLayer(wantedLayer));
+    TEST_LOG("If you run on Linux with no/unsupported PA version, PulseAudio "
+        "7should not be able to set.\n");
+    TEST_LOG("If you run on Linux with supported PA version running, PulseAudio"
+        " should be able to set.\n");
+    TEST_LOG("Verify that this is the case.\n");
+
+    TEST_MUSTPASS(base->Init());
+
+    TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+    if(givenLayer == kAudioLinuxPulse)
+    {
+        TEST_LOG("\nPulseAudio was set\n");
+    }
+    else
+    {
+        TEST_LOG("\nPulseAudio was not set\n");
+    }
+
+    TEST_MUSTPASS(base->Terminate());
+
+    wantedLayer = kAudioLinuxAlsa;
+    TEST_MUSTPASS(hardware->SetAudioDeviceLayer(wantedLayer));
+    TEST_LOG("ALSA audio should always be able to set.\n");
+
+    TEST_MUSTPASS(base->Init());
+
+    TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+    if(givenLayer == kAudioLinuxAlsa)
+    {
+        TEST_LOG("\nALSA audio was set\n");
+    }
+    else
+    {
+        TEST_LOG("\nALSA audio was not set\n");
+    }
+
+    TEST_MUSTPASS(base->Terminate());
+#endif // defined(WEBRTC_LINUX) && !defined(ANDROID)
+    // Invalid arguments
+    wantedLayer = (AudioLayers) 17;
+    TEST_MUSTPASS(-1 != hardware->SetAudioDeviceLayer(wantedLayer));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+
+    // Basic usage
+    wantedLayer = kAudioPlatformDefault;
+    TEST_MUSTPASS(hardware->SetAudioDeviceLayer(wantedLayer));
+    TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+    TEST_MUSTPASS(givenLayer != wantedLayer);
+    MARK();
+
+    TEST_MUSTPASS(base->Init());
+    TEST_MUSTPASS(base->CreateChannel());
+
+    wantedLayer = kAudioPlatformDefault;
+    TEST_MUSTPASS(-1 != hardware->SetAudioDeviceLayer(wantedLayer));
+    TEST_MUSTPASS(VE_ALREADY_INITED != base->LastError());
+    MARK();
+    TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+    MARK();
+    switch (givenLayer)
+    {
+        case kAudioPlatformDefault:
+            // already set above
+            break;
+        case kAudioWindowsCore:
+            TEST_LOG("\nRunning kAudioWindowsCore\n");
+            break;
+        case kAudioWindowsWave:
+            TEST_LOG("\nRunning kAudioWindowsWave\n");
+            break;
+        case kAudioLinuxAlsa:
+            TEST_LOG("\nRunning kAudioLinuxAlsa\n");
+            break;
+        case kAudioLinuxPulse:
+            TEST_LOG("\nRunning kAudioLinuxPulse\n");
+            break;
+        default:
+            TEST_LOG("\nERROR: Running unknown audio layer!!\n");
+            return -1;
+    }
+    ANL();
+
+#if !defined(MAC_IPHONE) && !defined(ANDROID)
+    // GetRecording/PlayoutDeviceStatus
+    TEST(GetRecording/PlayoutDeviceStatus);
+    ANL();
+    bool isRecAvailable = false;
+    bool isPlayAvailable = false;
+    TEST_MUSTPASS(hardware->GetRecordingDeviceStatus(isRecAvailable));
+    TEST_MUSTPASS(!isRecAvailable);
+    MARK();
+    TEST_MUSTPASS(hardware->GetPlayoutDeviceStatus(isPlayAvailable));
+    TEST_MUSTPASS(!isPlayAvailable);
+    MARK();
+
+    ANL();
+
+    int nRec = 0, nPlay = 0;
+    char devName[128];
+    char guidName[128];
+    int idx;
+
+    TEST_MUSTPASS(hardware->GetNumOfPlayoutDevices(nPlay));
+
+    // GetPlayoutDeviceName
+    TEST(GetPlayoutDeviceName);
+    ANL();
+    TEST_MUSTPASS(-1 != hardware->GetPlayoutDeviceName(nPlay, devName,
+                                                       guidName));
+    TEST_MUSTPASS(VE_CANNOT_RETRIEVE_DEVICE_NAME != base->LastError());
+    MARK();
+    TEST_MUSTPASS(-1 != hardware->GetPlayoutDeviceName(-2, devName, guidName));
+    TEST_MUSTPASS(VE_CANNOT_RETRIEVE_DEVICE_NAME != base->LastError());
+    MARK();
+    TEST_MUSTPASS(-1 != hardware->GetPlayoutDeviceName(nPlay+1, devName,
+                                                       guidName));
+    TEST_MUSTPASS(VE_CANNOT_RETRIEVE_DEVICE_NAME != base->LastError());
+    MARK();
+    TEST_MUSTPASS(-1 != hardware->GetPlayoutDeviceName(0, NULL, guidName));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    TEST_MUSTPASS(hardware->GetPlayoutDeviceName(0, devName, NULL));
+
+    // default tests
+    for (idx = 0; idx < nPlay; idx++)
+    {
+        TEST_MUSTPASS(hardware->GetPlayoutDeviceName(idx, devName, guidName));
+        MARK();
+        TEST_MUSTPASS(hardware->SetPlayoutDevice(idx));
+    }
+
+    ANL();
+
+    TEST_MUSTPASS(hardware->GetNumOfRecordingDevices(nRec));
+
+    // GetRecordingDeviceName
+    TEST(GetRecordingDeviceName);
+    ANL();
+    TEST_MUSTPASS(-1 != hardware->GetRecordingDeviceName(nRec, devName,
+                                                         guidName));
+    TEST_MUSTPASS(VE_CANNOT_RETRIEVE_DEVICE_NAME != base->LastError());
+    MARK();
+    TEST_MUSTPASS(-1 != hardware->GetRecordingDeviceName(-2, devName, guidName));
+    TEST_MUSTPASS(VE_CANNOT_RETRIEVE_DEVICE_NAME != base->LastError());
+    MARK();
+    TEST_MUSTPASS(-1 != hardware->GetRecordingDeviceName(nRec+1, devName,
+                                                         guidName));
+    TEST_MUSTPASS(VE_CANNOT_RETRIEVE_DEVICE_NAME != base->LastError());
+    MARK();
+    TEST_MUSTPASS(-1 != hardware->GetRecordingDeviceName(0, NULL, guidName));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    TEST_MUSTPASS(hardware->GetRecordingDeviceName(0, devName, NULL));
+
+    // default tests
+    for (idx = 0; idx < nRec; idx++)
+    {
+        TEST_MUSTPASS(hardware->GetRecordingDeviceName(idx, devName, guidName));
+        MARK();
+        TEST_MUSTPASS(hardware->SetRecordingDevice(idx));
+    }
+    ANL();
+
+    // SetRecordingDevice
+    TEST(SetRecordingDevice);
+    ANL();
+    TEST_MUSTPASS(hardware->SetRecordingDevice(0));
+    MARK();
+    TEST_MUSTPASS(hardware->SetRecordingDevice(0, kStereoLeft));
+    MARK();
+    TEST_MUSTPASS(hardware->SetRecordingDevice(0, kStereoRight));
+    MARK();
+    ANL();
+
+    // SetPlayoutDevice
+    TEST(SetPlayoutDevice);
+    ANL();
+#if defined(_WIN32)
+    TEST_MUSTPASS(hardware->SetPlayoutDevice(-1)); MARK();
+#else
+    TEST_MUSTPASS(hardware->SetPlayoutDevice(0));
+    MARK();
+#endif
+    ANL();
+#endif // #if !defined(MAC_IPHONE) && !defined(ANDROID)
+#if defined(MAC_IPHONE)
+    TEST(ResetSoundDevice); ANL();
+
+    for (int p=0; p<=60; p+=20)
+    {
+        TEST_LOG("Resetting sound device several times with pause %d ms\n", p);
+        for (int l=0; l<50; ++l)
+        {
+            TEST_MUSTPASS(hardware->ResetAudioDevice()); MARK();
+            SLEEP(p);
+        }
+        ANL();
+    }
+
+    TEST_LOG("Start streaming - verify the audio after each batch of resets \n");
+    TEST_MUSTPASS(base->SetSendDestination(0, 8000, "127.0.0.1"));
+    TEST_MUSTPASS(base->SetLocalReceiver(0,8000));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(2000);
+
+    SLEEP(2000);
+    for (int p=0; p<=60; p+=20)
+    {
+        TEST_LOG("Resetting sound device several time with pause %d ms\n", p);
+        for (int l=0; l<20; ++l)
+        {
+            TEST_MUSTPASS(hardware->ResetAudioDevice()); MARK();
+            SLEEP(p);
+        }
+        ANL();
+        SLEEP(2000);
+    }
+
+    TEST_LOG("Stop streaming \n");
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+#endif // defined(MAC_IPHONE)
+#ifdef MAC_IPHONE
+    TEST_LOG("\nNOTE: Always run hardware tests also without extended tests "
+        "enabled,\nsince the extended tests are pre-streaming tests only.\n");
+#endif
+
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->Terminate());
+
+    ANL();
+    AOK();
+
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest::TestNetEqStats
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestNetEqStats()
+{
+    PrepareTest("NetEqStats (!EMPTY!)");
+
+    AOK();
+    ANL();
+
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest::TestNetwork
+//
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestNetwork()
+{
+    PrepareTest("Network");
+
+#ifdef ANDROID
+    int sleepTime = 200;
+    int sleepTime2 = 250;
+#elif defined(MAC_IPHONE) // MAC_IPHONE needs more delay for getSourceInfo()
+    int sleepTime = 150;
+    int sleepTime2 = 200;
+#else
+    int sleepTime = 100;
+    int sleepTime2 = 200;
+#endif
+
+    const int truncLen(5);
+
+    VoEBase* base = _mgr.BasePtr();
+    VoEFile* file = _mgr.FilePtr();
+    VoENetwork* netw = _mgr.NetworkPtr();
+    VoERTP_RTCP* rtp_rtcp = _mgr.RTP_RTCPPtr();
+
+#ifdef _USE_EXTENDED_TRACE_
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename(
+        "VoENetwork_trace.txt")));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+                                              kTraceWarning |
+                                              kTraceError |
+                                              kTraceCritical |
+                                              kTraceApiCall |
+                                              kTraceMemory |
+                                              kTraceInfo));
+#endif
+
+    TEST_MUSTPASS(base->Init());
+
+    // ------------------------------------------------------------------------
+    // >> GetLocalIP
+    //
+    // State: VE initialized, no existing channels
+
+    TEST(GetLocalIP);
+    ANL();
+
+#ifdef MAC_IPHONE
+    // Should fail
+    TEST_MUSTPASS(!netw->GetLocalIP(NULL, 0)); MARK();
+    TEST_ERROR(VE_FUNC_NOT_SUPPORTED);
+
+    ANL();
+    printf("NOTE: Local IP must be set in source code (line %d) \n",
+           __LINE__ + 1);
+    const char* localIP = "192.168.1.4";
+
+#else
+    char localIP[64];
+
+    // invalid parameter
+    TEST_MUSTPASS(!netw->GetLocalIP(NULL));
+    MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+
+    // default function calls (IPv4)
+    TEST_MUSTPASS(netw->GetLocalIP(localIP));
+    MARK();
+    TEST_LOG("[local IPv4: %s]\n", localIP);
+    TEST_MUSTPASS(netw->GetLocalIP(localIP));
+    MARK();
+
+#if !defined(WEBRTC_MAC) && !defined(ANDROID)
+    // default function calls (IPv6)
+    TEST_MUSTPASS(netw->GetLocalIP(localIP, true));
+    MARK();
+    TEST_LOG("[local IPv6: %s]\n", localIP);
+    TEST_MUSTPASS(netw->GetLocalIP(localIP, true));
+    MARK();
+#endif
+
+    // one last call to ensure that the local IP can still be retrieved
+    TEST_MUSTPASS(netw->GetLocalIP(localIP));
+    MARK();
+#endif
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // >> end of GetLocalIP
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> GetSourceInfo
+    //
+    // - VE initialized
+    // - no existing channels
+
+    TEST(GetSourceInfo);
+    ANL();
+
+    int rtpPort(0);
+    int rtcpPort(0);
+    char ipaddr[64] = { 0 };
+    ExtendedTestTransport* ptrTransport(NULL);
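+    // ExtendedTestTransport (defined elsewhere in this test) presumably loops
+    // sent packets straight back into VoENetwork, emulating a remote peer.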
+
+    // call without valid channel
+    TEST_MUSTPASS(!netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(base->CreateChannel());
+
+    // NULL as input string 
+    TEST_MUSTPASS(!netw->GetSourceInfo(0, rtpPort, rtcpPort, NULL));
+    MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+
+    // call when external transport is enabled
+    ptrTransport = new ExtendedTestTransport(netw);
+    TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+    TEST_MUSTPASS(!netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+    delete ptrTransport;
+
+    // call when external transport is disabled (no packet received yet)
+    TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 0);
+    TEST_MUSTPASS(rtcpPort != 0);
+    TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+    // send and receive packets with default settings for a while
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 8000));
+    TEST_MUSTPASS(base->SetSendDestination(0, 8000, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(sleepTime2); // does not guarantee RTCP
+
+    // verify remote parameters (exclude RTCP)
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 8000);
+    TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+    // ensure that valid results are maintained after StopListen
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 8000);
+    TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+    // verify that results are maintained after new call to SetLocalReceiver
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 8000));
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 8000);
+    TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+    // STATE: not listening, not sending
+    // send and receive packets with other settings for a while
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 9005));
+    TEST_MUSTPASS(base->SetSendDestination(0, 9005, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(sleepTime);
+
+    // STATE: listening, sending
+
+    // verify new remote parameters 
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 9005);
+    TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+    // restart sending to and from local IP 
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 9005, kVoEDefault, localIP));
+    TEST_MUSTPASS(base->SetSendDestination(0, 9005, localIP));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(sleepTime);
+
+    // verify new remote parameters
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 9005);
+    TEST_MUSTPASS(strcmp(ipaddr, localIP) != 0); // should not be "127.0.0.1"
+
+    // use non-default source port in outgoing packets
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 9005));
+    TEST_MUSTPASS(base->SetSendDestination(0, 9005, "127.0.0.1", 9010));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(sleepTime);
+
+    // verify new remote parameters 
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 9010);
+    TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+    // STATE: listening and sending using an extra local socket
+
+    // stop/start sending
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(sleepTime);
+
+    // verify that the unique source port is maintained for the extra socket
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 9010);
+    TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+    // set new source port for outgoing packets (9010 -> 9020)
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->SetSendDestination(0, 9005, "127.0.0.1", 9020));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(sleepTime);
+#ifdef MAC_IPHONE
+    SLEEP(500); // Need extra pause for some reason
+#endif
+
+    // verify that the unique source port is set for the new extra socket
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 9020);
+    // STATE: listening and sending using an extra local socket
+
+    // remove extra send socket and restart call again
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->DeleteChannel(0)); // delete channel => destroys the
+                                           // extra socket
+    TEST_MUSTPASS(base->CreateChannel()); // new channel uses one socket only
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 8000)); // use new port as well
+    TEST_MUSTPASS(base->SetSendDestination(0, 8000, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(sleepTime);
+
+    // verify that remote info is correct
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 8000);
+    TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+    // STATE: listening and sending using shared socket
+
+    // use non-default source port in outgoing packets to create extra send
+    // socket
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 7000));
+    TEST_MUSTPASS(base->SetSendDestination(0, 7000, "127.0.0.1", 7010));
+                                        // RTP src is 7010 => RTCP src = 7011
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(sleepTime);
+    // verify new remote parameters 
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 7010);
+    TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+    // check RTCP port as well (should be 7010 + 1 = 7011)
+    Sleep(8000, true);
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 7010);
+    TEST_MUSTPASS(rtcpPort != 7011);
+    TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+
+    TEST_MUSTPASS(base->DeleteChannel(0));
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // >> end of GetSourceInfo
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> SetExternalTransport
+    //
+    // - VE initialized
+    // - no existing channels
+    // - no media
+    //
+
+    TEST(SetExternalTransport);
+    ANL();
+
+    ptrTransport = new ExtendedTestTransport(netw);
+
+    // call without valid channel
+    TEST_MUSTPASS(!netw->DeRegisterExternalTransport(0));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(base->CreateChannel());
+
+    // different valid call combinations
+    TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+    MARK();
+    TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+    MARK();
+    TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+    MARK();
+    TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+    MARK();
+    TEST_MUSTPASS(!netw->RegisterExternalTransport(0, *ptrTransport));
+    MARK(); // must deregister first
+    TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+    MARK();
+
+    // STATE: external transport is disabled
+
+    // initialize sending and ensure that external transport can't be enabled
+    TEST_MUSTPASS(base->SetSendDestination(0, 1234, "127.0.0.2"));
+    TEST_MUSTPASS(!netw->RegisterExternalTransport(0, *ptrTransport));
+    MARK();
+    TEST_ERROR(VE_SEND_SOCKETS_CONFLICT);
+
+    // restart channel to ensure that "initialized sender" state is cleared
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->CreateChannel());
+
+    // initialize receiving and ensure that external transport can't be enabled
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 5678));
+    TEST_MUSTPASS(!netw->RegisterExternalTransport(0, *ptrTransport));
+    MARK();
+    TEST_ERROR(VE_RECEIVE_SOCKETS_CONFLICT);
+
+    // restart channel to ensure that "initialized receiver" state is cleared
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->CreateChannel());
+
+    // enable external transport and verify that "emulated loopback" works
+    TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+    MARK();
+    TEST_MUSTPASS(base->StartSend(0)); // should only start recording
+    TEST_MUSTPASS(!netw->RegisterExternalTransport(0, *ptrTransport));
+    MARK(); // should fail
+    TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+    MARK();
+    TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+    MARK();
+    Play(0, 2000, true, true); // play file as mic and verify loopback audio
+    TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+    MARK();
+
+    // STATE: external transport is disabled
+#if defined(ANDROID) || defined(MAC_IPHONE)
+    int testError = VE_FUNC_NOT_SUPPORTED;
+#else
+    int testError = VE_EXTERNAL_TRANSPORT_ENABLED;
+#endif
+
+    // check all APIs that should fail when external transport is enabled
+    int DSCP, priority, serviceType, overrideDSCP, nBytes(0);
+    bool useSetSockopt, enabled;
+    TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+    MARK();
+    TEST_MUSTPASS(!base->SetLocalReceiver(0, 12345));
+    TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+    TEST_MUSTPASS(!base->GetLocalReceiver(0, rtpPort, rtcpPort, ipaddr));
+    TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+    TEST_MUSTPASS(!base->SetSendDestination(0, 12345, "127.0.0.1"));
+    TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+    TEST_MUSTPASS(!base->GetSendDestination(0, rtpPort, ipaddr, rtpPort,
+                                            rtcpPort));
+    TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+    TEST_MUSTPASS(!netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+    TEST_MUSTPASS(!netw->EnableIPv6(0));
+    TEST_ERROR(testError);
+    TEST_MUSTPASS(netw->IPv6IsEnabled(0) != false);
+    TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+    TEST_MUSTPASS(!netw->SetSourceFilter(0, 12345, 12346));
+    TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+    TEST_MUSTPASS(!netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+    TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+
+    // modified in VoE 3.4 (can also be called for external transport)
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+
+#if (!defined(_WIN32) && !defined(WEBRTC_LINUX) && !defined(WEBRTC_MAC)) || defined(WEBRTC_EXTERNAL_TRANSPORT)
+    testError = VE_FUNC_NOT_SUPPORTED;
+#else
+    testError = VE_EXTERNAL_TRANSPORT_ENABLED;
+#endif
+    TEST_MUSTPASS(!netw->SetSendTOS(0, 0));
+    TEST_ERROR(testError);
+    TEST_MUSTPASS(!netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+    TEST_ERROR(testError);
+#if !defined(_WIN32) || defined(WEBRTC_EXTERNAL_TRANSPORT)
+    testError = VE_FUNC_NOT_SUPPORTED;
+#else
+    testError = VE_EXTERNAL_TRANSPORT_ENABLED;
+#endif
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, false, 0));
+    TEST_ERROR(testError);
+    TEST_MUSTPASS(!netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+    TEST_ERROR(testError);
+    char dummy[1] = { 'a' };
+    TEST_MUSTPASS(!netw->SendUDPPacket(0, dummy, 1, nBytes));
+    TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+
+    // always disable external transport before deleting the Transport object;
+    // will lead to crash for RTCP transmission otherwise
+    TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+    MARK();
+    delete ptrTransport;
+
+    TEST_MUSTPASS(base->DeleteChannel(0));
+
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // >> end of SetExternalTransport
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> EnableIPv6
+    //
+    // - VE initialized
+    // - no existing channels
+    // - no media
+    // - NOTE: set _ENABLE_IPV6_TESTS_ to include these tests
+    // - http://www.microsoft.com/resources/documentation/windows/xp/all/
+    //   proddocs/en-us/sag_ip_v6_pro_rt_enable.mspx?mfr=true
+    // >> ipv6 install
+    // >> ipv6 [-v] if [IfIndex]
+    // >> ping6 ::1
+    // >> ping6 fe80::1
+
+#ifdef _ENABLE_IPV6_TESTS_
+
+    TEST(EnableIPv6); ANL();
+
+    // call without valid channel
+    TEST_MUSTPASS(!netw->EnableIPv6(0)); MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(base->CreateChannel());
+
+    // call with enabled external transport
+    ptrTransport = new ExtendedTestTransport(netw);
+    TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+    TEST_MUSTPASS(!netw->EnableIPv6(0)); MARK();
+    TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+    TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+    delete ptrTransport;
+
+    // Test "locking" to IPv4
+    TEST_MUSTPASS(netw->IPv6IsEnabled(0)); MARK(); // After this call we cannot
+                                                   // enable IPv6
+    TEST_MUSTPASS(!netw->EnableIPv6(0)); MARK(); // Should fail
+
+    // Check that IPv6 address is invalid
+    TEST_MUSTPASS(!base->SetSendDestination(0, 8000, "::1")); MARK(); // fail
+
+    // New channel
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->CreateChannel());
+
+    // valid default call
+    TEST_MUSTPASS(netw->EnableIPv6(0)); MARK();
+    TEST_MUSTPASS(netw->GetLocalIP(localIP)); MARK(); // should still read IPv4
+    TEST_LOG("[local IPv4: %s]", localIP);
+
+    // ensure that Ipv6 is enabled
+    TEST_MUSTPASS(netw->IPv6IsEnabled(0) != true);
+
+    // check that IPv4 address is invalid
+    TEST_MUSTPASS(!base->SetSendDestination(0, 8000, "127.0.0.1"));
+    TEST_ERROR(VE_INVALID_IP_ADDRESS);
+
+    // verify usage of IPv6 loopback address
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 8000));
+    // IPv6 loopback address is 0:0:0:0:0:0:0:1
+    TEST_MUSTPASS(base->SetSendDestination(0, 8000, "::1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(!netw->EnableIPv6(0)); MARK(); // Should fail
+    TEST_MUSTPASS(base->StartSend(0));
+    Play(0, 2000, true, true);
+    ANL();
+
+    // Restart channel
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->CreateChannel());
+
+    TEST_MUSTPASS(netw->EnableIPv6(0)); MARK();
+    // ensure that Ipv6 is enabled
+    TEST_MUSTPASS(netw->IPv6IsEnabled(0) != true);
+
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 8000));
+    TEST_MUSTPASS(base->StartReceive(0));
+    // IPv6 loopback address is 0:0:0:0:0:0:0:1
+    TEST_MUSTPASS(base->SetSendDestination(0, 8000, "::1"));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    file->StartPlayingFileAsMicrophone(0, micFile, true, true);
+    SLEEP(500); // ensure that we receive some packets
+
+    // SetSourceFilter and GetSourceFilter
+    TEST(SetSourceFilter and GetSourceFilter for IPv6); ANL();
+    char sourceIp[64] = {0};
+    char filterIp[64] = {0};
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, sourceIp));
+    TEST_LOG("Source port: %d \n", rtpPort);
+    TEST_LOG("Source RTCP port: %d \n", rtcpPort);
+    TEST_LOG("Source IP: %s \n", sourceIp);
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, filterIp));
+    TEST_LOG("Filter port RTP: %d \n", rtpPort);
+    TEST_LOG("Filter port RTCP: %d \n", rtcpPort);
+    TEST_LOG("Filter IP: %s \n", filterIp);
+    TEST_MUSTPASS(0 != rtpPort);
+    TEST_MUSTPASS(0 != rtcpPort);
+    TEST_MUSTPASS(filterIp[0] != '\0');
+    TEST_LOG("Set filter IP to %s => should hear audio\n", sourceIp);
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, sourceIp));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, filterIp));
+    TEST_MUSTPASS(0 != rtpPort);
+    TEST_MUSTPASS(0 != rtcpPort);
+    TEST_MUSTPASS(_stricmp(filterIp, sourceIp));
+    SLEEP(1500);
+    TEST_LOG("Set filter IP to ::10:10:10 => should *not* hear audio\n");
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, "::10:10:10"));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, filterIp));
+    TEST_MUSTPASS(_stricmp(filterIp, "::10:10:10"));
+    SLEEP(1500);
+    TEST_LOG("Disable IP filter => should hear audio again\n");
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, "::0"));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, filterIp));
+    TEST_MUSTPASS(_stricmp(filterIp, "::"));
+    SLEEP(1500);
+    TEST_LOG("Set filter IP to ::10:10:10 => should *not* hear audio\n");
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, "::10:10:10"));
+    SLEEP(1500);
+    TEST_LOG("Disable IP filter => should hear audio again\n");
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, filterIp));
+    TEST_MUSTPASS(filterIp[0] != '\0');
+    SLEEP(1500);
+    TEST_LOG("Set filter IP to ::10:10:10 => should *not* hear audio\n");
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, "::10:10:10"));
+    SLEEP(1500);
+    TEST_LOG("Disable IP filter => should hear audio again\n");
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, "::"));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, filterIp));
+    TEST_MUSTPASS(_stricmp(filterIp, "::"));
+    SLEEP(1500);
+
+    file->StopPlayingFileAsMicrophone(0);
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+
+    TEST_MUSTPASS(base->DeleteChannel(0));
+
+#endif // #ifdef _ENABLE_IPV6_TESTS_
+    // >> end of EnableIPv6
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> SetSourceFilter
+    //
+    // - VE initialized
+    // - no existing channels
+    // - no media
+
+    TEST(SetSourceFilter);
+    ANL();
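+    // A source filter drops incoming packets whose source port or IP address
+    // does not match; a value of 0 (port) or NULL/empty (IP) disables that
+    // part of the filter.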
+
+    // call without valid channel
+    TEST_MUSTPASS(!netw->SetSourceFilter(0, 12345));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(base->CreateChannel());
+
+    // invalid parameters
+    TEST_MUSTPASS(!netw->SetSourceFilter(0, 65536));
+    MARK();
+    TEST_ERROR(VE_INVALID_PORT_NMBR);
+    TEST_MUSTPASS(!netw->SetSourceFilter(0, 12345, 65536));
+    MARK();
+    TEST_ERROR(VE_INVALID_PORT_NMBR);
+    TEST_MUSTPASS(!netw->SetSourceFilter(0, 12345, 12346, "300.300.300.300"));
+    MARK();
+    TEST_ERROR(VE_INVALID_IP_ADDRESS);
+
+    // STATE: RTP filter port is 12345, RTCP filter port is 12346
+
+    // disable all filters and ensure that media is received
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));
+    MARK();
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 2000, kVoEDefault, localIP));
+    TEST_MUSTPASS(base->SetSendDestination(0, 2000, localIP));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(sleepTime);
+
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    TEST_MUSTPASS(rtpPort != 2000);
+    TEST_MUSTPASS(rtcpPort != 2001);
+    TEST_MUSTPASS(strcmp(ipaddr, localIP) != 0);
+
+    // clear states and restart loopback session
+    TEST_MUSTPASS(base->DeleteChannel(0)); // clear source info state
+    TEST_MUSTPASS(base->CreateChannel());
+
+    // set RTP filter to port 2002 and verify that source 2000 is blocked
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 2002, 0, NULL));
+    MARK();
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 2000, kVoEDefault, localIP));
+    TEST_MUSTPASS(base->SetSendDestination(0, 2000, localIP));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(sleepTime);
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    TEST_MUSTPASS(rtpPort != 0);
+    TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+
+    // ensure that received packets originates from 2002 and that they now pass
+    // the filter
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->StopSend(0));
+    // RTP source is 2002
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 2002, kVoEDefault, localIP));
+    TEST_MUSTPASS(base->SetSendDestination(0, 2002, localIP));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(sleepTime);
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    TEST_MUSTPASS(rtpPort != 2002);
+    TEST_MUSTPASS(strcmp(ipaddr, localIP) != 0);
+
+    // clear states and restart loopback session
+    TEST_MUSTPASS(base->DeleteChannel(0)); // clear source info state
+    TEST_MUSTPASS(base->CreateChannel());
+
+    // set IP filter to local IP and verify that default loopback stream is
+    // blocked
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, localIP));
+    MARK();
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 2000));
+    TEST_MUSTPASS(base->SetSendDestination(0, 2000, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(sleepTime);
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    TEST_MUSTPASS(rtpPort != 0);
+    TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+
+    // ensure that received packets originates from the local IP and that they
+    // now pass the filter
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->StopSend(0));
+    // should pass the filter
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 2000, kVoEDefault, localIP));
+    TEST_MUSTPASS(base->SetSendDestination(0, 2000, localIP));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(sleepTime);
+    TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+    TEST_MUSTPASS(rtpPort != 2000);
+    TEST_MUSTPASS(strcmp(ipaddr, localIP) != 0);
+
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->StopSend(0));
+
+    // STATE: no active media, IP filter is active
+
+    // disable all filters
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));
+    MARK();
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+    TEST_MUSTPASS(rtpPort != 0);
+    TEST_MUSTPASS(rtcpPort != 0);
+    TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // >> end of SetSourceFilter
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> GetSourceFilter
+    //
+    // - VE initialized
+    // - no existing channels
+    // - no media
+
+    TEST(GetSourceFilter);
+    ANL();
+
+    // call without valid channel
+    TEST_MUSTPASS(!netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(base->CreateChannel());
+
+    // invalid input parameters
+    TEST_MUSTPASS(!netw->GetSourceFilter(0, rtpPort, rtcpPort, NULL));
+    MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+
+    // valid call without any filter set
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 0);
+    TEST_MUSTPASS(rtcpPort != 0);
+    TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+
+    // STATE: no active media and no enabled filters
+
+    // set different filters and verify that they "bite"
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 54321, 0, NULL));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 54321);
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtpPort != 0);
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 15425, NULL));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtcpPort != 15425);
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(rtcpPort != 0);
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, "192.168.199.19"));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(strcmp(ipaddr, "192.168.199.19") != 0);
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, "0.0.0.0"));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(strcmp(ipaddr, "0.0.0.0") != 0);
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+    MARK();
+    TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // >> end of GetSourceFilter
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> RegisterDeadOrAliveObserver
+    // >> DeRegisterDeadOrAliveObserver
+    //
+    // - VE initialized
+    // - no existing channels
+    // - no media
+
+    TEST(RegisterDeadOrAliveObserver);
+    ANL();
+    TEST(DeRegisterDeadOrAliveObserver);
+    ANL();
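+    // The test class registers itself as observer and receives periodic
+    // dead-or-alive callbacks once the status reporting is enabled below.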
+
+    // call without valid channel
+    TEST_MUSTPASS(!netw->RegisterDeadOrAliveObserver(0, *this));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(base->CreateChannel());
+
+    TEST_MUSTPASS(netw->RegisterDeadOrAliveObserver(0, *this));
+    MARK();
+    TEST_MUSTPASS(!netw->RegisterDeadOrAliveObserver(0, *this));
+    MARK(); // already registered
+    TEST_ERROR(VE_INVALID_OPERATION);
+    TEST_MUSTPASS(netw->DeRegisterDeadOrAliveObserver(0));
+    MARK();
+    TEST_MUSTPASS(netw->DeRegisterDeadOrAliveObserver(0));
+    MARK(); // OK to do it again
+    TEST_MUSTPASS(netw->RegisterDeadOrAliveObserver(0, *this));
+    MARK();
+    TEST_MUSTPASS(netw->DeRegisterDeadOrAliveObserver(0));
+    MARK();
+
+    TEST_MUSTPASS(base->DeleteChannel(0));
+
+    // STATE: dead-or-alive observer is disabled
+
+    // >> end of RegisterDeadOrAliveObserver
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> SetPeriodicDeadOrAliveStatus
+    // >> GetPeriodicDeadOrAliveStatus
+    //
+    // - VE initialized
+    // - no existing channels
+    // - no media
+
+    // call without valid channel
+    TEST_MUSTPASS(!netw->SetPeriodicDeadOrAliveStatus(0, false));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(base->CreateChannel());
+
+    // Invalid parameters
+    TEST_MUSTPASS(!netw->SetPeriodicDeadOrAliveStatus(0, true, 0));
+    MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetPeriodicDeadOrAliveStatus(0, true, 151));
+    MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetPeriodicDeadOrAliveStatus(1, true, 10));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    int sampleTime(0);
+
+    // Valid parameters
+    TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, true, 1));
+    MARK();
+    TEST_MUSTPASS(netw->GetPeriodicDeadOrAliveStatus(0, enabled, sampleTime));
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(sampleTime != 1);
+    TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, true, 150));
+    MARK();
+    TEST_MUSTPASS(netw->GetPeriodicDeadOrAliveStatus(0, enabled, sampleTime));
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(sampleTime != 150);
+    TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, false));
+    MARK();
+    TEST_MUSTPASS(netw->GetPeriodicDeadOrAliveStatus(0, enabled, sampleTime));
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(sampleTime != 150); // ensure the last set time isn't modified
+
+    StartMedia(0, 2000, true, true, true);
+
+    // STATE: full duplex media is active
+
+    // test the dead-or-alive mechanism
+    TEST_MUSTPASS(netw->RegisterDeadOrAliveObserver(0, *this));
+    MARK();
+    TEST_LOG("\nVerify that Alive callbacks are received (dT=2sec): ");
+    fflush(NULL);
+    TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, true, 2));
+    SLEEP(6000);
+    TEST_LOG("\nChange dT to 1 second: ");
+    fflush(NULL);
+    TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, true, 1));
+    SLEEP(6000);
+    TEST_LOG("\nDisable dead-or-alive callbacks: ");
+    fflush(NULL);
+    TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, false));
+    SLEEP(6000);
+    TEST_LOG("\nStop sending and enable callbacks again.\n");
+    TEST_LOG("Verify that Dead callbacks are received (dT=2sec): ");
+    fflush(NULL);
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, true, 2));
+    SLEEP(6000);
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_LOG("\nRestart sending.\n");
+    TEST_LOG("Verify that Alive callbacks are received again (dT=2sec): ");
+    fflush(NULL);
+    SLEEP(6000);
+    TEST_LOG("\nDisable dead-or-alive callbacks.");
+    fflush(NULL);
+    TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, false));
+    TEST_MUSTPASS(netw->DeRegisterDeadOrAliveObserver(0));
+    MARK();
+
+    StopMedia(0);
+
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // >> end of SetPeriodicDeadOrAliveStatus
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> SetPacketTimeoutNotification
+    // >> GetPacketTimeoutNotification
+    //
+    // - VE initialized
+    // - no existing channels
+    // - no media
+    // - NOTE: dynamic tests are performed in standard test
+
+    int timeOut(0);
+
+    TEST(SetPacketTimeoutNotification);
+    ANL();
+    TEST(GetPacketTimeoutNotification);
+    ANL();
+
+    // call without existing valid channel
+    TEST_MUSTPASS(!netw->SetPacketTimeoutNotification(0, false));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(base->CreateChannel());
+
+    // invalid function calls
+    TEST_MUSTPASS(!netw->SetPacketTimeoutNotification(0, true, 0));
+    MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetPacketTimeoutNotification(0, true, 151));
+    MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+
+    // valid function calls (no active media)
+    TEST_MUSTPASS(netw->SetPacketTimeoutNotification(0, true, 2));
+    MARK();
+    TEST_MUSTPASS(netw->GetPacketTimeoutNotification(0, enabled, timeOut));
+    MARK();
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(timeOut != 2);
+    TEST_MUSTPASS(netw->SetPacketTimeoutNotification(0, false));
+    MARK();
+    TEST_MUSTPASS(netw->GetPacketTimeoutNotification(0, enabled, timeOut));
+    MARK();
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(netw->SetPacketTimeoutNotification(0, true, 10));
+    MARK();
+    TEST_MUSTPASS(netw->GetPacketTimeoutNotification(0, enabled, timeOut));
+    MARK();
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(timeOut != 10);
+    TEST_MUSTPASS(netw->SetPacketTimeoutNotification(0, true, 2));
+    MARK();
+    TEST_MUSTPASS(netw->GetPacketTimeoutNotification(0, enabled, timeOut));
+    MARK();
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(timeOut != 2);
+    TEST_MUSTPASS(netw->SetPacketTimeoutNotification(0, false));
+    MARK();
+    TEST_MUSTPASS(netw->GetPacketTimeoutNotification(0, enabled, timeOut));
+    MARK();
+    TEST_MUSTPASS(enabled != false);
+
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    ANL();
+    AOK();
+    ANL();
+    ANL();
+
+    // >> end of SetPacketTimeoutNotification
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> SendUDPPacket
+    //
+    // - VE initialized
+    // - no existing channels
+    // - no media
+
+
+    // >> end of SendUDPPacket
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> SetSendTOS
+    //
+    // - VE initialized
+    // - no existing channels
+    // - no media
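+    //
+    // For reference, a minimal DSCP-marking sketch (DSCP value 46 is
+    // illustrative; the valid range verified below is [0,63]). Sockets must
+    // exist first, and binding to a specific local IP is only required when
+    // useSetSockopt is false:
+    //
+    //   base->SetLocalReceiver(0, 12345);
+    //   netw->SetSendTOS(0, 46, -1, true); // DSCP=46, default priority,
+    //                                      // use setsockopt()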
+
+    TEST(SetSendTOS);
+    ANL();
+#if defined(_WIN32) || defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+
+    // call without existing valid channel
+
+    TEST_MUSTPASS(!netw->SetSendTOS(0, 0)); MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(base->CreateChannel());
+
+    // trivial invalid function calls
+    TEST_MUSTPASS(!netw->SetSendTOS(0, -1)); MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetSendTOS(0, 64)); MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetSendTOS(0, 1, -2)); MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetSendTOS(0, 1, 8)); MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetSendTOS(0, 1)); MARK();
+    TEST_ERROR(VE_SOCKET_ERROR); // must create sockets first
+
+#ifdef _WIN32
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 3000));
+
+    // enable ToS using SetSockopt (should work without local binding)
+    TEST_MUSTPASS(netw->SetSendTOS(0, 1, -1, true)); MARK();
+    TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt)); MARK();
+    TEST_MUSTPASS(DSCP != 1);
+    TEST_MUSTPASS(priority != 0);
+    TEST_MUSTPASS(useSetSockopt != true);
+
+    // try to disable SetSockopt while ToS is enabled (should fail)
+    TEST_MUSTPASS(!netw->SetSendTOS(0, 1, -1, false)); MARK();
+    TEST_ERROR(VE_TOS_INVALID); // must disable ToS before enabling SetSockopt
+
+    // disable ToS to be able to stop using SetSockopt
+    TEST_MUSTPASS(netw->SetSendTOS(0, 0, -1, true)); MARK(); // disable ToS
+    TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt)); MARK();
+    TEST_MUSTPASS(DSCP != 0);
+    TEST_MUSTPASS(priority != 0);
+    TEST_MUSTPASS(useSetSockopt != true);
+
+    // to use the "non-SetSockopt" method, local binding is required;
+    // trying without it should fail
+    TEST_MUSTPASS(!netw->SetSendTOS(0, 1, -1, false)); MARK();
+    TEST_ERROR(VE_TOS_ERROR); // must bind to local IP first
+
+    // bind to local IP and try again (should work this time)
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 12345, kVoEDefault, localIP));
+    TEST_LOG("\nThis test needs to be run as administrator\n");
+    TEST_MUSTPASS(netw->SetSendTOS(0, 1, -1, false)); MARK();
+    TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt)); MARK();
+    TEST_MUSTPASS(DSCP != 1);
+    TEST_MUSTPASS(priority != 0);
+    TEST_MUSTPASS(useSetSockopt != false);
+
+    // STATE: bound to local IP, local port is 12345 and DSCP is 1 (not using
+    // SetSockopt)
+
+    // verify loopback audio with the current settings
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, localIP));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    Play(0, 2000, true, true); // file should be played out here...
+
+#ifdef _SEND_TO_REMOTE_IP_
+    // Send to remote destination and verify the DSCP using Wireshark.
+    // Use filter ip.src == "RemoteIP".
+    TEST_LOG("\nUse Wireshark and verify a correctly received DSCP at the "
+        "remote side!\n");
+    TEST_LOG("Sending approx. 5 packets to %s:%d for each DSCP below:\n",
+             RemoteIP, RemotePort);
+    TEST_MUSTPASS(base->SetSendDestination(0, RemotePort, RemoteIP));
+    TEST_LOG("  DSCP is set to 0x%02x\n", 1);
+    SLEEP(100);
+
+    // Change the DSCP while sending is active and verify on remote side.
+    TEST_MUSTPASS(netw->SetSendTOS(0, 2));
+    TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+    TEST_LOG("  DSCP is set to 0x%02x\n", DSCP);
+    SLEEP(100);
+
+    // Change the DSCP while sending is active and verify on remote side.
+    TEST_MUSTPASS(netw->SetSendTOS(0, 63));
+    TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+    TEST_LOG("  DSCP is set to 0x%02x\n", DSCP);
+    SLEEP(100);
+
+    // stop and resume sending
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+    TEST_LOG("  DSCP is set to 0x%02x\n", DSCP);
+    SLEEP(100);
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(netw->SetSendTOS(0, 0));
+#endif // _SEND_TO_REMOTE_IP_
+    // Windows priority tests (priority cannot be set using setsockopt on Win)
+    TEST_LOG("Testing priority\n");
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, localIP));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(!netw->SetSendTOS(0, 0, 3, true)); // Should fail
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(netw->SetSendTOS(0, 0, 3, false));
+    TEST_MUSTPASS(base->StartSend(0));
+    Play(0, 2000, true, true); // file should be played out here...
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(netw->SetSendTOS(0, 1, 3, false));
+    TEST_MUSTPASS(base->StartSend(0));
+    Play(0, 2000, true, true); // file should be played out here...
+
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->CreateChannel());
+#endif // _WIN32
+    // STATE: no media, disabled ToS, no defined receiver
+
+    // Repeat tests above but using setsockopt() this time.
+    // Binding to local IP should not be required.
+
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 12345, kVoEDefault));
+    TEST_MUSTPASS(netw->SetSendTOS(0, 10, -1, true)); MARK();
+    TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt)); MARK();
+    TEST_MUSTPASS(DSCP != 10);
+    TEST_MUSTPASS(priority != 0);
+    TEST_MUSTPASS(useSetSockopt != true);
+
+    // STATE: *not* bound to local IP, local port is 12345 and DSCP is 10
+    // (using SetSockopt)
+
+    // verify loopback audio with the current settings
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    Play(0, 2000, true, true); // file should be played out here...
+
+#ifdef _SEND_TO_REMOTE_IP_
+    // Send to remote destination and verify the DSCP using Wireshark.
+    // Use filter ip.src == "RemoteIP". 
+    TEST_LOG("\nUse Wireshark and verify a correctly received DSCP at the"
+        " remote side!\n");
+    TEST_LOG("Sending approx. 5 packets to %s:%d for each DSCP below:\n",
+             RemoteIP, RemotePort);
+    TEST_MUSTPASS(base->SetSendDestination(0, RemotePort, RemoteIP));
+    TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+    TEST_LOG("  DSCP is set to 0x%02x (setsockopt)\n", DSCP);
+    SLEEP(100);
+
+    // Change the DSCP while sending is active and verify on remote side.
+    TEST_MUSTPASS(netw->SetSendTOS(0, 20, -1, true)); // use setsockopt()
+    TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+    TEST_LOG("  DSCP is set to 0x%02x (setsockopt)\n", DSCP);
+    SLEEP(100);
+
+    // Change the DSCP while sending is active and verify on remote side.
+    TEST_MUSTPASS(netw->SetSendTOS(0, 61, -1, true)); // use setsockopt()
+    TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+    TEST_LOG("  DSCP is set to 0x%02x (setsockopt)\n", DSCP);
+    SLEEP(100);
+
+    // stop and resume sending
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+    TEST_LOG("  DSCP is set to 0x%02x (setsockopt)\n", DSCP);
+    SLEEP(100);
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(netw->SetSendTOS(0, 0, -1, true));
+#endif // _SEND_TO_REMOTE_IP_
+#if defined(WEBRTC_LINUX)
+    // Linux priority tests (using setsockopt)
+    TEST_LOG("Testing priority\n");
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, localIP));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(netw->SetSendTOS(0, 0, 3, true));
+    TEST_MUSTPASS(base->StartSend(0));
+    Play(0, 2000, true, true); // file should be played out here...
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(netw->SetSendTOS(0, 1, 3, true));
+    TEST_MUSTPASS(base->StartSend(0));
+    Play(0, 2000, true, true); // file should be played out here...
+#endif // #if defined(WEBRTC_LINUX)
+#if !defined(_WIN32) && !defined(WEBRTC_LINUX)
+    // Failure tests for platforms other than Windows and Linux
+    TEST_MUSTPASS(!netw->SetSendTOS(0, 0, 3, false)); // Should fail
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+#endif // #if !defined(_WIN32) && !defined(WEBRTC_LINUX)
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    ANL(); AOK(); ANL(); ANL();
+
+    // END #if defined(_WIN32) || defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#else
+    TEST_LOG( "Skipping ToS tests -  _WIN32, LINUX, MAC is not defined or "
+        "ANDROID is defined");
+#endif
+
+    // >> end of SetSendTOS
+    // ------------------------------------------------------------------------
+
+    // ------------------------------------------------------------------------
+    // >> SetSendGQoS (Windows only)
+    //
+    // - VE initialized
+    // - no existing channels
+    // - no media
+    //
+    // From qos.h:
+    //
+    //  #define SERVICETYPE_NOTRAFFIC               0x00000000 
+    //  #define SERVICETYPE_BESTEFFORT              0x00000001 (*)
+    //  #define SERVICETYPE_CONTROLLEDLOAD          0x00000002 (*)
+    //  #define SERVICETYPE_GUARANTEED              0x00000003 (*)
+    //  #define SERVICETYPE_NETWORK_UNAVAILABLE     0x00000004
+    //  #define SERVICETYPE_GENERAL_INFORMATION     0x00000005 
+    //  #define SERVICETYPE_NOCHANGE                0x00000006
+    //  #define SERVICETYPE_NONCONFORMING           0x00000009
+    //  #define SERVICETYPE_NETWORK_CONTROL         0x0000000A
+    //  #define SERVICETYPE_QUALITATIVE             0x0000000D (*)
+    //  
+    //  #define SERVICE_BESTEFFORT                  0x80010000
+    //  #define SERVICE_CONTROLLEDLOAD              0x80020000
+    //  #define SERVICE_GUARANTEED                  0x80040000
+    //  #define SERVICE_QUALITATIVE                 0x80200000
+    //
+    //  (*) supported in WEBRTC VoE
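+    //
+    //  For reference, a minimal usage sketch (port and address are
+    //  illustrative; as verified below, sockets and the send destination
+    //  must be initialized before GQoS can be enabled):
+    //
+    //    base->SetLocalReceiver(0, 12345);
+    //    base->SetSendDestination(0, 12345, "127.0.0.1");
+    //    netw->SetSendGQoS(0, true, SERVICETYPE_CONTROLLEDLOAD);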
+
+    TEST(SetSendGQoS);
+    ANL();
+#ifdef _WIN32
+
+    // call without existing valid channel
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, false, 0)); MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    TEST_MUSTPASS(base->CreateChannel());
+
+    // supported service type but no sockets
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT)); MARK();
+    TEST_ERROR(VE_SOCKETS_NOT_INITED);
+
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 12345));
+
+    // supported service type but sender is not initialized
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT)); MARK();
+    TEST_ERROR(VE_DESTINATION_NOT_INITED);
+
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, "127.0.0.1"));
+
+    // invalid service types
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_NOTRAFFIC)); MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_NETWORK_UNAVAILABLE));
+    MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_GENERAL_INFORMATION));
+    MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_NOCHANGE)); MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_NONCONFORMING));
+    MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_NETWORK_CONTROL));
+    MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICE_BESTEFFORT)); MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICE_CONTROLLEDLOAD)); MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICE_GUARANTEED)); MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICE_QUALITATIVE)); MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+
+    // Is ToS enabled here?
+
+    // Settings which don't require binding to local IP
+
+    // set SERVICETYPE_BESTEFFORT 
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT)); MARK();
+    TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+    MARK();
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(serviceType != SERVICETYPE_BESTEFFORT);
+    TEST_MUSTPASS(overrideDSCP != false);
+
+    // set SERVICETYPE_CONTROLLEDLOAD
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_CONTROLLEDLOAD));
+    MARK();
+    TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+    MARK();
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(serviceType != SERVICETYPE_CONTROLLEDLOAD);
+    TEST_MUSTPASS(overrideDSCP != false);
+
+    // set SERVICETYPE_GUARANTEED
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_GUARANTEED)); MARK();
+    TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+    MARK();
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(serviceType != SERVICETYPE_GUARANTEED);
+    TEST_MUSTPASS(overrideDSCP != false);
+
+    // set SERVICETYPE_QUALITATIVE
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_QUALITATIVE)); MARK();
+    TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+    MARK();
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(serviceType != SERVICETYPE_QUALITATIVE);
+    TEST_MUSTPASS(overrideDSCP != false);
+
+    // disable GQoS
+    TEST_MUSTPASS(netw->SetSendGQoS(0, false, 0)); MARK();
+    TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+    MARK();
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(serviceType != SERVICETYPE_QUALITATIVE);
+    TEST_MUSTPASS(overrideDSCP != false);
+
+    // STATE: disabled GQoS, sockets exist, sending side is initialized, no media
+
+    // Loopback tests using the four different GQoS settings
+
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT)); MARK();
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    ANL();
+    TEST_LOG("[SERVICETYPE_BESTEFFORT]");
+    Play(0, 2000, true, true); // file should be played out here...
+
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_CONTROLLEDLOAD)); MARK();
+    ANL();
+    TEST_LOG("[SERVICETYPE_CONTROLLEDLOAD]");
+    Play(0, 2000, true, true); // file should be played out here...
+
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_GUARANTEED)); MARK();
+    ANL();
+    TEST_LOG("[SERVICETYPE_GUARANTEED]");
+    Play(0, 2000, true, true); // file should be played out here...
+
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_QUALITATIVE)); MARK();
+    ANL();
+    TEST_LOG("[SERVICETYPE_QUALITATIVE]");
+    Play(0, 2000, true, true); // file should be played out here...
+
+#ifdef _SEND_TO_REMOTE_IP_
+    // Send to remote destination and verify the DSCP mapping using Wireshark.
+    // Use filter ip.src == "RemoteIP". 
+
+    // Modify the send destination on the fly
+    TEST_MUSTPASS(base->SetSendDestination(0, RemotePort, RemoteIP));
+
+    TEST_LOG("\nUse Wireshark and verify a correctly received DSCP mapping at"
+        " the remote side!\n");
+    TEST_LOG("Sending approx. 5 packets to %s:%d for each GQoS setting below:\n",
+             RemoteIP, RemotePort);
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT));
+    TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+    TEST_LOG("  serviceType is set to SERVICETYPE_BESTEFFORT (0x%02x), should "
+        "be mapped to DSCP = 0x00\n", serviceType);
+    SLEEP(100);
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_CONTROLLEDLOAD));
+    TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+    TEST_LOG("  serviceType is set to SERVICETYPE_CONTROLLEDLOAD (0x%02x), "
+        "should be mapped to DSCP = 0x18\n", serviceType);
+    SLEEP(100);
+    TEST_MUSTPASS(netw->SetSendGQoS(0, false, 0));
+    TEST_LOG("  QoS is disabled, should give DSCP = 0x%02x\n", 0);
+    SLEEP(100);
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_GUARANTEED));
+    TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+    TEST_LOG("  serviceType is set to SERVICETYPE_GUARANTEED (0x%02x), should "
+        "be mapped to DSCP = 0x28\n", serviceType);
+    SLEEP(100);
+    TEST_MUSTPASS(netw->SetSendGQoS(0, false, 0));
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_QUALITATIVE));
+    TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+    TEST_LOG("  serviceType is set to SERVICETYPE_QUALITATIVE (0x%02x), should"
+        " be mapped to DSCP = 0x00\n", serviceType);
+    SLEEP(100);
+#endif // _SEND_TO_REMOTE_IP_
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->StopSend(0));
+
+    // STATE: sockets exist, sending side is initialized, no media
+
+    // Repeat test above but this time using overrideDSCP.
+
+    // Some initial loopback tests.
+    // NOTE - overriding the DSCP requires binding to a local IP.
+
+    // should not work since QoS is enabled
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 3));
+    MARK();
+    TEST_ERROR(VE_TOS_GQOS_CONFLICT);
+
+    // disable QoS and try to override again (should fail again since local
+    // binding is not done yet)
+    TEST_MUSTPASS(netw->SetSendGQoS(0, false, 0));
+    TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 3));
+    MARK();
+    TEST_ERROR(VE_GQOS_ERROR);
+
+    // make proper settings and try again (should work this time)
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 12345, kVoEDefault, localIP));
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, localIP));
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 3));
+    MARK();
+
+    // Now, let's try some loopback tests using override DSCP
+
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    ANL();
+    TEST_LOG("[overrideDSCP=3]");
+    Play(0, 2000, true, true); // file should be played out here...
+
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 17));
+    MARK();
+    ANL();
+    TEST_LOG("[overrideDSCP=17]");
+    Play(0, 2000, true, true); // file should be played out here...
+
+    // And finally, send to remote side as well to verify that the new mapping
+    // works as it should.
+
+#ifdef _SEND_TO_REMOTE_IP_
+    // Modify the send destination on the fly
+    TEST_MUSTPASS(base->SetSendDestination(0, RemotePort, RemoteIP));
+
+    TEST_LOG("\nUse Wireshark and verify a correctly received DSCP mapping at"
+        " the remote side!\n");
+    TEST_LOG("Sending approx. 5 packets to %s:%d for each GQoS setting below:\n",
+             RemoteIP, RemotePort);
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 18));
+    TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+    TEST_LOG("  serviceType is set to SERVICETYPE_BESTEFFORT, should be "
+        "overrided to DSCP = 0x%02x\n", overrideDSCP);
+    SLEEP(100);
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 62));
+    TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+    TEST_LOG("  serviceType is set to SERVICETYPE_BESTEFFORT, should be "
+        "overrided to DSCP = 0x%02x\n", overrideDSCP);
+    SLEEP(100);
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 32));
+    TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+    TEST_LOG("  serviceType is set to SERVICETYPE_BESTEFFORT, should be "
+        "overrided to DSCP = 0x%02x\n", overrideDSCP);
+    SLEEP(100);
+    TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 1));
+    TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+    TEST_LOG("  serviceType is set to SERVICETYPE_BESTEFFORT, should be "
+        "overrided to DSCP = 0x%02x\n", overrideDSCP);
+    SLEEP(100);
+    TEST_MUSTPASS(netw->SetSendGQoS(0, false, 0));
+    TEST_LOG("  QoS is disabled, should give DSCP = 0x%02x\n", 0);
+    SLEEP(100);
+#endif // _SEND_TO_REMOTE_IP_
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->StopSend(0));
+
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    ANL(); AOK(); ANL(); ANL();
+
+#else
+    TEST_LOG("Skipping GQoS tests - _WIN32 is not defined");
+#endif  // #ifdef _WIN32
+    // >> end of SetSendGQoS
+    // ------------------------------------------------------------------------
+
+    if (file)
+    {
+        file->StopPlayingFileAsMicrophone(0);
+    }
+    base->StopSend(0);
+    base->StopPlayout(0);
+    base->StopReceive(0);
+    base->DeleteChannel(0);
+    base->Terminate();
+
+    ANL();
+    AOK();
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest::TestRTP_RTCP
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestRTP_RTCP()
+{
+    PrepareTest("RTP_RTCP");
+
+    VoEBase* base = _mgr.BasePtr();
+    VoEFile* file = _mgr.FilePtr();
+    VoERTP_RTCP* rtp_rtcp = _mgr.RTP_RTCPPtr();
+
+    XRTPObserver rtpObserver;
+
+#ifdef ANDROID
+    int sleepTime = 200;
+#else
+    int sleepTime = 100;
+#endif
+
+#ifdef _USE_EXTENDED_TRACE_
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename(
+        "VoERTP_RTCP_trace.txt")));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+                                              kTraceStateInfo |
+                                              kTraceWarning |
+                                              kTraceError |
+                                              kTraceCritical |
+                                              kTraceApiCall |
+                                              kTraceMemory |
+                                              kTraceInfo));
+#endif
+
+    TEST_MUSTPASS(base->Init());
+    TEST_MUSTPASS(base->CreateChannel());
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 12345));
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+
+    ///////////////////////////
+    // Actual test starts here
+
+    // ------------------------------------------------------------------------
+    // >> Set/GetRTPAudioLevelIndicationStatus
+
+    TEST(SetRTPAudioLevelIndicationStatus);
+    ANL();
+    TEST(GetRTPAudioLevelIndicationStatus);
+
+    // test invalid input parameters
+    TEST_MUSTPASS(-1 != rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, true, 0));
+    MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(-1 != rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, true, 15));
+    MARK();
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(-1 != rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, false, 15));
+    MARK();
+    TEST_MUSTPASS(-1 != rtp_rtcp->SetRTPAudioLevelIndicationStatus(1, true, 5));
+    MARK();
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+    // test complete valid input range [1,14]
+    bool audioLevelEnabled(false);
+    unsigned char ID(0);
+    for (int id = 1; id < 15; id++)
+    {
+        TEST_MUSTPASS(rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, true, id));
+        MARK();
+        TEST_MUSTPASS(rtp_rtcp->GetRTPAudioLevelIndicationStatus(
+            0, audioLevelEnabled, ID));
+        MARK();
+        TEST_MUSTPASS(audioLevelEnabled != true);
+        TEST_MUSTPASS(rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, false, id));
+        MARK();
+        TEST_MUSTPASS(rtp_rtcp->GetRTPAudioLevelIndicationStatus(
+            0, audioLevelEnabled, ID));
+        MARK();
+        TEST_MUSTPASS(audioLevelEnabled != false);
+        TEST_MUSTPASS(ID != id);
+    }
+
+    // disable audio-level-rtp-header-extension
+    TEST_MUSTPASS(rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, false));
+    MARK();
+    ANL();
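+
+    // For reference, a minimal usage sketch (extension ID 1 is illustrative;
+    // the valid ID range verified above is [1,14]):
+    //
+    //   rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, true, 1);  // enable
+    //   rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, false);    // disable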
+
+    // ------------------------------------------------------------------------
+    // >> SetLocalSSRC
+
+    int i(0);
+
+    TEST(SetLocalSSRC);
+    TEST_MUSTPASS(!rtp_rtcp->SetLocalSSRC(0, 5678));
+    MARK();
+    TEST_MUSTPASS(VE_ALREADY_SENDING != base->LastError());
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(rtp_rtcp->SetLocalSSRC(0, 5678)); // force send SSRC to 5678
+    TEST_MUSTPASS(base->StartSend(0));
+    MARK();
+    ANL();
+
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, micFile, true, true));
+
+    // ------------------------------------------------------------------------
+    // >> InsertExtraRTPPacket
+
+    TEST(InsertExtraRTPPacket);
+    ANL();
+
+    const char payloadData[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
+
+    TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(-1, 0, false,
+                                                       payloadData, 8));
+    MARK(); // invalid channel
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+    TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(0, -1, false,
+                                                       payloadData, 8));
+    MARK(); // invalid payload type
+    TEST_ERROR(VE_INVALID_PLTYPE);
+    TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(0, 128, false,
+                                                       payloadData, 8));
+    MARK(); // invalid payload type
+    TEST_ERROR(VE_INVALID_PLTYPE);
+    TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(0, 99, false,
+                                                       NULL, 8));
+    MARK(); // invalid pointer
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(0, 99, false,
+                                                       payloadData, 1500-28+1));
+    MARK(); // invalid size
+    TEST_ERROR(VE_INVALID_ARGUMENT);
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(0, 99, false,
+                                                       payloadData, 8));
+    MARK(); // not sending
+    TEST_ERROR(VE_NOT_SENDING);
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, micFile, true, true));
+
+    SLEEP(1000);
+    for (int p = 0; p < 128; p++)
+    {
+        TEST_MUSTPASS(rtp_rtcp->InsertExtraRTPPacket(0, p, false,
+                                                     payloadData, 8));
+        MARK();
+        TEST_MUSTPASS(rtp_rtcp->InsertExtraRTPPacket(0, p, true,
+                                                     payloadData, 8));
+        MARK();
+    }
+
+    // Ensure we have sent all extra packets before we move forward, to avoid
+    // an incorrect error code.
+    SLEEP(1000);
+
+    ANL();
+
+    // ------------------------------------------------------------------------
+    // >> RTP dump APIs
+
+    TEST(Start/StopRtpDump);
+    ANL();
+    TEST(RTPDumpIsActive);
+
+    TEST_MUSTPASS(-1 != rtp_rtcp->RTPDumpIsActive(-1, kRtpIncoming));
+    MARK(); // invalid channel
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+    TEST_MUSTPASS(false != rtp_rtcp->RTPDumpIsActive(0, kRtpIncoming));
+    MARK(); // should be off by default
+    TEST_MUSTPASS(false != rtp_rtcp->RTPDumpIsActive(0, kRtpOutgoing));
+    MARK(); // should be off by default
+
+    TEST_MUSTPASS(-1 != rtp_rtcp->StartRTPDump(-1, NULL));
+    MARK(); // invalid channel
+    TEST_ERROR(VE_CHANNEL_NOT_VALID);
+    TEST_MUSTPASS(-1 != rtp_rtcp->StartRTPDump(0, NULL));
+    MARK(); // invalid file name
+    TEST_ERROR(VE_BAD_FILE);
+
+    // Create two RTP dump files:
+    //
+    //  - dump_in_1sec.rtp  <=> ~1 sec recording of the input side
+    //  - dump_out_2sec.rtp <=> ~2 sec recording of the output side
+    //
+    TEST_MUSTPASS(rtp_rtcp->StopRTPDump(0));
+    MARK();
+    TEST_MUSTPASS(rtp_rtcp->StopRTPDump(0, kRtpIncoming));
+    MARK();
+    TEST_MUSTPASS(rtp_rtcp->StopRTPDump(0, kRtpOutgoing));
+    MARK();
+    TEST_MUSTPASS(rtp_rtcp->StartRTPDump(0, GetFilename("dump_in_1sec.rtp"),
+                                         kRtpIncoming));
+    MARK();
+    TEST_MUSTPASS(rtp_rtcp->StartRTPDump(0, GetFilename("dump_out_2sec.rtp"),
+                                         kRtpOutgoing));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(rtp_rtcp->StopRTPDump(0, kRtpIncoming));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(rtp_rtcp->StopRTPDump(0, kRtpOutgoing));
+    MARK();
+
+    // Start/Stop tests:
+    //
+    // - only one file (called dump_in_200ms.rtp) should exist after this test
+    //
+    for (i = 0; i < 10; i++)
+    {
+        TEST_MUSTPASS(rtp_rtcp->StartRTPDump(0,
+                                             GetFilename("dump_in_200ms.rtp")));
+        MARK();
+        SLEEP(200);
+        TEST_MUSTPASS(rtp_rtcp->StopRTPDump(0));
+        MARK();
+    }
+
+    // >> end of RTP dump APIs
+    // ------------------------------------------------------------------------
+
+    ANL();
+
+    TEST(GetRTCPStatus);
+    bool enabled;
+    TEST_MUSTPASS(!rtp_rtcp->GetRTCPStatus(-1, enabled));
+    MARK();
+    TEST_MUSTPASS(rtp_rtcp->GetRTCPStatus(0, enabled));
+    MARK(); // should be on by default
+    TEST_MUSTPASS(enabled != true);
+    ANL();
+
+    TEST(SetRTCPStatus);
+    TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, false));
+    MARK();
+    TEST_MUSTPASS(rtp_rtcp->GetRTCPStatus(0, enabled));
+    TEST_MUSTPASS(enabled != false);
+    MARK();
+    SLEEP(2000);
+    TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, true));
+    MARK();
+    TEST_MUSTPASS(rtp_rtcp->GetRTCPStatus(0, enabled));
+    TEST_MUSTPASS(enabled != true);
+    MARK();
+    SLEEP(6000); // Make sure we get an RTCP packet
+    ANL();
+
+    TEST(CNAME);
+    TEST_MUSTPASS(!rtp_rtcp->SetRTCP_CNAME(0, NULL));
+    MARK();
+    TEST_MUSTPASS(VE_RTP_RTCP_MODULE_ERROR != base->LastError());
+    MARK();
+    TEST_MUSTPASS(!rtp_rtcp->GetRemoteRTCP_CNAME(0, NULL));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    ANL();
+
+    TEST(GetRemoteSSRC);
+    unsigned int ssrc(0);
+    TEST_MUSTPASS(rtp_rtcp->GetRemoteSSRC(0, ssrc));
+    MARK();
+    TEST_MUSTPASS(ssrc != 5678);
+    ANL();
+
+    TEST(GetRemoteCSRCs); // only trivial tests added
+    unsigned int csrcs[2];
+    int n(0);
+    TEST_MUSTPASS(!rtp_rtcp->GetRemoteCSRCs(1, csrcs));
+    MARK();
+    n = rtp_rtcp->GetRemoteCSRCs(0, csrcs);
+    MARK();
+    TEST_MUSTPASS(n != 0); // should be empty
+    ANL();
+
+    TEST(SetRTPObserver);
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(rtp_rtcp->RegisterRTPObserver(0, rtpObserver));
+    TEST_MUSTPASS(rtp_rtcp->DeRegisterRTPObserver(0));
+    TEST_MUSTPASS(rtp_rtcp->RegisterRTPObserver(0, rtpObserver));
+    TEST_MUSTPASS(rtp_rtcp->SetLocalSSRC(0, 7777)); // force send SSRC to 7777
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(sleepTime);
+    // verify that the new SSRC has been detected by the observer
+    TEST_MUSTPASS(rtpObserver._SSRC != 7777);
+    TEST_MUSTPASS(rtp_rtcp->DeRegisterRTPObserver(0));
+    ANL();
+
+    TEST(GetRTPKeepaliveStatus);
+    unsigned char pt;
+    int dT;
+    TEST_MUSTPASS(!rtp_rtcp->GetRTPKeepaliveStatus(-1, enabled, pt, dT));
+    MARK();
+    TEST_MUSTPASS(rtp_rtcp->GetRTPKeepaliveStatus(0, enabled, pt, dT));
+    MARK(); // should be off by default
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(pt != 255);
+    TEST_MUSTPASS(dT != 0);
+    ANL();
+
+    TEST(SetRTPKeepaliveStatus);
+    // stop send before changing the settings
+    TEST_MUSTPASS(base->StopSend(0));
+    // verify invalid input parameters
+    TEST_MUSTPASS(!rtp_rtcp->SetRTPKeepaliveStatus(-1, true, 0, 15));
+    MARK();
+    TEST_MUSTPASS(!rtp_rtcp->SetRTPKeepaliveStatus(0, true, -1, 15));
+    MARK();
+    TEST_MUSTPASS(!rtp_rtcp->SetRTPKeepaliveStatus(0, true, 0, 61));
+    MARK();
+    TEST_MUSTPASS(rtp_rtcp->GetRTPKeepaliveStatus(0, enabled, pt, dT));
+    MARK(); // should still be off
+    TEST_MUSTPASS(enabled != false);
+    // try valid settings
+    TEST_MUSTPASS(rtp_rtcp->SetRTPKeepaliveStatus(0, true, 117));
+    MARK();
+    TEST_MUSTPASS(rtp_rtcp->GetRTPKeepaliveStatus(0, enabled, pt, dT));
+    MARK(); // should be on now
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(pt != 117);
+    TEST_MUSTPASS(dT != 15);
+    // use PT 121 instead of 99, since 99 is already occupied
+    TEST_MUSTPASS(rtp_rtcp->SetRTPKeepaliveStatus(0, true, 121, 3));
+    MARK(); // on, PT=121, dT=3
+    TEST_MUSTPASS(rtp_rtcp->GetRTPKeepaliveStatus(0, enabled, pt, dT));
+    MARK();
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(pt != 121);
+    TEST_MUSTPASS(dT != 3);
+    ANL();
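+
+    // For reference, a minimal keep-alive sketch (payload type 121 and a
+    // 15 second interval are illustrative; sending must be stopped while the
+    // settings are changed, as done above):
+    //
+    //   base->StopSend(0);
+    //   rtp_rtcp->SetRTPKeepaliveStatus(0, true, 121, 15);
+    //   base->StartSend(0);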
+
+    // Make fresh restart (ensures that SSRC is randomized)
+
+    TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->DeleteChannel(0));
+
+    SLEEP(100);
+
+    TEST_MUSTPASS(base->CreateChannel());
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 12345));
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, micFile, true, true));
+
+    SLEEP(8000);
+
+    TEST(GetRemoteRTCPData);
+    // Statistics based on received RTCP reports (i.e. statistics that the
+    // remote side sends to us).
+    unsigned int NTPHigh(0), NTPLow(0), timestamp(0), playoutTimestamp(0),
+        jitter(0);
+    unsigned short fractionLost(0);
+    TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCPData(0, NTPHigh, NTPLow,
+                                              timestamp, playoutTimestamp));
+    TEST_LOG( "\n    NTPHigh = %u \n    NTPLow = %u \n    timestamp = %u \n  "
+        "  playoutTimestamp = %u \n    jitter = %u \n    fractionLost = %hu \n",
+        NTPHigh, NTPLow, timestamp, playoutTimestamp, jitter, fractionLost);
+
+    unsigned int NTPHigh2(0), NTPLow2(0), timestamp2(0);
+    unsigned int playoutTimestamp2(0), jitter2(0);
+    unsigned short fractionLost2(0);
+
+    TEST_LOG(
+             "take a new sample and ensure that the playout timestamp is "
+             "maintained");
+    SLEEP(100);
+    TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCPData(0, NTPHigh2, NTPLow2, timestamp2,
+                                              playoutTimestamp2, &jitter2,
+                                              &fractionLost2));
+    TEST_LOG("\n    NTPHigh = %u \n    NTPLow = %u \n    timestamp = %u \n  "
+        "  playoutTimestamp = %u \n    jitter = %u \n    fractionLost = %hu \n",
+        NTPHigh2, NTPLow2, timestamp2, playoutTimestamp2, jitter2,
+        fractionLost2);
+    TEST_MUSTPASS(playoutTimestamp != playoutTimestamp2);
+
+    TEST_LOG("wait for 8 seconds and ensure that the RTCP statistics is"
+        " updated...");
+    SLEEP(8000);
+    TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCPData(0, NTPHigh2, NTPLow2,
+                                              timestamp2, playoutTimestamp2,
+                                              &jitter2, &fractionLost2));
+    TEST_LOG("\n    NTPHigh = %u \n    NTPLow = %u \n    timestamp = %u \n  "
+        "  playoutTimestamp = %u \n    jitter = %u \n    fractionLost = %hu \n",
+        NTPHigh2, NTPLow2, timestamp2, playoutTimestamp2, jitter2,
+        fractionLost2);
+    TEST_MUSTPASS((NTPHigh == NTPHigh2) && (NTPLow == NTPLow2));
+    TEST_MUSTPASS(timestamp == timestamp2);
+    TEST_MUSTPASS(playoutTimestamp == playoutTimestamp2);
+
+#ifdef WEBRTC_CODEC_RED
+    // The following test is related to defects 4985 and 4986.
+    TEST_LOG("Turn FEC and VAD on and wait for 4 seconds and ensure that "
+        "the jitter is still small...");
+    VoECodec* codec = _mgr.CodecPtr();
+    TEST_MUSTPASS(NULL == codec);
+    CodecInst cinst;
+#if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    cinst.pltype = 104;
+    strcpy(cinst.plname, "isac");
+    cinst.plfreq = 32000;
+    cinst.pacsize = 960;
+    cinst.channels = 1;
+    cinst.rate = 45000;
+#else
+    cinst.pltype = 119;
+    strcpy(cinst.plname, "isaclc");
+    cinst.plfreq = 16000;
+    cinst.pacsize = 320;
+    cinst.channels = 1;
+    cinst.rate = 40000;
+#endif
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(rtp_rtcp->SetFECStatus(0, true, -1));
+    MARK();
+    TEST_MUSTPASS(codec->SetVADStatus(0,true));
+    SLEEP(4000);
+    TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCPData(0, NTPHigh2, NTPLow2, timestamp2,
+                                              playoutTimestamp2, &jitter2,
+                                              &fractionLost2));
+    TEST_LOG("\n    NTPHigh = %u \n    NTPLow = %u \n    timestamp = %u \n "
+        "   playoutTimestamp = %u \n    jitter = %u \n   fractionLost = %hu \n",
+        NTPHigh2, NTPLow2, timestamp2, playoutTimestamp2, jitter2,
+        fractionLost2);
+    TEST_MUSTPASS(jitter2 > 1000)
+    TEST_MUSTPASS(rtp_rtcp->SetFECStatus(0, false));
+    MARK();
+    // end of tests for defects 4985 and 4986
+#endif // #ifdef WEBRTC_CODEC_RED
+    TEST(GetRTPStatistics);
+    ANL();
+    // Statistics summarized on local side based on received RTP packets.
+    CallStatistics stats;
+    // Call GetRTPStatistics over a longer period than 7.5 seconds
+    // (=dT RTCP transmissions).
+    unsigned int averageJitterMs, maxJitterMs, discardedPackets;
+    SLEEP(1000);
+    for (i = 0; i < 8; i++)
+    {
+        TEST_MUSTPASS(rtp_rtcp->GetRTPStatistics(0, averageJitterMs,
+                                                 maxJitterMs,
+                                                 discardedPackets));
+        TEST_LOG( "    %i) averageJitterMs = %u \n    maxJitterMs = %u \n  "
+            "  discardedPackets = %u \n",
+            i, averageJitterMs, maxJitterMs, discardedPackets);
+        SLEEP(1000);
+    }
+
+    TEST(RTCPStatistics #1);
+    ANL();
+    unsigned int packetsSent(0);
+    unsigned int packetsReceived(0);
+    for (i = 0; i < 8; i++)
+    {
+        TEST_MUSTPASS(rtp_rtcp->GetRTCPStatistics(0, stats));
+        TEST_LOG("    %i) fractionLost = %hu \n    cumulativeLost = %u \n  "
+            "  extendedMax = %u \n    jitterSamples = %u \n    rttMs = %d \n",
+            i, stats.fractionLost, stats.cumulativeLost,
+            stats.extendedMax, stats.jitterSamples, stats.rttMs);
+        TEST_LOG( "    bytesSent = %d \n    packetsSent = %d \n   "
+            " bytesReceived = %d \n    packetsReceived = %d \n",
+            stats.bytesSent, stats.packetsSent, stats.bytesReceived,
+            stats.packetsReceived);
+        if (i > 0)
+        {
+            TEST_LOG("    diff sent packets    : %u (~50)\n",
+                     stats.packetsSent - packetsSent);
+            TEST_LOG("    diff received packets: %u (~50)\n",
+                     stats.packetsReceived - packetsReceived);
+        }
+        packetsSent = stats.packetsSent;
+        packetsReceived = stats.packetsReceived;
+        SLEEP(1000);
+    }
+
+    TEST(RTCPStatistics #2);
+    ANL();
+    TEST_LOG("restart sending and ensure that the statistics is reset");
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(50); // ensures approx. two received packets
+    TEST_MUSTPASS(rtp_rtcp->GetRTCPStatistics(0, stats));
+    TEST_LOG("\n    fractionLost = %hu \n    cumulativeLost = %u \n  "
+        "  extendedMax = %u \n    jitterSamples = %u \n    rttMs = %d \n",
+        stats.fractionLost, stats.cumulativeLost,
+        stats.extendedMax, stats.jitterSamples, stats.rttMs);
+    TEST_LOG( "    bytesSent = %d \n    packetsSent = %d \n   "
+        " bytesReceived = %d \n    packetsReceived = %d \n",
+        stats.bytesSent, stats.packetsSent, stats.bytesReceived,
+        stats.packetsReceived);
+
+    TEST(RTCPStatistics #3);
+    ANL();
+    TEST_LOG("disable RTCP and verify that statistics is not corrupt");
+    TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, false));
+    SLEEP(250);
+    TEST_MUSTPASS(rtp_rtcp->GetRTCPStatistics(0, stats));
+    TEST_LOG("\n    fractionLost = %hu \n    cumulativeLost = %u \n   "
+        " extendedMax = %u \n    jitterSamples = %u \n    rttMs = %d \n",
+        stats.fractionLost, stats.cumulativeLost,
+        stats.extendedMax, stats.jitterSamples, stats.rttMs);
+    TEST_LOG("    bytesSent = %d \n    packetsSent = %d \n    "
+        "bytesReceived = %d \n    packetsReceived = %d \n",
+        stats.bytesSent, stats.packetsSent,
+        stats.bytesReceived, stats.packetsReceived);
+    TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, true));
+
+    TEST(RTCPStatistics #4);
+    ANL();
+    TEST_LOG("restart receiving and check RX statistics");
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->StartReceive(0));
+    SLEEP(50); // ensures approx. two received packets
+    TEST_MUSTPASS(rtp_rtcp->GetRTCPStatistics(0, stats));
+    TEST_LOG("\n    fractionLost = %hu \n    cumulativeLost = %u \n   "
+        " extendedMax = %u \n    jitterSamples = %u \n    rttMs = %d \n",
+        stats.fractionLost, stats.cumulativeLost,
+        stats.extendedMax, stats.jitterSamples,
+        stats.rttMs);
+    TEST_LOG("    bytesSent = %d \n    packetsSent = %d \n   "
+        " bytesReceived = %d \n    packetsReceived = %d \n",
+        stats.bytesSent, stats.packetsSent,
+        stats.bytesReceived, stats.packetsReceived);
+
+    TEST(SendApplicationDefinedRTCPPacket);
+    // just do some fail tests here
+    TEST_MUSTPASS(base->StopSend(0));
+    // should fail since sending is off
+    TEST_MUSTPASS(!rtp_rtcp->SendApplicationDefinedRTCPPacket(
+        0, 0, 0, "abcdabcdabcdabcdabcdabcdabcdabcd", 32));
+    MARK();
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(rtp_rtcp->SendApplicationDefinedRTCPPacket(
+        0, 0, 0, "abcdabcdabcdabcdabcdabcdabcdabcd", 32));
+    MARK();
+    TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, false));
+    // should fail since RTCP is off
+    TEST_MUSTPASS(!rtp_rtcp->SendApplicationDefinedRTCPPacket(
+        0, 0, 0, "abcdabcdabcdabcdabcdabcdabcdabcd", 32));
+    MARK();
+    TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, true));
+    TEST_MUSTPASS(rtp_rtcp->SendApplicationDefinedRTCPPacket(
+        0, 0, 0, "abcdabcdabcdabcdabcdabcdabcdabcd", 32));
+    MARK();
+    // invalid data length
+    TEST_MUSTPASS(!rtp_rtcp->SendApplicationDefinedRTCPPacket(
+        0, 0, 0, "abcdabcdabcdabcdabcdabcdabcdabc", 31));
+    MARK();
+    // invalid data vector
+    TEST_MUSTPASS(!rtp_rtcp->SendApplicationDefinedRTCPPacket(0, 0, 0, NULL, 0));
+    MARK();
+    ANL();
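+
+    // For reference, a minimal sketch of a valid call (subType and name are
+    // illustrative; the 31-byte failure above suggests the data length must
+    // be a multiple of four, and both sending and RTCP must be enabled):
+    //
+    //   rtp_rtcp->SendApplicationDefinedRTCPPacket(0, 1, 0x41424344,
+    //                                              "ABCDEFGH", 8);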
+
+#ifdef WEBRTC_CODEC_RED
+    TEST(SetFECStatus);
+    ANL();
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    cinst.pltype = 126;
+    strcpy(cinst.plname, "red");
+    cinst.plfreq = 8000;
+    cinst.pacsize = 0;
+    cinst.channels = 1;
+    cinst.rate = 0;
+    TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+#if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    cinst.pltype = 104;
+    strcpy(cinst.plname, "isac");
+    cinst.plfreq = 32000;
+    cinst.pacsize = 960;
+    cinst.channels = 1;
+    cinst.rate = 45000;
+#else
+    cinst.pltype = 119;
+    strcpy(cinst.plname, "isaclc");
+    cinst.plfreq = 16000;
+    cinst.pacsize = 320;
+    cinst.channels = 1;
+    cinst.rate = 40000;
+#endif
+    // We have to re-register the audio codec payload type, since StopReceive
+    // will clear the database.
+    TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 8000));
+    TEST_MUSTPASS(base->SetSendDestination(0, 8000, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_LOG("Start playing a file as microphone again \n");
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, micFile, true, true));
+    TEST_MUSTPASS(rtp_rtcp->SetFECStatus(0, true, 126));
+    MARK();
+    TEST_LOG("Should sound OK with FEC enabled\n");
+    SLEEP(4000);
+    TEST_MUSTPASS(rtp_rtcp->SetFECStatus(0, false));
+    MARK();
+#endif // #ifdef WEBRTC_CODEC_RED
+    TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->Terminate());
+
+    ANL();
+    AOK();
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest::TestVideoSync
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestVideoSync()
+{
+    PrepareTest("VideoSync");
+
+    VoEBase* base = _mgr.BasePtr();
+    VoEVideoSync* vsync = _mgr.VideoSyncPtr();
+
+    // check if this interface is supported
+    if (!vsync)
+    {
+        TEST_LOG("VoEVideoSync is not supported!");
+        return -1;
+    }
+
+#ifdef _USE_EXTENDED_TRACE_
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename(
+        "VoEVideoSync_trace.txt")));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+                                              kTraceStateInfo |
+                                              kTraceWarning |
+                                              kTraceError |
+                                              kTraceCritical |
+                                              kTraceApiCall |
+                                              kTraceMemory |
+                                              kTraceInfo));
+#endif
+
+    TEST_MUSTPASS(base->Init());
+    TEST_MUSTPASS(base->CreateChannel());
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 12345));
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+
+    ///////////////////////////
+    // Actual test starts here
+
+    TEST(SetInitTimestamp);
+    ANL();
+    TEST_MUSTPASS(!vsync->SetInitTimestamp(0, 12345));
+    TEST_MUSTPASS(base->StopSend(0));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(vsync->SetInitTimestamp(0, 12345));
+    TEST_MUSTPASS(base->StartSend(0));
+    MARK();
+    SLEEP(1000);
+    AOK();
+    ANL();
+
+    TEST(SetInitSequenceNumber);
+    ANL();
+    TEST_MUSTPASS(!vsync->SetInitSequenceNumber(0, 123));
+    TEST_MUSTPASS(base->StopSend(0));
+    MARK();
+    SLEEP(1000);
+    TEST_MUSTPASS(vsync->SetInitSequenceNumber(0, 123));
+    TEST_MUSTPASS(base->StartSend(0));
+    MARK();
+    SLEEP(1000);
+    AOK();
+    ANL();
+
+    unsigned int timeStamp;
+    TEST(GetPlayoutTimestamp);
+    ANL();
+    TEST_MUSTPASS(vsync->GetPlayoutTimestamp(0, timeStamp));
+    TEST_LOG("GetPlayoutTimestamp: %u", timeStamp);
+    SLEEP(1000);
+    TEST_MUSTPASS(vsync->GetPlayoutTimestamp(0, timeStamp));
+    TEST_LOG(" %u", timeStamp);
+    SLEEP(1000);
+    TEST_MUSTPASS(vsync->GetPlayoutTimestamp(0, timeStamp));
+    TEST_LOG(" %u\n", timeStamp);
+    AOK();
+    ANL();
+
+    TEST(SetMinimumPlayoutDelay);
+    ANL();
+    TEST_MUSTPASS(!vsync->SetMinimumPlayoutDelay(0, -1));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+    TEST_MUSTPASS(!vsync->SetMinimumPlayoutDelay(0, 5000));
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    MARK();
+
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->Terminate());
+
+    AOK();
+    ANL();
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest::TestVolumeControl
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestVolumeControl()
+{
+    PrepareTest("TestVolumeControl");
+
+    VoEBase* base = _mgr.BasePtr();
+    VoEVolumeControl* volume = _mgr.VolumeControlPtr();
+#ifdef _TEST_FILE_
+    VoEFile* file = _mgr.FilePtr();
+#endif
+#ifdef _TEST_HARDWARE_
+    VoEHardware* hardware = _mgr.HardwarePtr();
+#endif
+
+#ifdef _USE_EXTENDED_TRACE_
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(
+        GetFilename("VoEVolumeControl_trace.txt")));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+                                              kTraceStateInfo |
+                                              kTraceWarning |
+                                              kTraceError |
+                                              kTraceCritical |
+                                              kTraceApiCall |
+                                              kTraceMemory |
+                                              kTraceInfo));
+#endif
+
+    TEST_MUSTPASS(base->Init());
+    TEST_MUSTPASS(base->CreateChannel());
+#if (defined _TEST_HARDWARE_ && (!defined(MAC_IPHONE) && !defined(ANDROID)))
+#if defined(_WIN32)
+    TEST_MUSTPASS(hardware->SetRecordingDevice(-1));
+    TEST_MUSTPASS(hardware->SetPlayoutDevice(-1));
+#else
+    TEST_MUSTPASS(hardware->SetRecordingDevice(0));
+    TEST_MUSTPASS(hardware->SetPlayoutDevice(0));
+#endif
+#endif
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 12345));
+    TEST_MUSTPASS(base->SetSendDestination(0, 12345, "127.0.0.1"));
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+#ifdef _TEST_FILE_
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, micFile, true, true));
+#endif
+
+    ////////////////////////////
+    // Actual test starts here
+
+#if !defined(MAC_IPHONE)
+    TEST(SetSpeakerVolume);
+    ANL();
+    TEST_MUSTPASS(-1 != volume->SetSpeakerVolume(256));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    ANL();
+#endif // #if !defined(MAC_IPHONE)
+
+#if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    TEST(SetMicVolume); ANL();
+    TEST_MUSTPASS(-1 != volume->SetMicVolume(256)); MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    ANL();
+#endif // #if (!defined(MAC_IPHONE) && !defined(ANDROID))
+
+#if !defined(MAC_IPHONE)
+    TEST(SetChannelOutputVolumeScaling);
+    ANL();
+    TEST_MUSTPASS(-1 != volume->SetChannelOutputVolumeScaling(0, (float)-0.1));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(-1 != volume->SetChannelOutputVolumeScaling(0, (float)10.1));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    ANL();
+#endif // #if !defined(MAC_IPHONE)
+#if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    TEST(SetOutputVolumePan);
+    ANL();
+    TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(-1, (float)-0.1,
+                                                   (float)1.0));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(-1, (float)1.1,
+                                                   (float)1.0));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(-1, (float)1.0,
+                                                   (float)-0.1));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(-1, (float)1.0,
+                                                   (float)1.1));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    ANL();
+
+    TEST(SetChannelOutputVolumePan);
+    ANL();
+    TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(0, (float)-0.1,
+                                                   (float)1.0));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(0, (float)1.1,
+                                                   (float)1.0));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(0, (float)1.0,
+                                                   (float)-0.1));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(0, (float)1.0,
+                                                   (float)1.1));
+    MARK();
+    TEST_MUSTPASS(VE_INVALID_ARGUMENT != base->LastError());
+    ANL();
+#endif // #if (!defined(MAC_IPHONE) && !defined(ANDROID))
+#ifdef _TEST_FILE_
+    TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+#endif
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->Terminate());
+
+    AOK();
+    ANL();
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest::TestAPM
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestAPM()
+{
+    PrepareTest("AudioProcessing");
+
+    VoEBase* base = _mgr.BasePtr();
+    VoEAudioProcessing* apm = _mgr.APMPtr();
+
+    //#ifdef _USE_EXTENDED_TRACE_
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename("apm_trace.txt")));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+                                              kTraceWarning |
+                                              kTraceError |
+                                              kTraceCritical |
+                                              kTraceApiCall |
+                                              kTraceMemory |
+                                              kTraceInfo));
+    //#endif
+
+    TEST_MUSTPASS(base->Init());
+    TEST_MUSTPASS(base->CreateChannel());
+
+    ///////////////////////////
+    // Actual test starts here
+
+    int i;
+    bool enabled;
+
+    //////
+    // EC
+
+    const int ECSleep = 0;
+    const int ECIterations = 10;
+
+    EcModes ECmode(kEcAec);
+    AecmModes AECMmode(kAecmSpeakerphone);
+    bool enabledCNG(false);
+
+#if (defined(MAC_IPHONE) || defined(ANDROID))
+    const EcModes ECmodeDefault(kEcAecm);
+#else
+    const EcModes ECmodeDefault(kEcAec);
+#endif
+
+    // verify default settings (should be OFF and mode as above)
+    TEST_MUSTPASS(apm->GetEcStatus(enabled, ECmode));
+    TEST_LOG("EC: enabled=%d, ECmode=%d\n", enabled, ECmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(ECmode != ECmodeDefault);
+
+    // set EC defaults
+    TEST_MUSTPASS(apm->SetEcStatus(false, kEcDefault));
+    TEST_MUSTPASS(apm->GetEcStatus(enabled, ECmode));
+    TEST_LOG("EC: enabled=%d, ECmode=%d\n", enabled, ECmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(ECmode != ECmodeDefault);
+    SLEEP(ECSleep);
+
+#if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    // set kEcAec mode
+    TEST_MUSTPASS(apm->SetEcStatus(true, kEcAec));
+    TEST_MUSTPASS(apm->GetEcStatus(enabled, ECmode));
+    TEST_LOG("EC: enabled=%d, ECmode=%d\n", enabled, ECmode);
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(ECmode != kEcAec);
+    SLEEP(ECSleep);
+
+    // set kEcConference mode
+    TEST_MUSTPASS(apm->SetEcStatus(true, kEcConference));
+    TEST_MUSTPASS(apm->GetEcStatus(enabled, ECmode));
+    TEST_LOG("EC: enabled=%d, ECmode=%d\n", enabled, ECmode);
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(ECmode != kEcAec);
+    SLEEP(ECSleep);
+#endif // #if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    // get default AECM mode; should be kAecmSpeakerphone by default
+    TEST_MUSTPASS(apm->GetAecmMode(AECMmode, enabledCNG));
+    TEST_MUSTPASS(AECMmode != kAecmSpeakerphone);
+    TEST_MUSTPASS(enabledCNG != true);
+    TEST_MUSTPASS(apm->SetAecmMode(kAecmQuietEarpieceOrHeadset, false));
+    TEST_MUSTPASS(apm->GetAecmMode(AECMmode, enabledCNG));
+    TEST_LOG("AECM: mode=%d, CNG: mode=%d\n", AECMmode,
+             enabledCNG);
+    TEST_MUSTPASS(AECMmode != kAecmQuietEarpieceOrHeadset);
+    TEST_MUSTPASS(enabledCNG != false);
+
+    // set kEcAecm mode
+    TEST_MUSTPASS(apm->SetEcStatus(true, kEcAecm));
+    TEST_MUSTPASS(apm->GetEcStatus(enabled, ECmode));
+    TEST_LOG("EC: enabled=%d, ECmode=%d\n", enabled, ECmode);
+    TEST_MUSTPASS(enabled != true);
+    TEST_MUSTPASS(ECmode != kEcAecm);
+    SLEEP(ECSleep);
+
+    // AECM mode, get and set
+    TEST_MUSTPASS(apm->GetAecmMode(AECMmode, enabledCNG));
+    TEST_MUSTPASS(AECMmode != kAecmQuietEarpieceOrHeadset);
+    TEST_MUSTPASS(enabledCNG != false);
+    TEST_MUSTPASS(apm->SetAecmMode(kAecmEarpiece, true));
+    TEST_MUSTPASS(apm->GetAecmMode(AECMmode, enabledCNG));
+    TEST_LOG("AECM: mode=%d, CNG: mode=%d\n", AECMmode,
+             enabledCNG);
+    TEST_MUSTPASS(AECMmode != kAecmEarpiece);
+    TEST_MUSTPASS(enabledCNG != true);
+    TEST_MUSTPASS(apm->SetAecmMode(kAecmEarpiece, false));
+    TEST_MUSTPASS(apm->GetAecmMode(AECMmode, enabledCNG));
+    TEST_LOG("AECM: mode=%d, CNG: mode=%d\n", AECMmode,
+             enabledCNG);
+    TEST_MUSTPASS(AECMmode != kAecmEarpiece);
+    TEST_MUSTPASS(enabledCNG != false);
+    TEST_MUSTPASS(apm->SetAecmMode(kAecmLoudEarpiece, true));
+    TEST_MUSTPASS(apm->GetAecmMode(AECMmode, enabledCNG));
+    TEST_LOG("AECM: mode=%d, CNG: mode=%d\n", AECMmode,
+             enabledCNG);
+    TEST_MUSTPASS(AECMmode != kAecmLoudEarpiece);
+    TEST_MUSTPASS(enabledCNG != true);
+    TEST_MUSTPASS(apm->SetAecmMode(kAecmSpeakerphone, false));
+    TEST_MUSTPASS(apm->GetAecmMode(AECMmode, enabledCNG));
+    TEST_LOG("AECM: mode=%d, CNG: mode=%d\n", AECMmode,
+             enabledCNG);
+    TEST_MUSTPASS(AECMmode != kAecmSpeakerphone);
+    TEST_MUSTPASS(enabledCNG != false);
+    TEST_MUSTPASS(apm->SetAecmMode(kAecmLoudSpeakerphone, true));
+    TEST_MUSTPASS(apm->GetAecmMode(AECMmode, enabledCNG));
+    TEST_LOG("AECM: mode=%d, CNG: mode=%d\n", AECMmode,
+             enabledCNG);
+    TEST_MUSTPASS(AECMmode != kAecmLoudSpeakerphone);
+    TEST_MUSTPASS(enabledCNG != true);
+
+    // verify that all modes are maintained when EC is disabled
+    TEST_MUSTPASS(apm->SetEcStatus(false));
+    TEST_MUSTPASS(apm->GetEcStatus(enabled, ECmode));
+    TEST_LOG("EC: enabled=%d, ECmode=%d\n", enabled, ECmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(ECmode != kEcAecm);
+    SLEEP(ECSleep);
+
+    // restore defaults
+    TEST_MUSTPASS(apm->SetEcStatus(true, kEcDefault));
+    TEST_MUSTPASS(apm->SetEcStatus(false, kEcUnchanged));
+    TEST_MUSTPASS(apm->GetEcStatus(enabled, ECmode));
+    TEST_LOG("EC: enabled=%d, ECmode=%d\n", enabled, ECmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(ECmode != ECmodeDefault);
+    SLEEP(ECSleep);
+
+    // enable/disable many times in a row
+    for (i = 0; i < ECIterations; i++)
+    {
+        TEST_MUSTPASS(apm->SetEcStatus(true));
+        TEST_MUSTPASS(apm->SetEcStatus(false));
+    }
+    TEST_MUSTPASS(apm->GetEcStatus(enabled, ECmode));
+    TEST_LOG("EC: enabled=%d, ECmode=%d\n", enabled, ECmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(ECmode != ECmodeDefault);
+    SLEEP(ECSleep);
+
+    ///////
+    // AGC
+
+    const int AGCSleep = 0;
+    const int AGCIterations = 10;
+
+    AgcModes AGCmode(kAgcAdaptiveAnalog);
+
+#if (defined(MAC_IPHONE) || defined(ANDROID))
+    bool enabledDefault = false;
+    AgcModes AGCmodeDefault(kAgcAdaptiveDigital);
+#else
+    bool enabledDefault = true;
+    AgcModes AGCmodeDefault(kAgcAdaptiveAnalog);
+#endif
+
+    // verify default settings (should be as above)
+    TEST_MUSTPASS(apm->GetAgcStatus(enabled, AGCmode));
+    TEST_LOG("AGC: enabled=%d, AGCmode=%d\n", enabled, AGCmode);
+    TEST_MUSTPASS(enabled != enabledDefault);
+    TEST_MUSTPASS(AGCmode != AGCmodeDefault);
+
+    // set default AGC mode
+    TEST_MUSTPASS(apm->SetAgcStatus(false, kAgcDefault));
+    TEST_MUSTPASS(apm->GetAgcStatus(enabled, AGCmode));
+    TEST_LOG("AGC: enabled=%d, AGCmode=%d\n", enabled, AGCmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(AGCmode != AGCmodeDefault);
+    SLEEP(AGCSleep);
+
+    // set kAgcFixedDigital mode
+    TEST_MUSTPASS(apm->SetAgcStatus(true, kAgcFixedDigital));
+    TEST_MUSTPASS(apm->GetAgcStatus(enabled, AGCmode));
+    TEST_LOG("AGC: enabled=%d, AGCmode=%d\n", enabled, AGCmode);
+    TEST_MUSTPASS(AGCmode != kAgcFixedDigital);
+    SLEEP(AGCSleep);
+
+#if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    // set kAgcAdaptiveAnalog mode
+    TEST_MUSTPASS(apm->SetAgcStatus(true, kAgcAdaptiveAnalog));
+    TEST_MUSTPASS(apm->GetAgcStatus(enabled, AGCmode));
+    TEST_LOG("AGC: enabled=%d, AGCmode=%d\n", enabled, AGCmode);
+    TEST_MUSTPASS(AGCmode != kAgcAdaptiveAnalog);
+    SLEEP(AGCSleep);
+#endif // #if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    // set kAgcAdaptiveDigital mode
+    TEST_MUSTPASS(apm->SetAgcStatus(true, kAgcAdaptiveDigital));
+    TEST_MUSTPASS(apm->GetAgcStatus(enabled, AGCmode));
+    TEST_LOG("AGC: enabled=%d, AGCmode=%d\n", enabled, AGCmode);
+    TEST_MUSTPASS(AGCmode != kAgcAdaptiveDigital);
+    SLEEP(AGCSleep);
+
+    // verify that mode is maintained when AGC is disabled
+    TEST_MUSTPASS(apm->SetAgcStatus(false));
+    TEST_MUSTPASS(apm->GetAgcStatus(enabled, AGCmode));
+    TEST_LOG("AGC: enabled=%d, AGCmode=%d\n", enabled, AGCmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(AGCmode != kAgcAdaptiveDigital);
+    SLEEP(AGCSleep);
+
+    // restore default AGC
+    TEST_MUSTPASS(apm->SetAgcStatus(enabledDefault, kAgcDefault));
+    TEST_MUSTPASS(apm->GetAgcStatus(enabled, AGCmode));
+    TEST_LOG("AGC: enabled=%d, AGCmode=%d\n", enabled, AGCmode);
+    TEST_MUSTPASS(enabled != enabledDefault);
+    TEST_MUSTPASS(AGCmode != AGCmodeDefault);
+    SLEEP(AGCSleep);
+
+    // enable/disable many times in a row
+    for (i = 0; i < AGCIterations; i++)
+    {
+        TEST_MUSTPASS(apm->SetAgcStatus(true));
+        TEST_MUSTPASS(apm->SetAgcStatus(false));
+    }
+    TEST_MUSTPASS(apm->GetAgcStatus(enabled, AGCmode));
+    TEST_LOG("AGC: enabled=%d, AGCmode=%d\n", enabled, AGCmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(AGCmode != AGCmodeDefault);
+
+    // --- Set/GetAgcConfig ---
+
+    // targetLeveldBOv         : [0, 31] (default 3)
+    // digitalCompressionGaindB: [0, 90] (default 9)
+    // limiterEnable           : 0: Off, 1: On (default)
+
+    AgcConfig agcConfig;
+    AgcConfig agcConfigDefault;
+
+    const unsigned short targetLeveldBOvDefault = 3;
+    const unsigned short digitalCompressionGaindBDefault = 9;
+    const bool limiterEnableDefault = true;
+
+    const unsigned short targetLeveldBOvMax = 31;
+    const unsigned short digitalCompressionGaindBMax = 90;
+
+    // verify default configuration
+    TEST_MUSTPASS(apm->GetAgcConfig(agcConfigDefault));
+    TEST_LOG("AGC: targetLeveldBOv=%d, digitalCompressionGaindB=%d, "
+        "limiterEnable=%d\n",
+        agcConfigDefault.targetLeveldBOv,
+        agcConfigDefault.digitalCompressionGaindB,
+        agcConfigDefault.limiterEnable);
+    TEST_MUSTPASS(agcConfigDefault.targetLeveldBOv != targetLeveldBOvDefault);
+    TEST_MUSTPASS(agcConfigDefault.digitalCompressionGaindB !=
+        digitalCompressionGaindBDefault);
+    TEST_MUSTPASS(agcConfigDefault.limiterEnable != limiterEnableDefault);
+
+    // verify that invalid (out-of-range) parameters are detected
+    agcConfig = agcConfigDefault;
+    agcConfig.targetLeveldBOv = targetLeveldBOvMax + 1;
+    TEST_MUSTPASS(!apm->SetAgcConfig(agcConfig));
+    int err = base->LastError();
+    TEST_MUSTPASS(err != VE_APM_ERROR);
+    agcConfig = agcConfigDefault;
+    agcConfig.digitalCompressionGaindB =
+        digitalCompressionGaindBMax + 1;
+    TEST_MUSTPASS(!apm->SetAgcConfig(agcConfig));
+
+    AgcConfig agcConfigSet;
+    agcConfigSet.digitalCompressionGaindB = 17;
+    agcConfigSet.targetLeveldBOv = 11;
+    agcConfigSet.limiterEnable = false;
+
+    // try some set/get operations using valid settings
+    TEST_MUSTPASS(apm->SetAgcConfig(agcConfigDefault));
+    TEST_MUSTPASS(apm->GetAgcConfig(agcConfig));
+    TEST_MUSTPASS(agcConfig.targetLeveldBOv != targetLeveldBOvDefault);
+    TEST_MUSTPASS(agcConfig.digitalCompressionGaindB !=
+        digitalCompressionGaindBDefault);
+    TEST_MUSTPASS(agcConfig.limiterEnable != limiterEnableDefault);
+
+    TEST_MUSTPASS(apm->SetAgcConfig(agcConfigSet));
+    TEST_MUSTPASS(apm->GetAgcConfig(agcConfig));
+    TEST_MUSTPASS(agcConfig.targetLeveldBOv != agcConfigSet.targetLeveldBOv);
+    TEST_MUSTPASS(agcConfig.digitalCompressionGaindB !=
+        agcConfigSet.digitalCompressionGaindB);
+    TEST_MUSTPASS(agcConfig.limiterEnable != agcConfigSet.limiterEnable);
+
+    // restore default AGC config
+    TEST_MUSTPASS(apm->SetAgcConfig(agcConfigDefault));
+    SLEEP(AGCSleep);
+
+    //////
+    // NS
+
+    const int NSSleep = 0;
+    const int NSIterations = 10;
+
+    NsModes NSmode(kNsHighSuppression);
+    NsModes NSmodeDefault(kNsModerateSuppression);
+
+    // verify default settings (should be OFF and mode as above)
+    TEST_MUSTPASS(apm->GetNsStatus(enabled, NSmode));
+    TEST_LOG("NS: enabled=%d, NSmode=%d\n", enabled, NSmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(NSmode != NSmodeDefault);
+
+    // enable default NS settings
+    // must set a value first time!
+    TEST_MUSTPASS(apm->SetNsStatus(false, kNsDefault));
+    TEST_MUSTPASS(apm->GetNsStatus(enabled, NSmode));
+    TEST_LOG("NS: enabled=%d, NSmode=%d\n", enabled, NSmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(NSmode != NSmodeDefault);
+    SLEEP(NSSleep);
+
+    // set kNsLowSuppression mode
+    TEST_MUSTPASS(apm->SetNsStatus(true, kNsLowSuppression));
+    TEST_MUSTPASS(apm->GetNsStatus(enabled, NSmode));
+    TEST_LOG("NS: enabled=%d, NSmode=%d\n", enabled, NSmode);
+    TEST_MUSTPASS(NSmode != kNsLowSuppression);
+    SLEEP(NSSleep);
+
+    // set kNsModerateSuppression mode
+    TEST_MUSTPASS(apm->SetNsStatus(true, kNsModerateSuppression));
+    TEST_MUSTPASS(apm->GetNsStatus(enabled, NSmode));
+    TEST_LOG("NS: enabled=%d, NSmode=%d\n", enabled, NSmode);
+    TEST_MUSTPASS(NSmode != kNsModerateSuppression);
+    SLEEP(NSSleep);
+
+    // set kNsHighSuppression mode
+    TEST_MUSTPASS(apm->SetNsStatus(true, kNsHighSuppression));
+    TEST_MUSTPASS(apm->GetNsStatus(enabled, NSmode));
+    TEST_LOG("NS: enabled=%d, NSmode=%d\n", enabled, NSmode);
+    TEST_MUSTPASS(NSmode != kNsHighSuppression);
+    SLEEP(NSSleep);
+
+    // set kNsVeryHighSuppression mode
+    TEST_MUSTPASS(apm->SetNsStatus(true, kNsVeryHighSuppression));
+    TEST_MUSTPASS(apm->GetNsStatus(enabled, NSmode));
+    TEST_LOG("NS: enabled=%d, NSmode=%d\n", enabled, NSmode);
+    TEST_MUSTPASS(NSmode != kNsVeryHighSuppression);
+    SLEEP(NSSleep);
+
+    // set kNsConference mode (reported back as kNsHighSuppression)
+    TEST_MUSTPASS(apm->SetNsStatus(true, kNsConference));
+    TEST_MUSTPASS(apm->GetNsStatus(enabled, NSmode));
+    TEST_LOG("NS: enabled=%d, NSmode=%d\n", enabled, NSmode);
+    TEST_MUSTPASS(NSmode != kNsHighSuppression);
+    SLEEP(NSSleep);
+
+    // verify that mode is maintained when NS is disabled
+    TEST_MUSTPASS(apm->SetNsStatus(false));
+    TEST_MUSTPASS(apm->GetNsStatus(enabled, NSmode));
+    TEST_LOG("NS: enabled=%d, NSmode=%d\n", enabled, NSmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(NSmode != kNsHighSuppression);
+    SLEEP(NSSleep);
+
+    // restore default NS
+    TEST_MUSTPASS(apm->SetNsStatus(true, kNsDefault));
+    TEST_MUSTPASS(apm->SetNsStatus(false));
+    TEST_MUSTPASS(apm->GetNsStatus(enabled, NSmode));
+    TEST_LOG("NS: enabled=%d, NSmode=%d\n", enabled, NSmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(NSmode != NSmodeDefault);
+    SLEEP(NSSleep);
+
+    // enable/disable many times in a row
+    for (i = 0; i < NSIterations; i++)
+    {
+        TEST_MUSTPASS(apm->SetNsStatus(true));
+        TEST_MUSTPASS(apm->SetNsStatus(false));
+    }
+    TEST_MUSTPASS(apm->GetNsStatus(enabled, NSmode));
+    TEST_LOG("NS: enabled=%d, NSmode=%d\n", enabled, NSmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(NSmode != NSmodeDefault);
+    SLEEP(NSSleep);
+
+    //////////////////////////////////
+    // Speech, Noise and Echo Metrics
+
+#if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    // TODO(xians): enable these tests when APM is ready
+    /*
+    TEST(GetMetricsStatus);
+    ANL();
+    TEST(SetMetricsStatus);
+    ANL();
+    TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
+    MARK();
+    TEST_MUSTPASS(enabled != false);
+    MARK(); // should be OFF by default
+    TEST_MUSTPASS(apm->SetMetricsStatus(true));
+    MARK();
+    TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
+    MARK();
+    TEST_MUSTPASS(enabled != true);
+    MARK();
+    TEST_MUSTPASS(apm->SetMetricsStatus(false));
+    MARK();
+    TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
+    MARK();
+    TEST_MUSTPASS(enabled != false);
+    MARK();
+    AOK();
+    ANL();
+
+    TEST(GetSpeechMetrics);
+    ANL();
+
+    int levelTx, levelRx;
+    TEST_MUSTPASS(-1 != apm->GetSpeechMetrics(levelTx, levelRx));
+    MARK(); // should fail since not activated
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_APM_ERROR);
+    TEST_MUSTPASS(apm->SetMetricsStatus(true));
+    TEST_MUSTPASS(apm->GetSpeechMetrics(levelTx, levelRx));
+    MARK();
+    TEST_LOG("\nSpeech: levelTx=%d, levelRx=%d [dBm0]\n",
+             levelTx, levelRx);
+    TEST_MUSTPASS(apm->SetMetricsStatus(false));
+    AOK();
+    ANL();
+
+    TEST(GetNoiseMetrics);
+    ANL();
+
+    TEST_MUSTPASS(-1 != apm->GetNoiseMetrics(levelTx, levelRx));
+    MARK(); // should fail since not activated
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_APM_ERROR);
+    TEST_MUSTPASS(apm->SetMetricsStatus(true));
+    TEST_MUSTPASS(apm->GetNoiseMetrics(levelTx, levelRx));
+    MARK();
+    TEST_LOG("\nNoise: levelTx=%d, levelRx=%d [dBm0]\n",
+             levelTx, levelRx);
+    TEST_MUSTPASS(apm->SetMetricsStatus(false));
+    AOK();
+    ANL();
+
+    TEST(GetEchoMetrics);
+    ANL();
+
+    int ERL, ERLE, RERL, A_NLP;
+    TEST_MUSTPASS(-1 != apm->GetEchoMetrics(ERL, ERLE, RERL, A_NLP));
+    MARK(); // should fail since not activated
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_APM_ERROR);
+    TEST_MUSTPASS(apm->SetMetricsStatus(true));
+    TEST_MUSTPASS(-1 != apm->GetEchoMetrics(ERL, ERLE, RERL, A_NLP));
+    MARK(); // should fail since AEC is off
+    err = base->LastError();
+    TEST_MUSTPASS(err != VE_APM_ERROR);
+    TEST_MUSTPASS(apm->SetEcStatus(true));
+    TEST_MUSTPASS(apm->GetEchoMetrics(ERL, ERLE, RERL, A_NLP));
+    MARK(); // should work now
+    TEST_LOG(
+        "\nEcho: ERL=%d, ERLE=%d, RERL=%d, A_NLP=%d [dB]\n",
+        ERL, ERLE, RERL, A_NLP);
+    TEST_MUSTPASS(apm->SetMetricsStatus(false));
+    TEST_MUSTPASS(apm->SetEcStatus(false));
+    AOK();
+    ANL();
+    */
+#endif // #if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    // far-end AudioProcessing
+    ///////
+    // AGC
+
+    AgcModes rxAGCmode(kAgcAdaptiveDigital);
+    AgcModes rxAGCmodeDefault(kAgcAdaptiveDigital);
+    bool rxEnabledDefault = false;
+
+    // verify default settings (should be as above)
+    TEST_MUSTPASS(apm->GetRxAgcStatus(0, enabled, rxAGCmode));
+    TEST_LOG("rxAGC: enabled=%d, AGCmode=%d\n", enabled,
+             rxAGCmode);
+    TEST_MUSTPASS(enabled != rxEnabledDefault);
+    TEST_MUSTPASS(rxAGCmode != rxAGCmodeDefault);
+
+    // set default AGC mode
+    TEST_MUSTPASS(apm->SetRxAgcStatus(0, false, kAgcDefault));
+    TEST_MUSTPASS(apm->GetRxAgcStatus(0, enabled, rxAGCmode));
+    TEST_LOG("rxAGC: enabled=%d, AGCmode=%d\n", enabled,
+             rxAGCmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(rxAGCmode != rxAGCmodeDefault);
+    SLEEP(AGCSleep);
+
+    // set kAgcAdaptiveAnalog mode, should fail
+    TEST_MUSTPASS(!apm->SetRxAgcStatus(0, true, kAgcAdaptiveAnalog));
+
+    // set kAgcFixedDigital mode
+    TEST_MUSTPASS(apm->SetRxAgcStatus(0, true, kAgcFixedDigital));
+    TEST_MUSTPASS(apm->GetRxAgcStatus(0, enabled, rxAGCmode));
+    TEST_LOG("rxAGC: enabled=%d, AGCmode=%d\n", enabled,
+             rxAGCmode);
+    TEST_MUSTPASS(rxAGCmode != kAgcFixedDigital);
+    SLEEP(AGCSleep);
+
+    // set kAgcAdaptiveDigital mode
+    TEST_MUSTPASS(apm->SetRxAgcStatus(0, true, kAgcAdaptiveDigital));
+    TEST_MUSTPASS(apm->GetRxAgcStatus(0, enabled, rxAGCmode));
+    TEST_LOG("rxAGC: enabled=%d, AGCmode=%d\n", enabled,
+             rxAGCmode);
+    TEST_MUSTPASS(rxAGCmode != kAgcAdaptiveDigital);
+    SLEEP(AGCSleep);
+
+    // verify that mode is maintained when AGC is disabled
+    TEST_MUSTPASS(apm->SetRxAgcStatus(0, false));
+    TEST_MUSTPASS(apm->GetRxAgcStatus(0, enabled, rxAGCmode));
+    TEST_LOG("rxAGC: enabled=%d, AGCmode=%d\n", enabled,
+             rxAGCmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(rxAGCmode != kAgcAdaptiveDigital);
+    SLEEP(AGCSleep);
+
+    // restore default AGC
+    TEST_MUSTPASS(apm->SetRxAgcStatus(0, enabledDefault, kAgcDefault));
+    TEST_MUSTPASS(apm->GetRxAgcStatus(0, enabled, rxAGCmode));
+    TEST_LOG("rxAGC: enabled=%d, AGCmode=%d\n", enabled,
+             rxAGCmode);
+    TEST_MUSTPASS(enabled != enabledDefault);
+    TEST_MUSTPASS(rxAGCmode != rxAGCmodeDefault);
+    SLEEP(AGCSleep);
+
+    // enable/disable many times in a row
+    for (i = 0; i < AGCIterations; i++)
+    {
+        TEST_MUSTPASS(apm->SetRxAgcStatus(0, true));
+        TEST_MUSTPASS(apm->SetRxAgcStatus(0, false));
+    }
+    TEST_MUSTPASS(apm->GetRxAgcStatus(0, enabled, rxAGCmode));
+    TEST_LOG("rxAGC: enabled=%d, AGCmode=%d\n", enabled,
+             rxAGCmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(rxAGCmode != rxAGCmodeDefault);
+
+    // --- Set/GetRxAgcConfig ---
+
+    // targetLeveldBOv         : [0, 31] (default 3)
+    // digitalCompressionGaindB: [0, 90] (default 9)
+    // limiterEnable           : 0: Off, 1: On (default)
+
+    AgcConfig rxAGCConfig;
+    AgcConfig rxAGCConfigDefault;
+
+    const unsigned short rxTargetLeveldBOvDefault = 3;
+    const unsigned short rxDigitalCompressionGaindBDefault = 9;
+    const bool rxLimiterEnableDefault = true;
+
+    const unsigned short rxTargetLeveldBOvMax = 31;
+    const unsigned short rxDigitalCompressionGaindBMax = 90;
+
+    // verify default configuration
+    TEST_MUSTPASS(apm->GetRxAgcConfig(0, rxAGCConfigDefault));
+    TEST_LOG(
+        "rxAGC: targetLeveldBOv=%u, digitalCompressionGaindB=%u, "
+        "limiterEnable=%d\n",
+        rxAGCConfigDefault.targetLeveldBOv,
+        rxAGCConfigDefault.digitalCompressionGaindB,
+        rxAGCConfigDefault.limiterEnable);
+    TEST_MUSTPASS(rxAGCConfigDefault.targetLeveldBOv !=
+        rxTargetLeveldBOvDefault);
+    TEST_MUSTPASS(rxAGCConfigDefault.digitalCompressionGaindB !=
+        rxDigitalCompressionGaindBDefault);
+    TEST_MUSTPASS(rxAGCConfigDefault.limiterEnable != rxLimiterEnableDefault);
+
+    // verify that invalid (out-of-range) parameters are detected
+    rxAGCConfig = rxAGCConfigDefault;
+    rxAGCConfig.targetLeveldBOv = rxTargetLeveldBOvMax + 1;
+    TEST_MUSTPASS(!apm->SetRxAgcConfig(0, rxAGCConfig));
+    int rxErr = base->LastError();
+    TEST_MUSTPASS(rxErr != VE_APM_ERROR);
+    rxAGCConfig = rxAGCConfigDefault;
+    rxAGCConfig.digitalCompressionGaindB =
+        rxDigitalCompressionGaindBMax + 1;
+    TEST_MUSTPASS(!apm->SetRxAgcConfig(0, rxAGCConfig));
+
+    AgcConfig rxAGCConfigSet;
+    rxAGCConfigSet.digitalCompressionGaindB = 17;
+    rxAGCConfigSet.targetLeveldBOv = 11;
+    rxAGCConfigSet.limiterEnable = false;
+
+    // try some set/get operations using valid settings
+    TEST_MUSTPASS(apm->SetRxAgcConfig(0, rxAGCConfigDefault));
+    TEST_MUSTPASS(apm->GetRxAgcConfig(0, rxAGCConfig));
+    TEST_MUSTPASS(rxAGCConfig.targetLeveldBOv != rxTargetLeveldBOvDefault);
+    TEST_MUSTPASS(rxAGCConfig.digitalCompressionGaindB !=
+        rxDigitalCompressionGaindBDefault);
+    TEST_MUSTPASS(rxAGCConfig.limiterEnable != rxLimiterEnableDefault);
+
+    TEST_MUSTPASS(apm->SetRxAgcConfig(0, rxAGCConfigSet));
+    TEST_MUSTPASS(apm->GetRxAgcConfig(0, rxAGCConfig));
+    TEST_MUSTPASS(rxAGCConfig.targetLeveldBOv !=
+        rxAGCConfigSet.targetLeveldBOv);
+    TEST_MUSTPASS(rxAGCConfig.digitalCompressionGaindB !=
+        rxAGCConfigSet.digitalCompressionGaindB);
+    TEST_MUSTPASS(rxAGCConfig.limiterEnable != rxAGCConfigSet.limiterEnable);
+
+    // restore default AGC config
+    TEST_MUSTPASS(apm->SetRxAgcConfig(0, rxAGCConfigDefault));
+    SLEEP(AGCSleep);
+
+    //////
+    // NS
+
+    NsModes rxNSmode(kNsHighSuppression);
+    NsModes rxNSmodeDefault(kNsModerateSuppression);
+
+    // verify default settings (should be OFF and mode as above)
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, enabled, rxNSmode));
+    TEST_LOG("rxNS: enabled=%d, NSmode=%d\n", enabled, rxNSmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(rxNSmode != rxNSmodeDefault);
+
+    // enable default NS settings
+    // must set a value first time!
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, false, kNsDefault));
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, enabled, rxNSmode));
+    TEST_LOG("rxNS: enabled=%d, NSmode=%d\n", enabled, rxNSmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(rxNSmode != rxNSmodeDefault);
+    SLEEP(NSSleep);
+
+    // set kNsLowSuppression mode
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, true, kNsLowSuppression));
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, enabled, rxNSmode));
+    TEST_LOG("rxNS: enabled=%d, NSmode=%d\n", enabled, rxNSmode);
+    TEST_MUSTPASS(rxNSmode != kNsLowSuppression);
+    SLEEP(NSSleep);
+
+    // set kNsModerateSuppression mode
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, true, kNsModerateSuppression));
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, enabled, rxNSmode));
+    TEST_LOG("rxNS: enabled=%d, NSmode=%d\n", enabled, rxNSmode);
+    TEST_MUSTPASS(rxNSmode != kNsModerateSuppression);
+    SLEEP(NSSleep);
+
+    // set kNsHighSuppression mode
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, true, kNsHighSuppression));
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, enabled, rxNSmode));
+    TEST_LOG("rxNS: enabled=%d, NSmode=%d\n", enabled, rxNSmode);
+    TEST_MUSTPASS(rxNSmode != kNsHighSuppression);
+    SLEEP(NSSleep);
+
+    // set kNsVeryHighSuppression mode
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, true, kNsVeryHighSuppression));
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, enabled, rxNSmode));
+    TEST_LOG("rxNS: enabled=%d, NSmode=%d\n", enabled, rxNSmode);
+    TEST_MUSTPASS(rxNSmode != kNsVeryHighSuppression);
+    SLEEP(NSSleep);
+
+    // set kNsConference mode (reported back as kNsHighSuppression)
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, true, kNsConference));
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, enabled, rxNSmode));
+    TEST_LOG("rxNS: enabled=%d, NSmode=%d\n", enabled, rxNSmode);
+    TEST_MUSTPASS(rxNSmode != kNsHighSuppression);
+    SLEEP(NSSleep);
+
+    // verify that mode is maintained when NS is disabled
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, false));
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, enabled, rxNSmode));
+    TEST_LOG("rxNS: enabled=%d, NSmode=%d\n", enabled, rxNSmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(rxNSmode != kNsHighSuppression);
+    SLEEP(NSSleep);
+
+    // restore default NS
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, true, kNsDefault));
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, false));
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, enabled, rxNSmode));
+    TEST_LOG("rxNS: enabled=%d, NSmode=%d\n", enabled, rxNSmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(rxNSmode != rxNSmodeDefault);
+    SLEEP(NSSleep);
+
+    // enable/disable many times in a row
+    for (i = 0; i < NSIterations; i++)
+    {
+        TEST_MUSTPASS(apm->SetRxNsStatus(0, true));
+        TEST_MUSTPASS(apm->SetRxNsStatus(0, false));
+    }
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, enabled, rxNSmode));
+    TEST_LOG("rxNS: enabled=%d, NSmode=%d\n", enabled, rxNSmode);
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(rxNSmode != rxNSmodeDefault);
+    SLEEP(NSSleep);
+
+    /////////////////////////////
+    // StartDebugRecording
+    ////////////////////////////
+    // StopDebugRecording
+    TEST_LOG("StartDebugRecording\n");
+    TEST_MUSTPASS(apm->StartDebugRecording(GetFilename("apm_debug.txt")));
+    SLEEP(1000);
+    TEST_LOG("StopDebugRecording\n");
+    TEST_MUSTPASS(apm->StopDebugRecording());
+
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->Terminate());
+
+    return 0;
+}
+
+} //  namespace voetest
diff --git a/voice_engine/main/test/auto_test/voe_extended_test.h b/voice_engine/main/test/auto_test/voe_extended_test.h
new file mode 100644
index 0000000..8557a3c
--- /dev/null
+++ b/voice_engine/main/test/auto_test/voe_extended_test.h
@@ -0,0 +1,140 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_EXTENDED_TEST_H
+#define WEBRTC_VOICE_ENGINE_VOE_EXTENDED_TEST_H
+
+#include "voe_standard_test.h"
+
+namespace voetest {
+
+class VoETestManager;
+
+// ----------------------------------------------------------------------------
+//  Transport
+// ----------------------------------------------------------------------------
+
+class ExtendedTestTransport : public Transport
+{
+public:
+    ExtendedTestTransport(VoENetwork* ptr);
+    ~ExtendedTestTransport();
+    VoENetwork* myNetw;
+protected:
+    virtual int SendPacket(int channel, const void *data, int len);
+    virtual int SendRTCPPacket(int channel, const void *data, int len);
+private:
+    static bool Run(void* ptr);
+    bool Process();
+private:
+    ThreadWrapper* _thread;
+    CriticalSectionWrapper* _lock;
+    EventWrapper* _event;
+private:
+    unsigned char _packetBuffer[1612];
+    int _length;
+    int _channel;
+};
+
+class XTransport : public Transport
+{
+public:
+    XTransport(VoENetwork* netw, VoEFile* file);
+    VoENetwork* _netw;
+    VoEFile* _file;
+public:
+    virtual int SendPacket(int channel, const void *data, int len);
+    virtual int SendRTCPPacket(int channel, const void *data, int len);
+};
+
+class XRTPObserver : public VoERTPObserver
+{
+public:
+    XRTPObserver();
+    ~XRTPObserver();
+    virtual void OnIncomingCSRCChanged(const int channel,
+                                       const unsigned int CSRC,
+                                       const bool added);
+    virtual void OnIncomingSSRCChanged(const int channel,
+                                       const unsigned int SSRC);
+public:
+    unsigned int _SSRC;
+};
+
+// ----------------------------------------------------------------------------
+//  VoEExtendedTest
+// ----------------------------------------------------------------------------
+
+class VoEExtendedTest : public VoiceEngineObserver,
+                        public VoEConnectionObserver
+{
+public:
+    VoEExtendedTest(VoETestManager& mgr);
+    ~VoEExtendedTest();
+    int PrepareTest(const char* str) const;
+    int TestPassed(const char* str) const;
+    int TestBase();
+    int TestCallReport();
+    int TestCodec();
+    int TestDtmf();
+    int TestEncryption();
+    int TestExternalMedia();
+    int TestFile();
+    int TestHardware();
+    int TestNetEqStats();
+    int TestNetwork();
+    int TestRTP_RTCP();
+    int TestVideoSync();
+    int TestVolumeControl();
+    int TestAPM();
+public:
+    int ErrorCode() const
+    {
+        return _errCode;
+    }
+    void ClearErrorCode()
+    {
+        _errCode = 0;
+    }
+protected:
+    // from VoiceEngineObserver
+    void CallbackOnError(const int errCode, const int channel);
+    void CallbackOnTrace(const TraceLevel level,
+                         const char* message,
+                         const int length);
+protected:
+    // from VoEConnectionObserver
+    void OnPeriodicDeadOrAlive(const int channel, const bool alive);
+private:
+    void Play(int channel,
+              unsigned int timeMillisec,
+              bool addFileAsMicrophone = false,
+              bool addTimeMarker = false);
+    void Sleep(unsigned int timeMillisec, bool addMarker = false);
+    void StartMedia(int channel,
+                    int rtpPort,
+                    bool listen,
+                    bool playout,
+                    bool send);
+    void StopMedia(int channel);
+private:
+    VoETestManager& _mgr;
+private:
+    int _errCode;
+    bool _alive;
+    bool _listening[32];
+    bool _playing[32];
+    bool _sending[32];
+};
+
+} //  namespace voetest
+#endif // WEBRTC_VOICE_ENGINE_VOE_EXTENDED_TEST_H
diff --git a/voice_engine/main/test/auto_test/voe_standard_test.cc b/voice_engine/main/test/auto_test/voe_standard_test.cc
new file mode 100644
index 0000000..d2c3e09
--- /dev/null
+++ b/voice_engine/main/test/auto_test/voe_standard_test.cc
@@ -0,0 +1,4197 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include "engine_configurations.h"
+#if defined(_WIN32)
+#include <conio.h>     // exists only on windows
+#include <tchar.h>
+#endif
+
+#include "voe_standard_test.h"
+
+#if defined (_ENABLE_VISUAL_LEAK_DETECTOR_) && defined(_DEBUG) && defined(_WIN32) && !defined(_INSTRUMENTATION_TESTING_)
+#include "vld.h"
+#endif
+
+#include "../../source/voice_engine_defines.h"  // defines build macros
+
+#include "thread_wrapper.h"
+#include "critical_section_wrapper.h"
+#include "event_wrapper.h"
+
+#ifdef _TEST_NETEQ_STATS_
+#include "../../interface/voe_neteq_stats.h" // Not available in delivery folder
+#endif
+
+#include "voe_extended_test.h"
+#include "voe_stress_test.h"
+#include "voe_unit_test.h"
+#include "voe_cpu_test.h"
+
+using namespace webrtc;
+
+namespace voetest {
+
+#ifdef MAC_IPHONE
+// Defined in iPhone specific test file
+int GetDocumentsDir(char* buf, int bufLen);
+char* GetFilename(char* filename);
+const char* GetFilename(const char* filename);
+int GetResource(char* resource, char* dest, int destLen);
+char* GetResource(char* resource);
+const char* GetResource(const char* resource);
+// #ifdef MAC_IPHONE
+#elif defined(ANDROID)
+char filenameStr[2][256] = {0};
+int currentStr = 0;
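+// Two alternating buffers so that the results of two consecutive
+// GetFilename()/GetResource() calls stay valid at the same time.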
+
+char* GetFilename(char* filename)
+{
+    currentStr = !currentStr;
+    sprintf(filenameStr[currentStr], "/sdcard/%s", filename); 
+    return filenameStr[currentStr];
+}
+const char* GetFilename(const char* filename)
+{
+    currentStr = !currentStr;
+    sprintf(filenameStr[currentStr], "/sdcard/%s", filename); 
+    return filenameStr[currentStr];
+}
+int GetResource(char* resource, char* dest, int destLen)
+{
+    currentStr = !currentStr;
+    sprintf(filenameStr[currentStr], "/sdcard/%s", resource);
+    strncpy(dest, filenameStr[currentStr], destLen-1);
+    return 0;
+}
+char* GetResource(char* resource)
+{
+    currentStr = !currentStr;
+    sprintf(filenameStr[currentStr], "/sdcard/%s", resource); 
+    return filenameStr[currentStr];
+}
+const char* GetResource(const char* resource)
+{ 
+    currentStr = !currentStr;
+    sprintf(filenameStr[currentStr], "/sdcard/%s", resource); 
+    return filenameStr[currentStr];
+}
+#else
+char filenameStr[2][256] = {0};
+int currentStr = 0;
+
+char* GetFilename(char* filename)
+{
+    currentStr = !currentStr;
+    sprintf(filenameStr[currentStr],
+            "/tmp/%s",
+            filename);
+    return filenameStr[currentStr];
+}
+const char* GetFilename(const char* filename)
+{
+    currentStr = !currentStr;
+    sprintf(filenameStr[currentStr],
+            "/tmp/%s",
+            filename);
+    return filenameStr[currentStr];
+}
+int GetResource(char* resource, char* dest, int destLen)
+{
+    currentStr = !currentStr;
+    sprintf(filenameStr[currentStr],
+            "/tmp/%s",
+            resource);
+    strncpy(dest, filenameStr[currentStr], destLen-1);
+    return 0;
+}
+char* GetResource(char* resource)
+{
+    currentStr = !currentStr;
+    sprintf(filenameStr[currentStr],
+            "/tmp/%s",
+            resource);
+    return filenameStr[currentStr];
+}
+const char* GetResource(const char* resource)
+{ 
+    currentStr = !currentStr;
+    sprintf(filenameStr[currentStr],
+            "/tmp/%s",
+            resource);
+    return filenameStr[currentStr];
+}
+#endif
+
+#if defined(MAC_IPHONE)
+char micFile[256] = {0}; // Filename copied to buffer in code
+#elif defined(WEBRTC_MAC) && !defined(WEBRTC_MAC_INTEL)
+const char* micFile = "audio_long16bigendian.pcm";
+#elif defined(ANDROID)
+const char* micFile = "/sdcard/audio_long16.pcm";
+#else
+const char* micFile =
+    "/tmp/audio_long16.pcm";
+#endif
+
+#if !defined(MAC_IPHONE)
+const char* summaryFilename =
+    "/tmp/VoiceEngineSummary.txt";
+#endif
+// For iPhone the summary filename is created in createSummary
+
+int dummy = 0; // Dummy used in different functions to avoid warnings
+
+MyRTPObserver::MyRTPObserver()
+{
+    Reset();
+}
+
+MyRTPObserver::~MyRTPObserver()
+{
+}
+
+void MyRTPObserver::Reset()
+{
+    for (int i = 0; i < 2; i++)
+    {
+        _SSRC[i] = 0;
+        _CSRC[i][0] = 0;
+        _CSRC[i][1] = 0;
+        _added[i][0] = false;
+        _added[i][1] = false;
+        _size[i] = 0;
+    }
+}
+
+void MyRTPObserver::OnIncomingCSRCChanged(const int channel,
+                                          const unsigned int CSRC,
+                                          const bool added)
+{
+    char msg[128];
+    sprintf(msg, "=> OnIncomingCSRCChanged(channel=%d, CSRC=%u, added=%d)\n",
+            channel, CSRC, added);
+    TEST_LOG("%s", msg);
+
+    if (channel > 1)
+        return; // only two channels are tracked by this observer
+
+    _CSRC[channel][_size[channel]] = CSRC;
+    _added[channel][_size[channel]] = added;
+
+    _size[channel]++;
+    if (_size[channel] == 2)
+        _size[channel] = 0;
+}
+
+void MyRTPObserver::OnIncomingSSRCChanged(const int channel,
+                                          const unsigned int SSRC)
+{
+    char msg[128];
+    sprintf(msg,
+            "\n=> OnIncomingSSRCChanged(channel=%d, SSRC=%u)\n",
+            channel, SSRC);
+    TEST_LOG("%s", msg);
+
+    _SSRC[channel] = SSRC; 
+}
+
+void MyDeadOrAlive::OnPeriodicDeadOrAlive(const int /*channel*/,
+                                          const bool alive)
+{
+    if (alive)
+    {
+        TEST_LOG("ALIVE\n");
+    }
+    else
+    {
+        TEST_LOG("DEAD\n");
+    }
+    fflush(NULL);
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+void MyMedia::Process(const int channel,
+                      const ProcessingTypes type,
+                      WebRtc_Word16 audio_10ms[],
+                      const int length, 
+                      const int samplingFreqHz,
+                      const bool stereo)
+{
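+    // Ring-modulates the samples with a ~400 Hz sine (f counts samples) so
+    // the effect of the external media processing can be heard.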
+    for(int i = 0; i < length; i++)
+    {
+        if (!stereo)
+        {
+            audio_10ms[i] = (WebRtc_Word16)(audio_10ms[i] *
+                sin(2.0 * 3.14 * f * 400.0 / samplingFreqHz));
+        }
+        else
+        {
+            // interleaved stereo 
+            audio_10ms[2 * i] = (WebRtc_Word16)(audio_10ms[2 * i] *
+                sin(2.0 * 3.14 * f * 400.0 / samplingFreqHz));
+            audio_10ms[2 * i + 1] = (WebRtc_Word16)(audio_10ms[2 * i + 1] *
+                sin(2.0 * 3.14 * f * 400.0 / samplingFreqHz));
+        }
+        f++;
+    }
+}
+#endif
+
+MyMedia mobj;
+
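+// my_transportation loops sent RTP packets back into VoENetwork from a worker
+// thread and forwards RTCP synchronously, optionally after a fixed delay.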
+my_transportation::my_transportation(VoENetwork* ptr) :
+    myNetw(ptr),
+    _thread(NULL),
+    _lock(NULL),
+    _event(NULL),
+    _length(0),
+    _channel(0),
+    _delayIsEnabled(0),
+    _delayTimeInMs(0)
+{
+    const char* threadName = "external_thread";
+    _lock = CriticalSectionWrapper::CreateCriticalSection();
+    _event = EventWrapper::Create();
+    _thread = ThreadWrapper::CreateThread(Run,
+                                          this,
+                                          kHighPriority,
+                                          threadName);
+    if (_thread)
+    {
+        unsigned int id;
+        _thread->Start(id);
+    }
+}
+
+my_transportation::~my_transportation()
+{
+    if (_thread)
+    {
+        _thread->SetNotAlive();
+        _event->Set();
+        if (_thread->Stop())
+        {
+            delete _thread;
+            _thread = NULL;
+            delete _event; 
+            _event = NULL;
+            delete _lock; 
+            _lock = NULL;
+        }
+    }
+}
+
+bool my_transportation::Run(void* ptr)
+{
+    return static_cast<my_transportation*>(ptr)->Process();
+}
+
+bool my_transportation::Process()
+{
+    switch(_event->Wait(500))
+    {
+    case kEventSignaled:
+        _lock->Enter();
+        myNetw->ReceivedRTPPacket( _channel, _packetBuffer, _length );
+        _lock->Leave();
+        return true;
+    case kEventTimeout:
+        return true; 
+    case kEventError:
+        break;
+    }
+    return true;
+}
+
+int my_transportation::SendPacket(int channel, const void *data, int len)
+{
+    _lock->Enter();
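+    // Only packets that fit in the local buffer (1612 bytes) are copied and
+    // forwarded to the worker thread.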
+    if (len < 1612)
+    {
+        memcpy(_packetBuffer, (const unsigned char*)data, len);
+        _length = len;
+        _channel = channel;
+    }
+    _lock->Leave();
+    _event->Set();  // triggers ReceivedRTPPacket() from worker thread
+    return len;
+}
+
+int my_transportation::SendRTCPPacket(int channel,const void *data,int len)
+{
+    if (_delayIsEnabled) 
+    {
+        Sleep(_delayTimeInMs);
+    }
+    myNetw->ReceivedRTCPPacket(channel, data, len);
+    return len;
+}
+
+void my_transportation::SetDelayStatus(bool enable, unsigned int delayInMs)
+{
+    _delayIsEnabled = enable;
+    _delayTimeInMs = delayInMs;
+}
+
+ErrorObserver::ErrorObserver()
+{
+    code = -1;
+}
+
+void ErrorObserver::CallbackOnError(const int channel, const int errCode)
+{
+    code = errCode;
+#ifndef _INSTRUMENTATION_TESTING_
+    TEST_LOG("\n************************\n");
+    TEST_LOG(" RUNTIME ERROR: %d \n", errCode);
+    TEST_LOG("************************\n");
+#endif
+}
+
+void MyTraceCallback::Print(const TraceLevel level, const char *traceString,
+                            const int length)
+{
+    if (traceString)
+    {
+        // Make a null-terminated copy and avoid using the trace text as a
+        // printf-style format string.
+        char* tmp = new char[length + 1];
+        memcpy(tmp, traceString, length);
+        tmp[length] = '\0';
+        TEST_LOG("%s", tmp);
+        TEST_LOG("\n");
+        delete [] tmp;
+    }
+}
+
+void RtcpAppHandler::OnApplicationDataReceived(
+    const int /*channel*/,
+    const unsigned char subType,
+    const unsigned int name,
+    const unsigned char* data,
+    const unsigned short dataLengthInBytes)
+{
+    _lengthBytes = dataLengthInBytes;
+    memcpy(_data, &data[0], dataLengthInBytes);
+    _subType = subType;
+    _name = name;
+}
+
+void RtcpAppHandler::Reset()
+{
+    _lengthBytes = 0;
+    memset(_data, 0, sizeof(_data));
+    _subType = 0;
+    _name = 0;
+}
+
+ErrorObserver obs;
+RtcpAppHandler myRtcpAppHandler;
+MyRTPObserver rtpObserver;
+
+void my_encryption::encrypt(int,
+                            unsigned char* in_data,
+                            unsigned char* out_data,
+                            int bytes_in,
+                            int* bytes_out)
+{
+    for (int i = 0; i < bytes_in; i++)
+        out_data[i] = ~in_data[i];
+    *bytes_out = bytes_in + 2;  // length is increased by 2
+}
+
+void my_encryption::decrypt(int,
+                            unsigned char* in_data,
+                            unsigned char* out_data,
+                            int bytes_in,
+                            int* bytes_out)
+{
+    for (int i = 0; i < bytes_in; i++)
+        out_data[i] = ~in_data[i];
+    *bytes_out = bytes_in - 2;  // length is decreased by 2
+}
+
+void my_encryption::encrypt_rtcp(int,
+                                 unsigned char* in_data,
+                                 unsigned char* out_data,
+                                 int bytes_in,
+                                 int* bytes_out)
+{
+    for (int i = 0; i < bytes_in; i++)
+        out_data[i] = ~in_data[i];
+    *bytes_out = bytes_in + 2;
+}
+
+void my_encryption::decrypt_rtcp(int,
+                                 unsigned char* in_data,
+                                 unsigned char* out_data,
+                                 int bytes_in,
+                                 int* bytes_out)
+{
+    for (int i = 0; i < bytes_in; i++)
+        out_data[i] = ~in_data[i];
+    *bytes_out = bytes_in + 2;
+}
+
+void SubAPIManager::DisplayStatus() const
+{
+    TEST_LOG("Supported sub APIs:\n\n");
+    if (_base) TEST_LOG("  Base\n");
+    if (_callReport) TEST_LOG("  CallReport\n");
+    if (_codec) TEST_LOG("  Codec\n");
+    if (_dtmf) TEST_LOG("  Dtmf\n");
+    if (_encryption) TEST_LOG("  Encryption\n");
+    if (_externalMedia) TEST_LOG("  ExternalMedia\n");
+    if (_file) TEST_LOG("  File\n");
+    if (_hardware) TEST_LOG("  Hardware\n");
+    if (_netEqStats) TEST_LOG("  NetEqStats\n");
+    if (_network) TEST_LOG("  Network\n");
+    if (_rtp_rtcp) TEST_LOG("  RTP_RTCP\n");
+    if (_videoSync) TEST_LOG("  VideoSync\n");
+    if (_volumeControl) TEST_LOG("  VolumeControl\n");
+    if (_apm) TEST_LOG("  AudioProcessing\n");
+    ANL();
+    TEST_LOG("Excluded sub APIs:\n\n");
+    if (!_base) TEST_LOG("  Base\n");
+    if (!_callReport) TEST_LOG("  CallReport\n");
+    if (!_codec) TEST_LOG("  Codec\n");
+    if (!_dtmf) TEST_LOG("  Dtmf\n");
+    if (!_encryption) TEST_LOG("  Encryption\n");
+    if (!_externalMedia) TEST_LOG("  ExternalMedia\n");
+    if (!_file) TEST_LOG("  File\n");
+    if (!_hardware) TEST_LOG("  Hardware\n");
+    if (!_netEqStats) TEST_LOG("  NetEqStats\n");
+    if (!_network) TEST_LOG("  Network\n");
+    if (!_rtp_rtcp) TEST_LOG("  RTP_RTCP\n");
+    if (!_videoSync) TEST_LOG("  VideoSync\n");
+    if (!_volumeControl) TEST_LOG("  VolumeControl\n");
+    if (!_apm) TEST_LOG("  AudioProcessing\n");
+    ANL();
+}
+
+bool SubAPIManager::GetExtendedMenuSelection(ExtendedSelection& sel)
+{
+    printf("------------------------------------------------\n");
+    printf("Select extended test\n\n");
+    printf(" (0)  None\n");
+    printf("- - - - - - - - - - - - - - - - - - - - - - - - \n");
+    printf(" (1)  Base");
+    if (_base) printf("\n"); else printf(" (NA)\n");
+    printf(" (2)  CallReport");
+    if (_callReport) printf("\n"); else printf(" (NA)\n");
+    printf(" (3)  Codec");
+    if (_codec) printf("\n"); else printf(" (NA)\n");
+    printf(" (4)  Dtmf");
+    if (_dtmf) printf("\n"); else printf(" (NA)\n");
+    printf(" (5)  Encryption");
+    if (_encryption) printf("\n"); else printf(" (NA)\n");
+    printf(" (6)  VoEExternalMedia");
+    if (_externalMedia) printf("\n"); else printf(" (NA)\n");
+    printf(" (7)  File");
+    if (_file) printf("\n"); else printf(" (NA)\n");
+    printf(" (8)  Hardware");
+    if (_hardware) printf("\n"); else printf(" (NA)\n");
+    printf(" (9)  NetEqStats");
+    if (_netEqStats) printf("\n"); else printf(" (NA)\n");
+    printf(" (10) Network");
+    if (_network) printf("\n"); else printf(" (NA)\n");
+    printf(" (11) RTP_RTCP");
+    if (_rtp_rtcp) printf("\n"); else printf(" (NA)\n");
+    printf(" (12) VideoSync");
+    if (_videoSync) printf("\n"); else printf(" (NA)\n");
+    printf(" (13) VolumeControl");
+    if (_volumeControl) printf("\n"); else printf(" (NA)\n");
+    printf(" (14) AudioProcessing");
+    if (_apm) printf("\n"); else printf(" (NA)\n");
+    printf("\n: ");
+
+    ExtendedSelection xsel(XSEL_Invalid);
+    int selection(0);
+    dummy = scanf("%d", &selection);
+
+    switch (selection)
+    {
+    case 0:
+        xsel = XSEL_None;
+        break;
+    case 1:
+        if (_base) xsel = XSEL_Base;
+        break;
+    case 2:
+        if (_callReport) xsel = XSEL_CallReport;
+        break;
+    case 3:
+        if (_codec) xsel = XSEL_Codec;
+        break;
+    case 4:
+        if (_dtmf) xsel = XSEL_DTMF;
+        break;
+    case 5:
+        if (_encryption) xsel = XSEL_Encryption;
+        break;
+    case 6:
+        if (_externalMedia) xsel = XSEL_ExternalMedia;
+        break;
+    case 7:
+        if (_file) xsel = XSEL_File;
+        break;
+    case 8:
+        if (_hardware) xsel = XSEL_Hardware;
+        break;
+    case 9:
+        if (_netEqStats) xsel = XSEL_NetEqStats;
+        break;
+    case 10:
+        if (_network) xsel = XSEL_Network;
+        break;
+    case 11:
+        if (_rtp_rtcp) xsel = XSEL_RTP_RTCP;
+        break;
+    case 12:
+        if (_videoSync) xsel = XSEL_VideoSync;
+        break;
+    case 13:
+        if (_volumeControl) xsel = XSEL_VolumeControl;
+        break;
+    case 14:
+        if (_apm) xsel = XSEL_AudioProcessing;
+        break;
+    default:
+        xsel = XSEL_Invalid;
+        break;
+    }
+    if (xsel == XSEL_Invalid)
+        printf("Invalid selection!\n");
+
+    sel = xsel;
+    _xsel = xsel;
+
+    return (xsel != XSEL_Invalid);
+}
+
+VoETestManager::VoETestManager() :
+    ve(0),
+    base(0),
+    codec(0),
+    volume(0),
+    dtmf(0),
+    rtp_rtcp(0),
+    apm(0),
+    netw(0),
+    file(0),
+    encrypt(0),
+    hardware(0),
+    xmedia(0),
+    report(0),
+    vsync(0),
+    instanceCount(0)
+{
+    if (VoiceEngine::SetTraceFile(NULL) != -1)
+    {
+        // should not be possible to call a Trace method before the VoE is
+        // created
+        TEST_LOG("\nError at line: %i (VoiceEngine::SetTraceFile()"
+            "should fail)!\n", __LINE__);
+    }
+#ifdef _TEST_NETEQ_STATS_
+    neteqst = 0;
+#endif
+    ve = VoiceEngine::Create();
+    instanceCount++;
+}
+
+VoETestManager::~VoETestManager()
+{
+}
+
+void VoETestManager::GetInterfaces()
+{
+    if (ve)
+    {
+        base = VoEBase::GetInterface(ve);
+        codec = VoECodec::GetInterface(ve);
+        volume = VoEVolumeControl::GetInterface(ve);
+        dtmf = VoEDtmf::GetInterface(ve);
+        rtp_rtcp = VoERTP_RTCP::GetInterface(ve);
+        apm = VoEAudioProcessing::GetInterface(ve);
+        netw = VoENetwork::GetInterface(ve);
+        file = VoEFile::GetInterface(ve);
+#ifdef _TEST_VIDEO_SYNC_
+        vsync = VoEVideoSync::GetInterface(ve);
+#endif
+        encrypt = VoEEncryption::GetInterface(ve);
+        hardware = VoEHardware::GetInterface(ve);
+        // Set the audio layer to use in all tests
+        if (hardware)
+        {
+            int res = hardware->SetAudioDeviceLayer(TESTED_AUDIO_LAYER);
+            if (res < 0)
+            {
+                printf("\nERROR: failed to set audio layer to use in "
+                    "testing\n");
+            }
+            else
+            {
+                printf("\nAudio layer %d will be used in testing\n",
+                       TESTED_AUDIO_LAYER);
+            }
+        }
+#ifdef _TEST_XMEDIA_
+        xmedia = VoEExternalMedia::GetInterface(ve);
+#endif
+#ifdef _TEST_CALL_REPORT_
+        report = VoECallReport::GetInterface(ve);
+#endif
+#ifdef _TEST_NETEQ_STATS_
+        neteqst = VoENetEqStats::GetInterface(ve);
+#endif
+    }
+}
+
+int VoETestManager::ReleaseInterfaces()
+{
+    int err(0), remInt(1), j(0);
+    bool releaseOK(true);
+
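+    // Release each interface until its reference count reaches zero; more
+    // than one successful Release() call indicates a leaked reference.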
+    if (base) 
+    {
+        for (remInt=1,j=0; remInt>0; j++)
+            TEST_MUSTPASS(-1 == (remInt = base->Release()));
+        if (j>1)
+        {
+            TEST_LOG("\n\n*** Error: released %d base interfaces "
+                "(should only be 1) \n", j);
+            releaseOK = false;
+        }
+        // try to release one additional time (should fail)
+        TEST_MUSTPASS(-1 != base->Release());
+        err = base->LastError();
+        // it is considered safe to delete even if Release has been called
+        // too many times
+        TEST_MUSTPASS(err != VE_INTERFACE_NOT_FOUND);
+    }
+    if (codec)
+    {
+        for (remInt=1,j=0; remInt>0; j++)
+            TEST_MUSTPASS(-1 == (remInt = codec->Release()));
+        if (j>1)
+        {
+            TEST_LOG("\n\n*** Error: released %d codec interfaces"
+                " (should only be 1) \n", j);
+            releaseOK = false;
+        }
+        TEST_MUSTPASS(-1 != codec->Release());
+        err = base->LastError();
+        TEST_MUSTPASS(err != VE_INTERFACE_NOT_FOUND);
+    }
+    if (volume)
+    {
+        for (remInt=1,j=0; remInt>0; j++)
+            TEST_MUSTPASS(-1 == (remInt = volume->Release()));
+        if (j>1)
+        {
+            TEST_LOG("\n\n*** Error: released %d volume interfaces "
+                "(should only be 1) \n", j);
+            releaseOK = false;
+        }
+        TEST_MUSTPASS(-1 != volume->Release());
+        err = base->LastError();
+        TEST_MUSTPASS(err != VE_INTERFACE_NOT_FOUND);
+    }
+    if (dtmf)
+    {
+        for (remInt=1,j=0; remInt>0; j++)
+            TEST_MUSTPASS(-1 == (remInt = dtmf->Release()));
+        if (j>1)
+        {
+            TEST_LOG("\n\n*** Error: released %d dtmf interfaces "
+                "(should only be 1) \n", j);
+            releaseOK = false;
+        }
+        TEST_MUSTPASS(-1 != dtmf->Release());
+        err = base->LastError();
+        TEST_MUSTPASS(err != VE_INTERFACE_NOT_FOUND);
+    }
+    if (rtp_rtcp)
+    {
+        for (remInt=1,j=0; remInt>0; j++)
+            TEST_MUSTPASS(-1 == (remInt = rtp_rtcp->Release()));
+        if (j>1)
+        {
+            TEST_LOG("\n\n*** Error: released %d rtp/rtcp interfaces "
+                "(should only be 1) \n", j);
+            releaseOK = false;
+        }
+        TEST_MUSTPASS(-1 != rtp_rtcp->Release());
+        err = base->LastError();
+        TEST_MUSTPASS(err != VE_INTERFACE_NOT_FOUND);
+    }
+    if (apm)
+    {
+        for (remInt=1,j=0; remInt>0; j++)
+            TEST_MUSTPASS(-1 == (remInt = apm->Release()));
+        if (j>1)
+        {
+            TEST_LOG("\n\n*** Error: released %d apm interfaces "
+                "(should only be 1) \n", j);
+            releaseOK = false;
+        }
+        TEST_MUSTPASS(-1 != apm->Release());
+        err = base->LastError();
+        TEST_MUSTPASS(err != VE_INTERFACE_NOT_FOUND);
+    }
+    if (netw)
+    {
+        for (remInt=1,j=0; remInt>0; j++)
+            TEST_MUSTPASS(-1 == (remInt = netw->Release()));
+        if (j>1)
+        {
+            TEST_LOG("\n\n*** Error: released %d network interfaces "
+                "(should only be 1) \n", j);
+            releaseOK = false;
+        }
+        TEST_MUSTPASS(-1 != netw->Release());
+        err = base->LastError();
+        TEST_MUSTPASS(err != VE_INTERFACE_NOT_FOUND);
+    }
+    if (file)
+    {
+        for (remInt=1,j=0; remInt>0; j++)
+            TEST_MUSTPASS(-1 == (remInt = file->Release()));
+        if (j>1)
+        {
+            TEST_LOG("\n\n*** Error: released %d file interfaces"
+                " (should only be 1) \n", j);
+            releaseOK = false;
+        }
+        TEST_MUSTPASS(-1 != file->Release());
+        err = base->LastError();
+        TEST_MUSTPASS(err != VE_INTERFACE_NOT_FOUND);
+    }
+#ifdef _TEST_VIDEO_SYNC_
+    if (vsync)
+    {
+        for (remInt=1,j=0; remInt>0; j++)
+            TEST_MUSTPASS(-1 == (remInt = vsync->Release()));
+        if (j>1)
+        {
+            TEST_LOG("\n\n*** Error: released %d video sync interfaces"
+                " (should only be 1) \n", j);
+            releaseOK = false;
+        }
+        TEST_MUSTPASS(-1 != vsync->Release());
+        err = base->LastError();
+        TEST_MUSTPASS(err != VE_INTERFACE_NOT_FOUND);
+    }
+#endif
+    if (encrypt)
+    {
+        for (remInt=1,j=0; remInt>0; j++)
+            TEST_MUSTPASS(-1 == (remInt = encrypt->Release()));
+        if (j>1)
+        {
+            TEST_LOG("\n\n*** Error: released %d encryption interfaces"
+                " (should only be 1) \n", j);
+            releaseOK = false;
+        }
+        TEST_MUSTPASS(-1 != encrypt->Release());
+        err = base->LastError();
+        TEST_MUSTPASS(err != VE_INTERFACE_NOT_FOUND);
+    }
+    if (hardware)
+    {
+        for (remInt=1,j=0; remInt>0; j++)
+            TEST_MUSTPASS(-1 == (remInt = hardware->Release()));
+        if (j>1)
+        {
+            TEST_LOG("\n\n*** Error: released %d hardware interfaces"
+                " (should only be 1) \n", j);
+            releaseOK = false;
+        }
+        TEST_MUSTPASS(-1 != hardware->Release());
+        err = base->LastError();
+        TEST_MUSTPASS(err != VE_INTERFACE_NOT_FOUND);
+    }
+#ifdef _TEST_XMEDIA_
+    if (xmedia)
+    {
+        for (remInt=1,j=0; remInt>0; j++)
+            TEST_MUSTPASS(-1 == (remInt = xmedia->Release()));
+        if (j>1)
+        {
+            TEST_LOG("\n\n*** Error: released %d external media interfaces"
+                " (should only be 1) \n", j);
+            releaseOK = false;
+        }
+        TEST_MUSTPASS(-1 != xmedia->Release());
+        err = base->LastError();
+        TEST_MUSTPASS(err != VE_INTERFACE_NOT_FOUND);
+    }
+#endif
+#ifdef _TEST_CALL_REPORT_
+    if (report)
+    {
+        for (remInt=1,j=0; remInt>0; j++)
+            TEST_MUSTPASS(-1 == (remInt = report->Release()));
+        if (j>1)
+        {
+            TEST_LOG("\n\n*** Error: released %d call report interfaces"
+                " (should only be 1) \n", j);
+            releaseOK = false;
+        }
+        TEST_MUSTPASS(-1 != report->Release());
+        err = base->LastError();
+        TEST_MUSTPASS(err != VE_INTERFACE_NOT_FOUND);
+    }
+#endif
+#ifdef _TEST_NETEQ_STATS_
+    if (neteqst)
+    {
+        for (remInt=1,j=0; remInt>0; j++)
+            TEST_MUSTPASS(-1 == (remInt = neteqst->Release()));
+        if (j>1)
+        {
+            TEST_LOG("\n\n*** Error: released %d neteq stat interfaces "
+                "(should only be 1) \n", j);
+            releaseOK = false;
+        }
+        TEST_MUSTPASS(-1 != neteqst->Release());
+        err = base->LastError();
+        TEST_MUSTPASS(err != VE_INTERFACE_NOT_FOUND);
+    }
+#endif
+    if (false == VoiceEngine::Delete(ve))
+    {
+        TEST_LOG("\n\nVoiceEngine::Delete() failed. \n");
+        releaseOK = false;
+    }
+
+    if (VoiceEngine::SetTraceFile(NULL) != -1)
+    {
+        TEST_LOG("\nError at line: %i (VoiceEngine::SetTraceFile()"
+            "should fail)!\n", __LINE__);
+    }
+    
+    return (releaseOK == true) ? 0 : -1;
+}
+
+int VoETestManager::DoStandardTest()
+{
+#if (defined(_TEST_CODEC_) || defined(_TEST_FILE_))
+    CodecInst cinst;
+    memset(&cinst, 0, sizeof(cinst));
+#endif
+    char tmpStr[1024];
+    bool enabled(false);
+
+    TEST_LOG("\n\n+++ Base tests +++\n\n");
+
+    // Test trace callbacks
+    TEST_LOG("Enabling the trace callback => default trace messages "
+        "shall be printed... \n\n");
+    MyTraceCallback* callback = new MyTraceCallback();
+    VoiceEngine::SetTraceCallback(callback);
+    
+    // Test the remaining trace APIs
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename("webrtc_voe_trace.txt"),
+                                            true));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(NULL));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename(
+        "webrtc_voe_trace.txt")));
+
+    VoiceEngine* extra = VoiceEngine::Create();
+    instanceCount++;
+    TEST_LOG("\nVerify that the VoE ID is now changed from 1 to 2\n\n");
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(NULL));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename(
+        "webrtc_voe_trace.txt")));
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(NULL));
+    VoiceEngine::Delete(extra);
+    SLEEP(10);
+    TEST_LOG("\nVerify that the VoE ID is now changed back to 1\n");
+    TEST_LOG("NOTE: Currently it will still be 2, this is OK\n\n");
+
+    // The API below shall be the first line in the stored trace file
+    // (verify after test)!
+    TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename(
+        "webrtc_voe_trace.txt")));
+    VoiceEngine::SetTraceCallback(NULL);
+    delete callback;
+    TEST_LOG("\n...the trace callback is now disabled.\n\n");
+
+    /////////////////////////////////////////////////
+    // Hardware (test before VoE is initialized)
+#ifdef _TEST_HARDWARE_
+    // Set/GetAudioDeviceLayer
+    TEST_LOG("Set/Get audio device layer\n");
+    AudioLayers wantedLayer = TESTED_AUDIO_LAYER;
+    AudioLayers givenLayer;
+    TEST_MUSTPASS(hardware->SetAudioDeviceLayer(wantedLayer));
+    TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+    TEST_MUSTPASS(wantedLayer != givenLayer); // Should be same before init
+#endif //_TEST_HARDWARE_
+
+    TEST_LOG("Init \n");
+#if defined BLACKFIN
+    TEST_MUSTPASS(base->Init(0,LINUX_AUDIO_OSS));
+#else
+    TEST_MUSTPASS(base->Init());
+#endif
+
+#if defined(ANDROID)
+    TEST_LOG("Setting loudspeaker status to false \n");
+    TEST_MUSTPASS(hardware->SetLoudspeakerStatus(false));
+#endif
+
+#ifndef __INSURE__
+    TEST_LOG("Enabling the observer \n");
+    TEST_MUSTPASS(base->RegisterVoiceEngineObserver(obs));
+#endif
+
+    TEST_LOG("Get version \n");
+    TEST_MUSTPASS(base->GetVersion(tmpStr));
+    TEST_LOG("--------------------\n%s\n--------------------\n", tmpStr);
+
+    TEST_LOG("Create channel \n");
+    int nChannels = base->MaxNumOfChannels();
+    TEST_MUSTPASS(!(nChannels > 0));
+    TEST_LOG("Max number of channels = %d \n", nChannels);
+    TEST_MUSTPASS(base->CreateChannel());
+
+    /////////////////////////////////////////////////
+    // RTP/RTCP (test before streaming is activated)
+#ifdef _TEST_RTP_RTCP_
+    TEST_LOG("\n\n+++ RTP/RTCP tests +++\n\n");
+
+    TEST_LOG("Set/Get RTCP and CName \n");
+    bool on;
+    TEST_MUSTPASS(rtp_rtcp->GetRTCPStatus(0, on));   // should be on by default
+    TEST_MUSTPASS(on != true);
+    TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, false));
+    TEST_MUSTPASS(rtp_rtcp->GetRTCPStatus(0, on));
+    TEST_MUSTPASS(on != false);
+    TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, true));
+    TEST_MUSTPASS(rtp_rtcp->GetRTCPStatus(0, on));
+    TEST_MUSTPASS(on != true);
+    TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Niklas"));
+
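+    // Constraints exercised below: keepalive is off by default (payload type
+    // reported as 255, interval 0), an invalid channel (-1), a negative
+    // payload type, an interval of 61 s and payload type 0 (already used by
+    // PCMU) are all rejected, and the interval defaults to 15 s when omitted.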
+    TEST_LOG("Set/Get RTP Keepalive\n");
+    unsigned char pt; int dT;
+    TEST_MUSTPASS(!rtp_rtcp->GetRTPKeepaliveStatus(-1, on, pt, dT));
+    // should be off by default
+    TEST_MUSTPASS(rtp_rtcp->GetRTPKeepaliveStatus(0, on, pt, dT));
+    TEST_MUSTPASS(on != false); 
+    TEST_MUSTPASS(pt != 255);
+    TEST_MUSTPASS(dT != 0);
+
+    // verify invalid input parameters
+    TEST_MUSTPASS(!rtp_rtcp->SetRTPKeepaliveStatus(-1, true, 0, 15));
+    TEST_MUSTPASS(!rtp_rtcp->SetRTPKeepaliveStatus(0, true, -1, 15));
+    TEST_MUSTPASS(!rtp_rtcp->SetRTPKeepaliveStatus(0, true, 0, 61));
+    // should still be off
+    TEST_MUSTPASS(rtp_rtcp->GetRTPKeepaliveStatus(0, on, pt, dT));
+    TEST_MUSTPASS(!rtp_rtcp->SetRTPKeepaliveStatus(0, true, 0));
+    // should fail since default payload type 0 is used by PCMU
+    TEST_MUSTPASS(on != false);
+    // try valid settings
+    TEST_MUSTPASS(rtp_rtcp->SetRTPKeepaliveStatus(0, true, 1));
+    TEST_MUSTPASS(rtp_rtcp->SetRTPKeepaliveStatus(0, true, 1));
+    // should be on now
+    TEST_MUSTPASS(rtp_rtcp->GetRTPKeepaliveStatus(0, on, pt, dT));
+    TEST_MUSTPASS(on != true); TEST_MUSTPASS(pt != 1); TEST_MUSTPASS(dT != 15);
+    // Set the Keep alive payload to 60, and this payloadtype could not used
+    // by the codecs
+    TEST_MUSTPASS(rtp_rtcp->SetRTPKeepaliveStatus(0, true, 60, 3));
+    TEST_MUSTPASS(rtp_rtcp->GetRTPKeepaliveStatus(0, on, pt, dT));
+    TEST_MUSTPASS(on != true); TEST_MUSTPASS(pt != 60); TEST_MUSTPASS(dT != 3);
+    TEST_MUSTPASS(rtp_rtcp->SetRTPKeepaliveStatus(0, false, 60));
+
+    TEST_LOG("Set and get SSRC \n");
+    TEST_MUSTPASS(rtp_rtcp->SetLocalSSRC(0, 1234));
+    unsigned int sendSSRC = 0;
+    TEST_MUSTPASS(rtp_rtcp->GetLocalSSRC(0, sendSSRC));
+    TEST_MUSTPASS(1234 != sendSSRC);
+#else
+    TEST_LOG("\n\n+++ RTP/RTCP tests NOT ENABLED +++\n");
+#endif
+
+    /////////////////////////////////////////////////
+    // Hardware (test before streaming is activated)
+    // the test will select the device using 44100, which fails the call
+#ifdef _TEST_HARDWARE_
+    TEST_LOG("\n\n+++ Hardware tests +++\n\n");
+
+    // Set/GetAudioDeviceLayer
+    TEST_LOG("Set/Get audio device layer\n");
+    TEST_MUSTPASS(-1 != hardware->SetAudioDeviceLayer(wantedLayer));
+    TEST_MUSTPASS(VE_ALREADY_INITED != base->LastError());
+    TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+    switch (givenLayer)
+    {
+    case kAudioPlatformDefault:
+        // already set above
+        break;
+    case kAudioWindowsCore:
+        TEST_LOG("Running kAudioWindowsCore\n");
+        break;
+    case kAudioWindowsWave:
+        TEST_LOG("Running kAudioWindowsWave\n");
+        break;
+    case kAudioLinuxAlsa:
+        TEST_LOG("Running kAudioLinuxAlsa\n");
+        break;
+    case kAudioLinuxPulse:
+        TEST_LOG("Running kAudioLinuxPulse\n");
+        break;
+    default:
+        TEST_LOG("ERROR: Running unknown audio layer!!\n");
+        return -1;
+    }
+
+    int loadPercent;
+#if defined(_WIN32)
+    TEST_LOG("CPU load \n");
+    TEST_MUSTPASS(hardware->GetCPULoad(loadPercent));
+    TEST_LOG("GetCPULoad => %d%%\n", loadPercent);
+#else
+    TEST_MUSTPASS(!hardware->GetCPULoad(loadPercent));
+#endif
+#if !defined(MAC_IPHONE) && !defined(ANDROID)
+    TEST_MUSTPASS(hardware->GetSystemCPULoad(loadPercent));
+    TEST_LOG("GetSystemCPULoad => %d%%\n", loadPercent);
+#endif
+    
+#if !defined(MAC_IPHONE) && !defined(ANDROID)
+    bool playAvail = false, recAvail = false;
+    TEST_LOG("Get device status \n");
+    TEST_MUSTPASS(hardware->GetPlayoutDeviceStatus(playAvail));
+    TEST_MUSTPASS(hardware->GetRecordingDeviceStatus(recAvail));
+    TEST_MUSTPASS(!(recAvail && playAvail));
+#endif
+    
+    // Win, Mac and Linux sound device tests
+#if (defined(WEBRTC_MAC) && !defined(MAC_IPHONE)) || defined(_WIN32) || (defined(WEBRTC_LINUX) && !defined(ANDROID))
+    int idx, nRec = 0, nPlay = 0;
+    char devName[128] = {0};
+    char guidName[128] = {0};
+
+    TEST_LOG("Printing names of default sound devices \n");
+#if defined(_WIN32)
+    TEST_MUSTPASS(hardware->GetRecordingDeviceName(-1, devName, guidName));
+    TEST_LOG("Recording device= %s, guid=%s\n",devName,guidName);
+    TEST_MUSTPASS(hardware->GetPlayoutDeviceName(-1, devName, guidName));
+    TEST_LOG("Playout device= %s, guid=%s\n",devName,guidName);
+#else
+    TEST_MUSTPASS(hardware->GetRecordingDeviceName(0, devName, guidName));
+    TEST_LOG("Recording device= %s\n",devName);
+    TEST_MUSTPASS(hardware->GetPlayoutDeviceName(0, devName, guidName));
+    TEST_LOG("Playout device= %s\n",devName);
+#endif
+
+    // Recording side
+    TEST_MUSTPASS(hardware->GetNumOfRecordingDevices(nRec));
+    TEST_LOG("GetNumOfRecordingDevices = %d\n", nRec);
+    for (idx = 0; idx < nRec; idx++)
+    {
+        // extended Win32 enumeration tests => unique GUID outputs on Vista
+        // and up
+        // Win XP and below : devName is copied to guidName
+        // Win Vista and up : devName is the friendly name and GUID is a unique
+        // identifier
+        // Other            : guidName is left unchanged
+        TEST_MUSTPASS(hardware->GetRecordingDeviceName(idx, devName, guidName));
+#if defined(_WIN32)
+        TEST_LOG("GetRecordingDeviceName(%d) => name=%s, guid=%s\n",
+                 idx, devName, guidName);
+#else
+        TEST_LOG("GetRecordingDeviceName(%d) => name=%s\n", idx, devName);
+#endif
+        TEST_MUSTPASS(hardware->SetRecordingDevice(idx));
+    }
+
+    // Playout side
+    TEST_MUSTPASS(hardware->GetNumOfPlayoutDevices(nPlay));
+    TEST_LOG("GetNumDevsPlayout = %d\n", nPlay);
+    for (idx = 0; idx < nPlay; idx++)
+    {
+        // extended Win32 enumeration tests => unique GUID outputs on Vista
+        // and up
+        // Win XP and below : devName is copied to guidName
+        // Win Vista and up : devName is the friendly name and GUID is a
+        // unique identifier
+        // Other            : guidName is left unchanged
+        TEST_MUSTPASS(hardware->GetPlayoutDeviceName(idx, devName, guidName));
+#if defined(_WIN32)
+        TEST_LOG("GetPlayoutDeviceName(%d) => name=%s, guid=%s\n",
+                 idx, devName, guidName);
+#else
+        TEST_LOG("GetPlayoutDeviceName(%d) => name=%s\n", idx, devName);
+#endif
+        TEST_MUSTPASS(hardware->SetPlayoutDevice(idx));
+    }
+
+#endif // #if (defined(WEBRTC_MAC) && !defined(MAC_IPHONE)) || (defined(_WI...&
+    TEST_LOG("Setting default sound devices \n");
+#ifdef _WIN32
+    TEST_MUSTPASS(hardware->SetRecordingDevice(-1));
+    TEST_MUSTPASS(hardware->SetPlayoutDevice(-1));
+#else
+#if !defined(MAC_IPHONE) && !defined(ANDROID)
+    TEST_MUSTPASS(hardware->SetRecordingDevice(0));
+    TEST_MUSTPASS(hardware->SetPlayoutDevice(0));
+#endif
+#endif
+    
+#ifdef MAC_IPHONE
+    // Reset sound device
+    TEST_LOG("Reset sound device \n");
+    TEST_MUSTPASS(hardware->ResetAudioDevice());
+#endif
+
+#else
+    TEST_LOG("\n\n+++ Hardware tests NOT ENABLED +++\n");
+#endif  // #ifdef _TEST_HARDWARE_
+
+    // This testing must be done before we start playing
+#ifdef _TEST_CODEC_
+    // Test that set and get payload type work
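+    // GetRecPayloadType() looks the codec up by name (case-insensitively) and
+    // returns the payload type used on the receive side; SetRecPayloadType()
+    // remaps it. An unknown codec name ("niklas") must be rejected.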
+#if defined(WEBRTC_CODEC_ISAC)
+    TEST_LOG("Getting payload type for iSAC\n");
+    strcpy(cinst.plname,"niklas");
+    cinst.channels=1;
+    cinst.plfreq=16000;
+    cinst.pacsize=480;
+    // should fail since niklas is not a valid codec name
+    TEST_MUSTPASS(!codec->GetRecPayloadType(0,cinst));
+    strcpy(cinst.plname,"iSAC");                                
+    TEST_MUSTPASS(codec->GetRecPayloadType(0,cinst));  // both iSAC
+    strcpy(cinst.plname,"ISAC");                        // and ISAC should work
+    TEST_MUSTPASS(codec->GetRecPayloadType(0,cinst));
+    int orgPT=cinst.pltype;                      // default payload type is 103
+    TEST_LOG("Setting payload type for iSAC to 127\n");
+    cinst.pltype=123;
+    TEST_MUSTPASS(codec->SetRecPayloadType(0,cinst));
+    TEST_MUSTPASS(codec->GetRecPayloadType(0,cinst));
+    TEST_MUSTPASS(!(cinst.pltype==123));
+    TEST_LOG("Setting it back\n");
+    cinst.pltype=orgPT;
+    TEST_MUSTPASS(codec->SetRecPayloadType(0,cinst));
+    TEST_MUSTPASS(codec->GetRecPayloadType(0,cinst));
+    TEST_MUSTPASS(!(cinst.pltype==orgPT));
+    cinst.pltype=123;
+    cinst.plfreq=8000;
+    cinst.pacsize=240;
+    cinst.rate=13300;
+#ifdef WEBRTC_CODEC_ILBC
+    strcpy(cinst.plname,"iLBC");
+    TEST_MUSTPASS(codec->GetRecPayloadType(0,cinst));
+    orgPT=cinst.pltype;
+    cinst.pltype=123;
+    TEST_MUSTPASS(codec->SetRecPayloadType(0,cinst));
+    TEST_MUSTPASS(codec->GetRecPayloadType(0,cinst));
+    TEST_LOG("Setting it back\n");
+    cinst.pltype=orgPT;
+    TEST_MUSTPASS(codec->SetRecPayloadType(0,cinst));
+    TEST_MUSTPASS(codec->GetRecPayloadType(0,cinst));
+    TEST_MUSTPASS(!(cinst.pltype==orgPT));
+#endif // #ifdef WEBRTC_CODEC_ILBC
+#endif // #if defined(WEBRTC_CODEC_ISAC)
+#endif // #ifdef _TEST_CODEC_
+
+    ///////////////////////////////////////////////
+    // Network (test before streaming is activated)
+
+#ifdef _TEST_NETWORK_
+    TEST_LOG("\n\n+++ Network tests +++\n\n");
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    int srcRtpPort = 0;
+    int srcRtcpPort = 0;
+
+    int filtPort = -1;
+    int filtPortRTCP = -1;
+    char srcIp[32] = "0.0.0.0";
+    char filtIp[32] = "0.0.0.0";
+
+    TEST_LOG("GetSourceInfo \n");
+    srcRtpPort = 1234;
+    srcRtcpPort = 1235;
+    TEST_MUSTPASS(netw->GetSourceInfo(0, srcRtpPort, srcRtcpPort, srcIp));
+    TEST_MUSTPASS(0 != srcRtpPort);
+    TEST_MUSTPASS(0 != srcRtcpPort);
+    TEST_MUSTPASS(_stricmp(srcIp, ""));
+
+    TEST_LOG("GetSourceFilter \n");
+    TEST_MUSTPASS(netw->GetSourceFilter(0, filtPort, filtPortRTCP, filtIp));
+    TEST_MUSTPASS(0 != filtPort);
+    TEST_MUSTPASS(0 != filtPortRTCP);
+    TEST_MUSTPASS(_stricmp(filtIp, ""));
+
+    TEST_LOG("SetSourceFilter \n");
+    TEST_MUSTPASS(netw->SetSourceFilter(0, srcRtpPort));
+#else
+    TEST_LOG("Skipping network tests - WEBRTC_EXTERNAL_TRANSPORT is defined \n");
+#endif // #ifndef WEBRTC_EXTERNAL_TRANSPORT
+#else
+    TEST_LOG("\n\n+++ Network tests NOT ENABLED +++\n");
+#endif // #ifdef _TEST_NETWORK_
+
+    ///////////////////
+    // Start streaming
+
+    TEST_LOG("\n\n+++ Starting streaming +++\n\n");
+
+    my_transportation ch0transport(netw);
+
+    // goto Exit;
+
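+    // Two transport configurations are covered: with WEBRTC_EXTERNAL_TRANSPORT
+    // the test registers its own transport object (my_transportation), while
+    // the default build uses the built-in UDP transport and loops the media
+    // back over 127.0.0.1:8000.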
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+    TEST_LOG("Enabling external transport \n");
+    TEST_MUSTPASS(netw->RegisterExternalTransport(0, ch0transport));
+#else
+    TEST_LOG("Setting send and receive parameters \n");
+    TEST_MUSTPASS(base->SetSendDestination(0, 8000, "127.0.0.1"));
+    // no IP specified => "0.0.0.0" will be stored
+    TEST_MUSTPASS(base->SetLocalReceiver(0,8000));
+
+    CodecInst Jing_inst;
+    Jing_inst.channels=1;
+    Jing_inst.pacsize=160;
+    Jing_inst.plfreq=8000;
+    Jing_inst.pltype=0;
+    Jing_inst.rate=64000;
+    strcpy(Jing_inst.plname, "PCMU");
+    TEST_MUSTPASS(codec->SetSendCodec(0, Jing_inst));
+
+    int port = -1, srcPort = -1, rtcpPort = -1;
+    char ipaddr[64] = {0};
+    strcpy(ipaddr, "10.10.10.10");
+    TEST_MUSTPASS(base->GetSendDestination(0, port, ipaddr, srcPort, rtcpPort));
+    TEST_MUSTPASS(8000 != port);
+    TEST_MUSTPASS(8000 != srcPort);
+    TEST_MUSTPASS(8001 != rtcpPort);
+    TEST_MUSTPASS(_stricmp(ipaddr, "127.0.0.1"));
+
+    port = -1; rtcpPort = -1;
+    TEST_MUSTPASS(base->GetLocalReceiver(0, port, rtcpPort, ipaddr));
+    TEST_MUSTPASS(8000 != port);
+    TEST_MUSTPASS(8001 != rtcpPort);
+    TEST_MUSTPASS(_stricmp(ipaddr, "0.0.0.0"));
+#endif
+
+    TEST_LOG("Start listening, playout and sending \n");
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+
+    // <=== full duplex ===>
+
+    TEST_LOG("You should now hear yourself, running default codec (PCMU)\n");
+    SLEEP(2000);
+
+    if (file)
+    {
+        TEST_LOG("Start playing a file as microphone, so you don't need to"
+            " speak all the time\n");
+        TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0,
+                                                         micFile,
+                                                         true,
+                                                         true));
+        SLEEP(1000);
+    }
+
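+    // The on-hold tests below cover all three hold modes: the default
+    // (hold both send and play), kHoldSendOnly and kHoldPlayOnly.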
+#ifdef _TEST_BASE_
+    TEST_LOG("Put channel on hold => should *not* hear audio \n");
+    // HOLD_SEND_AND_PLAY is the default mode
+    TEST_MUSTPASS(base->SetOnHoldStatus(0, true));
+    SLEEP(2000);
+    TEST_LOG("Remove on hold => should hear audio again \n");
+    TEST_MUSTPASS(base->SetOnHoldStatus(0, false));
+    SLEEP(2000);
+    TEST_LOG("Put sending on hold => should *not* hear audio \n");
+    TEST_MUSTPASS(base->SetOnHoldStatus(0, true, kHoldSendOnly));
+    SLEEP(2000);
+    if (file)
+    {
+        TEST_LOG("Start playing a file locally => "
+            "you should now hear this file being played out \n");
+        TEST_MUSTPASS(file->StartPlayingFileLocally(0, micFile, true));
+        SLEEP(2000);
+    }
+    TEST_LOG("Put playing on hold => should *not* hear audio \n");
+    TEST_MUSTPASS(base->SetOnHoldStatus(0, true, kHoldPlayOnly));
+    SLEEP(2000);
+    TEST_LOG("Remove on hold => should hear audio again \n");
+    if (file)
+    {
+        TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+    }
+    TEST_MUSTPASS(base->SetOnHoldStatus(0, false));
+    SLEEP(2000);
+
+    NetEqModes mode;
+    TEST_MUSTPASS(base->GetNetEQPlayoutMode(0, mode));
+    TEST_MUSTPASS(mode != kNetEqDefault);
+    TEST_LOG("NetEQ DEFAULT playout mode enabled => should hear OK audio \n");
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(0, kNetEqDefault));
+    SLEEP(3000);
+    TEST_LOG("NetEQ STREAMING playout mode enabled => should hear OK audio \n");
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(0, kNetEqStreaming));
+    SLEEP(3000);
+    TEST_LOG("NetEQ FAX playout mode enabled => should hear OK audio \n");
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(0, kNetEqFax));
+    SLEEP(3000);
+    TEST_LOG("NetEQ default mode is restored \n");
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(0, kNetEqDefault));
+    TEST_MUSTPASS(base->GetNetEQPlayoutMode(0, mode));
+    TEST_MUSTPASS(mode != kNetEqDefault);
+    TEST_MUSTPASS(base->GetNetEQPlayoutMode(0, mode));
+    TEST_MUSTPASS(mode != kNetEqDefault);
+    TEST_LOG("NetEQ DEFAULT playout mode enabled => should hear OK audio \n");
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(0, kNetEqDefault));
+    SLEEP(3000);
+    TEST_LOG("NetEQ STREAMING playout mode enabled => should hear OK audio \n");
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(0, kNetEqStreaming));
+    SLEEP(3000);
+    TEST_LOG("NetEQ FAX playout mode enabled => should hear OK audio \n");
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(0, kNetEqFax));
+    SLEEP(3000);
+    TEST_LOG("NetEQ default mode is restored \n");
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(0, kNetEqDefault));
+    TEST_MUSTPASS(base->GetNetEQPlayoutMode(0, mode));
+    TEST_MUSTPASS(mode != kNetEqDefault);
+    TEST_MUSTPASS(base->GetNetEQPlayoutMode(0, mode));
+    TEST_MUSTPASS(mode != kNetEqDefault);
+    TEST_LOG("NetEQ DEFAULT playout mode enabled => should hear OK audio \n");
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(0, kNetEqDefault));
+    SLEEP(3000);
+    TEST_LOG("NetEQ STREAMING playout mode enabled => should hear OK audio \n");
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(0, kNetEqStreaming));
+    SLEEP(3000);
+    TEST_LOG("NetEQ FAX playout mode enabled => should hear OK audio \n");
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(0, kNetEqFax));
+    SLEEP(3000);
+    TEST_LOG("NetEQ default mode is restored \n");
+    TEST_MUSTPASS(base->SetNetEQPlayoutMode(0, kNetEqDefault));
+    TEST_MUSTPASS(base->GetNetEQPlayoutMode(0, mode));
+    TEST_MUSTPASS(mode != kNetEqDefault);
+
+    TEST_LOG("Scan all possible NetEQ BGN modes\n");  // skip listening test
+    enum NetEqBgnModes bgnMode;
+    TEST_MUSTPASS(base->GetNetEQBGNMode(0, bgnMode));
+    TEST_MUSTPASS(bgnMode != kBgnOn);
+    TEST_MUSTPASS(base->SetNetEQBGNMode(0, kBgnOn));
+    TEST_MUSTPASS(base->GetNetEQBGNMode(0, bgnMode));
+    TEST_MUSTPASS(bgnMode != kBgnOn);
+    TEST_MUSTPASS(base->SetNetEQBGNMode(0, kBgnFade));
+    TEST_MUSTPASS(base->GetNetEQBGNMode(0, bgnMode));
+    TEST_MUSTPASS(bgnMode != kBgnFade);
+    TEST_MUSTPASS(base->SetNetEQBGNMode(0, kBgnOff));
+    TEST_MUSTPASS(base->GetNetEQBGNMode(0, bgnMode));
+    TEST_MUSTPASS(bgnMode != kBgnOff);
+#else
+    TEST_LOG("Skipping on hold and NetEQ playout tests -"
+        "Base tests are not enabled \n");
+#endif // #ifdef _TEST_BASE_
+
+    /////////
+    // Codec
+
+#ifdef _TEST_CODEC_
+    TEST_LOG("\n\n+++ Codec tests +++\n\n");
+
+    TEST_LOG("Checking default codec\n");
+    TEST_MUSTPASS(codec->GetSendCodec(0, cinst));
+    TEST_MUSTPASS(cinst.channels != 1);
+    TEST_MUSTPASS(cinst.pacsize != 160);
+    TEST_MUSTPASS(cinst.plfreq != 8000);
+    TEST_MUSTPASS(cinst.pltype != 0);
+    TEST_MUSTPASS(cinst.rate != 64000);
+    TEST_MUSTPASS(strcmp("PCMU", cinst.plname) != 0);
+
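+    // The loop below iterates over every codec reported by NumOfCodecs(),
+    // skipping the pseudo codecs CN, telephone-event and red. Codecs without
+    // a default payload type are mapped to 127 on the receive side, and the
+    // packet size is swept in steps of 80 samples; G.722.1 additionally
+    // sweeps its supported sampling frequencies and bit rates.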
+    TEST_LOG("Looping through all codecs and packet sizes\n");
+    TEST_LOG("NOTE: For swb codecs, ensure that you speak in the mic\n");
+    int nCodecs = codec->NumOfCodecs();
+    for (int index = 0; index < nCodecs; index++)
+    {
+        TEST_MUSTPASS(codec->GetCodec(index, cinst));
+
+        if (!((!_stricmp("CN", cinst.plname)) ||
+            (!_stricmp("telephone-event", cinst.plname) ||
+            (!_stricmp("red",cinst.plname)))))
+        {
+            // If no default payload type is defined, we use 127 and also set
+            // receive payload type
+            if (-1 == cinst.pltype)
+            {
+                cinst.pltype = 127;
+                TEST_MUSTPASS(base->StopPlayout(0));
+                TEST_MUSTPASS(base->StopReceive(0));
+                TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+                TEST_MUSTPASS(base->StartReceive(0));
+                TEST_MUSTPASS(base->StartPlayout(0));
+            }
+            TEST_LOG("%s (pt=%d): default(%d) ", cinst.plname, cinst.pltype,
+                     cinst.pacsize);
+            TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+            SLEEP(CODEC_TEST_TIME);
+            // Packet sizes
+            if (!_stricmp("g7221", cinst.plname))   // special case for G.722.1
+            {
+                // Test 16 and 32 kHz
+                for (int freq = 16000; freq <= 32000; freq += 16000)
+                {
+                    cinst.plfreq = freq;
+                    // Test 16/24/32 and 24/32/48 kbit respectively
+                    int rate = (16000 == freq ? 16000 : 24000);
+                    int maxRate = (16000 == freq ? 32000 : 40000);
+                    // In fact 48, see below
+                    for (; rate <= maxRate; rate += 8000)
+                    {
+                        rate = (40000 == rate ? 48000 : rate); // 40 -> 48
+                        cinst.rate = rate;
+                        // Test packet sizes
+                        TEST_LOG("\n%s (pt=%d, fs=%d, rate=%d): ",
+                                 cinst.plname, cinst.pltype,
+                                 cinst.plfreq, cinst.rate);
+                        for (int pacsize = 80; pacsize < 1000; pacsize += 80)
+                        {
+                            // Set codec, and receive payload type
+                            cinst.pacsize = pacsize;
+                            if (-1 != codec->SetSendCodec(0, cinst))
+                            {
+                                TEST_MUSTPASS(base->StopPlayout(0));
+                                TEST_MUSTPASS(base->StopReceive(0));
+                                TEST_MUSTPASS(codec->SetRecPayloadType(0,
+                                                                       cinst));
+                                TEST_MUSTPASS(base->StartReceive(0));
+                                TEST_MUSTPASS(base->StartPlayout(0));
+                                TEST_LOG("%d ", pacsize);
+                                fflush(NULL);
+                                SLEEP(2*CODEC_TEST_TIME);
+                            }
+                        }
+                    }
+                }
+            }
+            else
+            {
+                for (int pacsize = 80; pacsize < 1000; pacsize += 80)
+                {
+                    // Set codec
+                    // from VoE 4.0, we need to specify the right rate
+                    if (!_stricmp("ilbc", cinst.plname))
+                    {
+
+                        if((pacsize == 160) || (pacsize == 320))
+                        {
+                            cinst.rate = 15200;
+                        }
+                        else
+                        {
+                            cinst.rate = 13300;
+                        }
+                    }
+                    cinst.pacsize = pacsize;
+                    if (-1 != codec->SetSendCodec(0, cinst))
+                    {
+                        TEST_LOG("%d ", pacsize);
+                        fflush(NULL);
+                        SLEEP(CODEC_TEST_TIME);
+                    }
+                }
+            }
+            TEST_LOG("\n");
+        }
+    }
+
+    TEST_MUSTPASS(codec->GetCodec(0, cinst));
+    TEST_LOG("Setting codec to first in list: %s \n", cinst.plname);
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+    TEST_LOG("Voice Activity Detection calls\n");
+    TEST_LOG("Must be OFF by default\n");
+    bool VADtest = true;
+    VadModes vadMode = kVadAggressiveHigh;
+    bool disabledDTX = true;
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(VADtest);
+    TEST_MUSTPASS(kVadConventional != vadMode);
+    TEST_MUSTPASS(!disabledDTX);
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(VADtest);
+    TEST_MUSTPASS(kVadConventional != vadMode);
+    TEST_MUSTPASS(!disabledDTX);
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(VADtest);
+    TEST_MUSTPASS(kVadConventional != vadMode);
+    TEST_MUSTPASS(!disabledDTX);
+
+    TEST_LOG("Turn ON VAD\n");
+    TEST_MUSTPASS(codec->SetVADStatus(0, true));
+    TEST_LOG("Should be ON now\n");
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(!VADtest);
+    TEST_MUSTPASS(kVadConventional != vadMode);
+    TEST_MUSTPASS(disabledDTX);
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(!VADtest);
+    TEST_MUSTPASS(kVadConventional != vadMode);
+    TEST_MUSTPASS(disabledDTX);
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(!VADtest);
+    TEST_MUSTPASS(kVadConventional != vadMode);
+    TEST_MUSTPASS(disabledDTX);
+
+    TEST_LOG("Testing Type settings\n");
+    TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadAggressiveLow));
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(kVadAggressiveLow != vadMode);
+    TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadAggressiveMid));
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(kVadAggressiveMid != vadMode);
+    TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadAggressiveMid));
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(kVadAggressiveMid != vadMode);
+    TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadAggressiveMid));
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(kVadAggressiveMid != vadMode);
+    TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadAggressiveHigh, true));
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(kVadAggressiveHigh != vadMode);
+    TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadAggressiveHigh, true));
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(kVadAggressiveHigh != vadMode);
+    TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadAggressiveHigh, true));
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(kVadAggressiveHigh != vadMode);
+    TEST_MUSTPASS(!disabledDTX);
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(kVadAggressiveHigh != vadMode);
+    TEST_MUSTPASS(!disabledDTX);
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(kVadAggressiveHigh != vadMode);
+    TEST_MUSTPASS(!disabledDTX);
+    TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadConventional));
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(kVadConventional != vadMode);
+    TEST_MUSTPASS(disabledDTX);
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(kVadConventional != vadMode);
+    TEST_MUSTPASS(disabledDTX);
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(kVadConventional != vadMode);
+    TEST_MUSTPASS(disabledDTX);
+
+    // VAD is always on when DTX is on, so we need to turn off DTX too
+    TEST_LOG("Turn OFF VAD\n");
+    TEST_MUSTPASS(codec->SetVADStatus(0, false, kVadConventional, true));
+    TEST_LOG("Should be OFF now\n");
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(VADtest);
+    TEST_MUSTPASS(kVadConventional != vadMode);
+    TEST_MUSTPASS(!disabledDTX);
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(VADtest);
+    TEST_MUSTPASS(kVadConventional != vadMode);
+    TEST_MUSTPASS(!disabledDTX);
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(VADtest);
+    TEST_MUSTPASS(kVadConventional != vadMode);
+    TEST_MUSTPASS(!disabledDTX);
+
+#if defined(WEBRTC_CODEC_ISAC)
+    TEST_LOG("Test extended iSAC APIs\n");
+    TEST_LOG("Start by selecting iSAC 30ms adaptive mode\n");
+    strcpy(cinst.plname,"isac");
+    cinst.pltype=103;
+    cinst.plfreq=16000;
+    cinst.channels=1;
+    cinst.rate=-1;  // adaptive rate
+    cinst.pacsize=480;
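+    // Expected behavior: target rates outside the supported range (5000 and
+    // 33000 bps) are rejected, 0 restores the default target, and values such
+    // as 10000 and 32000 bps are accepted.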
+    TEST_LOG("  testing SetISACInitTargetRate:\n");
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, 5000));
+    TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, 33000));
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 32000));
+    TEST_LOG("Speak and ensure that iSAC sounds OK (target = 32kbps)...\n");
+    SLEEP(3000);
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 10000));
+    TEST_LOG("Speak and ensure that iSAC sounds OK (target = 10kbps)...\n");
+    SLEEP(3000);
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 10000, true));
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 10000, false));
+    TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 0));
+    TEST_LOG("Speak and ensure that iSAC sounds OK (target = default)...\n");
+    SLEEP(3000);
+
+    TEST_LOG("  testing SetISACMaxPayloadSize:\n");
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(0, 50));
+    TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(0, 650));
+    TEST_MUSTPASS(codec->SetISACMaxPayloadSize(0, 120));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_LOG("Speak and ensure that iSAC sounds OK"
+        "(max payload size = 100 bytes)...\n");
+    SLEEP(3000);
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(codec->SetISACMaxPayloadSize(0, 400));
+    TEST_MUSTPASS(base->StartSend(0));
+
+    TEST_LOG("  testing SetISACMaxRate:\n");
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(!codec->SetISACMaxRate(0, 31900));
+    TEST_MUSTPASS(!codec->SetISACMaxRate(0, 53500));
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 32000));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_LOG("Speak and ensure that iSAC sounds OK (max rate = 32 kbps)...\n");
+    SLEEP(3000);
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(codec->SetISACMaxRate(0, 53400)); // restore no limitation
+    TEST_MUSTPASS(base->StartSend(0));
+    if (file)
+    {
+        TEST_LOG("==> Start playing a file as microphone again \n");
+        TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0,
+                                                         micFile,
+                                                         true,
+                                                         true));
+    }
+#else
+    TEST_LOG("Skipping extended iSAC API tests - "
+        "WEBRTC_CODEC_ISAC not defined\n");
+#endif // #if defined(WEBRTC_CODEC_ISAC)
+
+    // Tests on AMR setencformat and setdecformat
+    // These should fail
+    TEST_MUSTPASS(!codec->SetAMREncFormat(0, kRfc3267BwEfficient));
+    TEST_MUSTPASS(!codec->SetAMRDecFormat(0, kRfc3267BwEfficient));
+    TEST_MUSTPASS(!codec->SetAMREncFormat(0, kRfc3267OctetAligned));
+    TEST_MUSTPASS(!codec->SetAMRDecFormat(0, kRfc3267OctetAligned));
+    TEST_MUSTPASS(!codec->SetAMREncFormat(0, kRfc3267FileStorage));
+    TEST_MUSTPASS(!codec->SetAMRDecFormat(0, kRfc3267FileStorage));
+
+    // Tests on AMRWB setencformat and setdecformat
+    // These should fail
+    TEST_MUSTPASS(!codec->SetAMRWbEncFormat(0, kRfc3267BwEfficient));
+    TEST_MUSTPASS(!codec->SetAMRWbDecFormat(0, kRfc3267BwEfficient));
+    TEST_MUSTPASS(!codec->SetAMRWbEncFormat(0, kRfc3267OctetAligned));
+    TEST_MUSTPASS(!codec->SetAMRWbDecFormat(0, kRfc3267OctetAligned));
+    TEST_MUSTPASS(!codec->SetAMRWbEncFormat(0, kRfc3267FileStorage));
+    TEST_MUSTPASS(!codec->SetAMRWbDecFormat(0, kRfc3267FileStorage));
+
+    TEST_LOG("Turn on VAD,G711 and set packet size to 30 ms:\n");
+    strcpy(cinst.plname,"pcmu");
+    cinst.pacsize=160;
+    cinst.pltype=0;
+    cinst.plfreq=8000;
+    cinst.channels=1;
+    cinst.rate=64000;
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+    // The assertion below expects VAD to still be off (VADtest == false)
+    TEST_MUSTPASS(codec->GetVADStatus(0, VADtest, vadMode, disabledDTX));
+    TEST_MUSTPASS(VADtest);
+    TEST_MUSTPASS(codec->SetVADStatus(0, false, vadMode, true));
+
+    // Set back to preferred codec
+    TEST_MUSTPASS(codec->GetCodec(0, cinst));
+    TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+#else
+    TEST_LOG("\n\n+++ Codec tests NOT ENABLED +++\n");
+#endif // #ifdef _TEST_CODEC_
+
+    /////////////////////////
+    // Start another channel
+
+#if defined(_TEST_RTP_RTCP_)
+    TEST_LOG("\n\n+++ Preparing another channel for"
+        " RTP/RTCP tests +++ \n\n");
+
+    TEST_LOG("Create one more channel and start it up\n");
+    TEST_MUSTPASS(!(1==base->CreateChannel()));
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+    my_transportation ch1transport(netw);
+    TEST_MUSTPASS(netw->RegisterExternalTransport(1, ch1transport));
+#else
+    TEST_MUSTPASS(base->SetSendDestination(1, 8002, "127.0.0.1"));
+    TEST_MUSTPASS(base->SetLocalReceiver(1, 8002));
+#endif
+    TEST_MUSTPASS(base->StartReceive(1));
+    TEST_MUSTPASS(base->StartPlayout(1));
+    TEST_MUSTPASS(rtp_rtcp->SetLocalSSRC(1, 5678)); // ensures SSRC_ch1 = 5678
+    TEST_MUSTPASS(base->StartSend(1));
+    SLEEP(2000);
+#else
+    TEST_LOG("\n\n+++ Preparing another channel NOT NEEDED +++ \n");
+#endif // defined(_TEST_RTP_RTCP_)
+
+    /////////////////
+    // Conferencing
+
+#ifndef _TEST_BASE_
+    
+    TEST_LOG("\n\n+++ (Base) tests NOT ENABLED +++\n");
+#endif // #ifndef _TEST_BASE_
+
+    ////////////////////////////////////////////////
+    // RTP/RTCP (test after streaming is activated)
+
+#if (defined(_TEST_RTP_RTCP_) && defined(_TEST_BASE_))
+
+    TEST_LOG("\n\n+++ More RTP/RTCP tests +++\n\n");
+
+    SLEEP(8000);
+
+    TEST_LOG("Check that we have gotten RTCP packet, and collected CName\n");
+    TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+    TEST_LOG("default cname is %s", tmpStr);
+    TEST_MUSTPASS(_stricmp("Niklas", tmpStr));
+
+    TEST_LOG("Check that we have received the right SSRC\n");
+    unsigned int ssrc1;
+    TEST_MUSTPASS(rtp_rtcp->GetLocalSSRC(0, ssrc1));
+    TEST_LOG("SSRC chan 0 = %lu \n", (long unsigned int) ssrc1);
+    TEST_MUSTPASS(rtp_rtcp->GetRemoteSSRC(0, ssrc1));
+    // the originally set 1234 should be maintained
+    TEST_MUSTPASS(1234 != ssrc1);
+
+    
+
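+    // An RTCP APP packet carries a 4-byte name, a sub type and application
+    // data whose length must be a multiple of 32 bits. The registered
+    // observer should report back exactly the same name, sub type and data
+    // once the next RTCP report has gone out (hence the long sleeps).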
+    // RTCP APP tests
+    TEST_LOG("Check RTCP APP send/receive \n");
+    TEST_MUSTPASS(rtp_rtcp->RegisterRTCPObserver(0, myRtcpAppHandler));
+    SLEEP(100);
+    // send RTCP APP packet (fill up data message to multiple of 32 bits)
+    const char* data = "application-dependent data------"; // 32 bytes (multiple of 32 bits)
+    unsigned short lenBytes(static_cast<unsigned short>(strlen(data)));
+    unsigned int name = static_cast<unsigned int>(0x41424344); // 'ABCD';
+    unsigned char subType = 1;
+    TEST_MUSTPASS(rtp_rtcp->SendApplicationDefinedRTCPPacket(0,
+                                                             subType,
+                                                             name,
+                                                             data,
+                                                             lenBytes));
+    TEST_LOG("Waiting for RTCP APP callback...\n");
+    SLEEP(8000);    // ensures that RTCP is scheduled
+    TEST_MUSTPASS(strlen(data) != myRtcpAppHandler._lengthBytes);
+    TEST_MUSTPASS(memcmp(data, myRtcpAppHandler._data, lenBytes));
+    TEST_MUSTPASS(myRtcpAppHandler._name != name);
+    TEST_MUSTPASS(myRtcpAppHandler._subType != subType);
+    TEST_LOG("=> application-dependent data of size %d bytes was received\n",
+             lenBytes);
+    // disable the callback and verify that no callback is received this time
+    myRtcpAppHandler.Reset();
+    TEST_MUSTPASS(rtp_rtcp->DeRegisterRTCPObserver(0));
+
+    TEST_MUSTPASS(rtp_rtcp->SendApplicationDefinedRTCPPacket(0,
+                                                             subType,
+                                                             name,
+                                                             data,
+                                                             lenBytes));
+    TEST_LOG("RTCP APP callback should not be received since the observer "
+        "is disabled...\n");
+    SLEEP(5000);    // ensures that RTCP is scheduled
+    TEST_MUSTPASS(myRtcpAppHandler._name != 0);
+    TEST_MUSTPASS(myRtcpAppHandler._subType != 0);
+
+#if !defined(WEBRTC_EXTERNAL_TRANSPORT)
+    printf("Tesing InsertExtraRTPPacket\n");
+
+    const char payloadData[8] = {'A','B','C','D','E','F','G','H'};
+
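+    // Negative tests: a non-existing channel (-1), a payload type outside
+    // [0,127], a NULL payload pointer and a payload larger than 1500 - 28
+    // bytes (presumably the MTU minus the IP/UDP headers) must all fail.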
+    // fail tests
+    // invalid channel
+    TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(-1,
+                                                       0,
+                                                       false,
+                                                       payloadData,
+                                                       8));
+    // invalid payload type
+    TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(0,
+                                                       -1,
+                                                       false,
+                                                       payloadData,
+                                                       8));
+    // invalid payload type
+    TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(0,
+                                                       128,
+                                                       false,
+                                                       payloadData,
+                                                       8));
+    // invalid pointer
+    TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(0,
+                                                       99,
+                                                       false,
+                                                       NULL,
+                                                       8));
+    // invalid size
+    TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(0,
+                                                       99,
+                                                       false,
+                                                       payloadData,
+                                                       1500 - 28 + 1));
+
+    // transmit some extra RTP packets
+    for (int pt = 0; pt < 128; pt++)
+    {
+        TEST_MUSTPASS(rtp_rtcp->InsertExtraRTPPacket(0,
+                                                     pt,
+                                                     false,
+                                                     payloadData,
+                                                     8));
+        TEST_MUSTPASS(rtp_rtcp->InsertExtraRTPPacket(0,
+                                                     pt,
+                                                     true,
+                                                     payloadData,
+                                                     8));
+    }
+#else
+    printf("Skipping InsertExtraRTPPacket tests -"
+        " WEBRTC_EXTERNAL_TRANSPORT is defined \n");
+#endif
+
+    TEST_LOG("Enable the RTP observer\n");
+    TEST_MUSTPASS(rtp_rtcp->RegisterRTPObserver(0, rtpObserver));
+    TEST_MUSTPASS(rtp_rtcp->RegisterRTPObserver(1, rtpObserver));
+    rtpObserver.Reset();
+
+    // Create two RTP-dump files (3 seconds long).
+    // Verify using rtpplay or NetEqRTPplay when test is done.
+    TEST_LOG("Creating two RTP-dump files...\n");
+    TEST_MUSTPASS(rtp_rtcp->StartRTPDump(0,
+                                         GetFilename("dump_in_3sec.rtp"),
+                                         kRtpIncoming));
+    MARK();
+    TEST_MUSTPASS(rtp_rtcp->StartRTPDump(0,
+                                         GetFilename("dump_out_3sec.rtp"),
+                                         kRtpOutgoing));
+    MARK();
+    SLEEP(3000);
+    TEST_MUSTPASS(rtp_rtcp->StopRTPDump(0, kRtpIncoming));
+    MARK();
+    TEST_MUSTPASS(rtp_rtcp->StopRTPDump(0, kRtpOutgoing));
+    MARK();
+
+    rtpObserver.Reset();
+
+    TEST_LOG("Verify the OnIncomingSSRCChanged callback\n");
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(rtp_rtcp->SetLocalSSRC(0, 7777));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(500);
+    TEST_MUSTPASS(rtpObserver._SSRC[0] != 7777);
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(rtp_rtcp->SetLocalSSRC(0, 1234));
+    TEST_MUSTPASS(base->StartSend(0));
+    SLEEP(500);
+    TEST_MUSTPASS(rtpObserver._SSRC[0] != 1234);
+    rtpObserver.Reset();
+    if (file)
+    {
+        TEST_LOG("Start playing a file as microphone again...\n");
+        TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0,
+                                                         micFile,
+                                                         true,
+                                                         true));
+    }
+
+#ifdef WEBRTC_CODEC_RED
+    TEST_LOG("Enabling FEC \n");
+    TEST_MUSTPASS(rtp_rtcp->SetFECStatus(0, true));
+    SLEEP(2000);
+
+    TEST_LOG("Disabling FEC\n");
+    TEST_MUSTPASS(rtp_rtcp->SetFECStatus(0, false));
+    SLEEP(2000);
+#else
+    TEST_LOG("Skipping FEC tests - WEBRTC_CODEC_RED not defined \n");
+#endif // #ifdef WEBRTC_CODEC_RED
+#else
+    TEST_LOG("\n\n+++ More RTP/RTCP tests NOT ENABLED +++\n");
+#endif // #ifdef _TEST_RTP_RTCP_
+
+    /////////////////////////
+    // Delete extra channel
+
+#if defined(_TEST_RTP_RTCP_)
+    TEST_LOG("\n\n+++ Delete extra channel +++ \n\n");
+
+    TEST_LOG("Delete channel 1, stopping everything\n");
+    TEST_MUSTPASS(base->DeleteChannel(1));
+#else
+    TEST_LOG("\n\n+++ Delete extra channel NOT NEEDED +++ \n");
+#endif // #if defined(_TEST_RTP_RTCP_)
+
+    /////////////////////////////////////////////////
+    // Hardware (test after streaming is activated)
+
+#ifdef _TEST_HARDWARE_
+    TEST_LOG("\n\n+++ More hardware tests +++\n\n");
+
+
+#if !defined(MAC_IPHONE) && !defined(ANDROID)
+#ifdef _WIN32
+    // should also work while already recording
+    TEST_MUSTPASS(hardware->SetRecordingDevice(-1));
+    // should also work while already playing
+    TEST_MUSTPASS(hardware->SetPlayoutDevice(-1));
+#else
+    TEST_MUSTPASS(hardware->SetRecordingDevice(0));
+    TEST_MUSTPASS(hardware->SetPlayoutDevice(0));
+#endif
+    TEST_MUSTPASS(hardware->GetRecordingDeviceName(0, devName, guidName));
+    TEST_MUSTPASS(hardware->GetPlayoutDeviceName(0, devName, guidName));
+
+    TEST_MUSTPASS(hardware->GetNumOfRecordingDevices(nRec));
+    TEST_MUSTPASS(hardware->GetNumOfPlayoutDevices(nPlay));
+#endif
+    
+    int load = -1;
+    
+#if defined(_WIN32)
+    TEST_MUSTPASS(hardware->GetCPULoad(load));
+    TEST_MUSTPASS(load == -1);
+    TEST_LOG("VE CPU load     = %d\n", load);
+#else
+    TEST_MUSTPASS(!hardware->GetCPULoad(load));
+#endif
+
+#if !defined(WEBRTC_MAC) && !defined(ANDROID)
+    // Not supported on Mac yet
+    load = -1;
+    TEST_MUSTPASS(hardware->GetSystemCPULoad(load));
+    TEST_MUSTPASS(load == -1);
+    TEST_LOG("System CPU load = %d\n", load);
+#endif
+    
+#ifdef MAC_IPHONE
+    // Reset sound device
+    TEST_LOG("Reset sound device \n");
+    TEST_MUSTPASS(hardware->ResetAudioDevice());
+    SLEEP(2000);
+#endif // #ifdef MAC_IPHONE
+
+#else
+    TEST_LOG("\n\n+++ More hardware tests NOT ENABLED +++\n");
+#endif
+
+    ////////
+    // Dtmf
+
+#ifdef _TEST_DTMF_
+    TEST_LOG("\n\n+++ Dtmf tests +++\n\n");
+
+    TEST_LOG("Making sure Dtmf Feedback is enabled by default \n");
+    bool dtmfFeedback = false, dtmfDirectFeedback = true;
+    TEST_MUSTPASS(dtmf->GetDtmfFeedbackStatus(dtmfFeedback,
+                                              dtmfDirectFeedback));
+    TEST_MUSTPASS(!dtmfFeedback);
+    TEST_MUSTPASS(dtmfDirectFeedback);
+
+    // Add support when new 4.0 API is complete
+#if (defined(WEBRTC_DTMF_DETECTION) && !defined(_INSTRUMENTATION_TESTING_))
+    DtmfCallback *d = new DtmfCallback();
+
+    // Set codec to PCMU to make sure tones are not distorted
+    TEST_LOG("Setting codec to PCMU\n");
+    CodecInst ci;
+    ci.channels = 1;
+    ci.pacsize = 160;
+    ci.plfreq = 8000;
+    ci.pltype = 0;
+    ci.rate = 64000;
+    strcpy(ci.plname, "PCMU");
+    TEST_MUSTPASS(codec->SetSendCodec(0, ci));
+
+    // Loop the different detections methods
+    TelephoneEventDetectionMethods detMethod = kInBand;
+    for (int h=0; h<3; ++h)
+    {
+        if (0 == h)
+        {
+            TEST_LOG("Testing telephone-event (Dtmf) detection"
+                " using in-band method \n");
+            TEST_LOG("  In-band events should be detected \n");
+            TEST_LOG("  Out-of-band Dtmf events (0-15) should be"
+                " detected \n");
+            TEST_LOG("  Out-of-band non-Dtmf events (>15) should NOT be"
+                " detected \n");
+            detMethod = kInBand;
+        }
+        if (1 == h)
+        {
+            TEST_LOG("Testing telephone-event (Dtmf) detection using"
+                " out-of-band method\n");
+            TEST_LOG("  In-band events should NOT be detected \n");
+            TEST_LOG("  Out-of-band events should be detected \n");
+            detMethod = kOutOfBand;
+        }
+        if (2 == h)
+        {
+            TEST_LOG("Testing telephone-event (Dtmf) detection using both"
+                " in-band and out-of-band methods\n");
+            TEST_LOG("  In-band events should be detected \n");
+            TEST_LOG("  Out-of-band Dtmf events (0-15) should be detected"
+                " TWICE \n");
+            TEST_LOG("  Out-of-band non-Dtmf events (>15) should be detected"
+                " ONCE \n");
+            detMethod = kInAndOutOfBand;
+        }
+        TEST_MUSTPASS(dtmf->RegisterTelephoneEventDetection(0, detMethod, *d));
+#else
+        TEST_LOG("Skipping Dtmf detection tests - WEBRTC_DTMF_DETECTION not"
+            " defined or _INSTRUMENTATION_TESTING_ defined \n");
+#endif
+
+        TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(false));
+        TEST_LOG("Sending in-band telephone events:");
+        for(int i = 0; i < 16; i++)
+        {
+            TEST_LOG("\n  %d ", i); fflush(NULL);
+            TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, i, false, 160, 10));
+            SLEEP(500);
+        }
+#ifdef WEBRTC_CODEC_AVT
+        TEST_LOG("\nSending out-of-band telephone events:");
+        for(int i = 0; i < 16; i++)
+        {
+            TEST_LOG("\n  %d ", i); fflush(NULL);
+            TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, i, true));
+            SLEEP(500);
+        }
+        // Testing 2 non-Dtmf events
+        int num = 32;
+        TEST_LOG("\n  %d ", num); fflush(NULL);
+        TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, num, true));
+        SLEEP(500);
+        num = 110;
+        TEST_LOG("\n  %d ", num); fflush(NULL);
+        TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, num, true));
+        SLEEP(500);
+        ANL();
+#endif
+#if (defined(WEBRTC_DTMF_DETECTION) && !defined(_INSTRUMENTATION_TESTING_))
+        TEST_MUSTPASS(dtmf->DeRegisterTelephoneEventDetection(0));
+        TEST_LOG("Detected %d events \n", d->counter);
+        int expectedCount = 32; // For 0 == h
+        if (1 == h) expectedCount = 18;
+        if (2 == h) expectedCount = 50;
+        TEST_MUSTPASS(d->counter != expectedCount);
+        d->counter = 0;
+    } // for loop
+
+    TEST_LOG("Testing no detection after disabling:");
+    TEST_MUSTPASS(dtmf->DeRegisterTelephoneEventDetection(0));
+    TEST_LOG(" 0");
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, false));
+    SLEEP(500);
+    TEST_LOG(" 1");
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 1, true));
+    SLEEP(500);
+    TEST_LOG("\nDtmf tones sent: 2, detected: %d \n", d->counter);
+    TEST_MUSTPASS(0 != d->counter);
+    delete d;
+
+    TEST_MUSTPASS(codec->GetCodec(0, ci));
+    TEST_LOG("Back to first codec in list: %s\n", ci.plname);
+    TEST_MUSTPASS(codec->SetSendCodec(0, ci));
+#endif
+
+
+#ifndef MAC_IPHONE
+#ifdef WEBRTC_CODEC_AVT
+    TEST_LOG("Disabling Dtmf playout (no tone should be heard) \n");
+    TEST_MUSTPASS(dtmf->SetDtmfPlayoutStatus(0, false));
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true));
+    SLEEP(500);
+
+    TEST_LOG("Enabling Dtmf playout (tone should be heard) \n");
+    TEST_MUSTPASS(dtmf->SetDtmfPlayoutStatus(0, true));
+    TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true));
+    SLEEP(500);
+#endif
+#endif
+    
+    TEST_LOG("Playing Dtmf tone locally \n");
+///    TEST_MUSTPASS(dtmf->PlayDtmfTone(0, 300, 15));
+    SLEEP(500);
+#ifdef WEBRTC_CODEC_AVT
+    CodecInst c2;
+
+    TEST_LOG("Changing Dtmf payload type \n");
+
+    // Start by modifying the receiving side
+    if (codec)
+    {
+        int nc = codec->NumOfCodecs();
+        for(int i = 0; i < nc; i++)
+        {
+            TEST_MUSTPASS(codec->GetCodec(i, c2));
+            if(!_stricmp("telephone-event", c2.plname))
+            {
+                c2.pltype = 88;    // use 88 instead of default 106
+                TEST_MUSTPASS(base->StopSend(0));
+                TEST_MUSTPASS(base->StopPlayout(0));
+                TEST_MUSTPASS(base->StopReceive(0));
+                TEST_MUSTPASS(codec->SetRecPayloadType(0, c2));
+                TEST_MUSTPASS(base->StartReceive(0));
+                TEST_MUSTPASS(base->StartPlayout(0));
+                TEST_MUSTPASS(base->StartSend(0));
+                TEST_LOG("Start playing a file as microphone again \n");
+                TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0,
+                                                                 micFile,
+                                                                 true,
+                                                                 true));
+                break;
+            }
+        }
+    }
+
+    SLEEP(500);
+
+    // Next, we must modify the sending side as well
+    TEST_MUSTPASS(dtmf->SetSendTelephoneEventPayloadType(0, c2.pltype));
+
+    TEST_LOG("Outband Dtmf test with modified Dtmf payload:");
+    for(int i = 0; i < 16; i++)
+    {
+        TEST_LOG(" %d", i);
+        fflush(NULL);
+        TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, i, true));
+        SLEEP(500);
+    }
+    ANL();
+#endif
+    TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(true, false));
+#else
+    TEST_LOG("\n\n+++ Dtmf tests NOT ENABLED +++\n");
+#endif  // #ifdef _TEST_DTMF_
+
+    //////////
+    // Volume
+
+#ifdef _TEST_VOLUME_
+    TEST_LOG("\n\n+++ Volume tests +++\n\n");
+
+#if !defined(MAC_IPHONE)
+    // Speaker volume test
+    unsigned int vol = 1000;
+    TEST_LOG("Saving Speaker volume\n");
+    TEST_MUSTPASS(volume->GetSpeakerVolume(vol));
+    TEST_MUSTPASS(!(vol <= 255));
+    TEST_LOG("Setting speaker volume to 0\n");
+    TEST_MUSTPASS(volume->SetSpeakerVolume(0));
+    SLEEP(1000);
+    TEST_LOG("Setting speaker volume to 255\n");
+    TEST_MUSTPASS(volume->SetSpeakerVolume(255));
+    SLEEP(1000);
+    TEST_LOG("Setting speaker volume back to saved value\n");
+    TEST_MUSTPASS(volume->SetSpeakerVolume(vol));
+    SLEEP(1000);
+#endif // #if !defined(MAC_IPHONE)
+
+    if (file)
+    {
+        TEST_LOG("==> Talk into the microphone \n");
+        TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+        SLEEP(1000);
+    }
+
+#if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    // Mic volume test
+#if defined(_TEST_AUDIO_PROCESSING_) && defined(WEBRTC_VOICE_ENGINE_AGC)
+    bool agcTemp(true);
+    AgcModes agcModeTemp(kAgcAdaptiveAnalog);
+    TEST_MUSTPASS(apm->GetAgcStatus(agcTemp, agcModeTemp)); // current state
+    TEST_LOG("Turn off AGC\n");
+    TEST_MUSTPASS(apm->SetAgcStatus(false));
+#endif
+    TEST_LOG("Saving Mic volume\n");
+    TEST_MUSTPASS(volume->GetMicVolume(vol));
+    TEST_MUSTPASS(!(vol <= 255));
+    TEST_LOG("Setting Mic volume to 0\n");
+    TEST_MUSTPASS(volume->SetMicVolume(0));
+    SLEEP(1000);
+    TEST_LOG("Setting Mic volume to 255\n");
+    TEST_MUSTPASS(volume->SetMicVolume(255));
+    SLEEP(1000);
+    TEST_LOG("Setting Mic volume back to saved value\n");
+    TEST_MUSTPASS(volume->SetMicVolume(vol));
+    SLEEP(1000);
+#if defined(_TEST_AUDIO_PROCESSING_) && defined(WEBRTC_VOICE_ENGINE_AGC)
+    TEST_LOG("Reset AGC to previous state\n");
+    TEST_MUSTPASS(apm->SetAgcStatus(agcTemp, agcModeTemp));
+#endif
+#endif // #if (!defined(MAC_IPHONE) && !defined(ANDROID))
+
+    // Input mute test
+    TEST_LOG("Enabling input muting\n");
+    bool mute = true;
+    TEST_MUSTPASS(volume->GetInputMute(0, mute));
+    TEST_MUSTPASS(mute);
+    TEST_MUSTPASS(volume->SetInputMute(0, true));
+    TEST_MUSTPASS(volume->GetInputMute(0, mute));
+    TEST_MUSTPASS(!mute);
+    SLEEP(1000);
+    TEST_LOG("Disabling input muting\n");
+    TEST_MUSTPASS(volume->SetInputMute(0, false));
+    TEST_MUSTPASS(volume->GetInputMute(0, mute));
+    TEST_MUSTPASS(mute);
+    SLEEP(1000);
+
+#if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    // System output mute test
+    TEST_LOG("Enabling system output muting\n");
+    bool outputMute = true;
+    TEST_MUSTPASS(volume->GetSystemOutputMute(outputMute));
+    TEST_MUSTPASS(outputMute);
+    TEST_MUSTPASS(volume->SetSystemOutputMute(true));
+    TEST_MUSTPASS(volume->GetSystemOutputMute(outputMute));
+    TEST_MUSTPASS(!outputMute);
+    SLEEP(1000);
+    TEST_LOG("Disabling system output muting\n");
+    TEST_MUSTPASS(volume->SetSystemOutputMute(false));
+    TEST_MUSTPASS(volume->GetSystemOutputMute(outputMute));
+    TEST_MUSTPASS(outputMute);
+    SLEEP(1000);
+
+    // System Input mute test
+    TEST_LOG("Enabling system input muting\n");
+    bool inputMute = true;
+    TEST_MUSTPASS(volume->GetSystemInputMute(inputMute));
+    TEST_MUSTPASS(inputMute);
+    TEST_MUSTPASS(volume->SetSystemInputMute(true));
+    // This is needed to avoid error using pulse
+    SLEEP(100);
+    TEST_MUSTPASS(volume->GetSystemInputMute(inputMute));
+    TEST_MUSTPASS(!inputMute);
+    SLEEP(1000);
+    TEST_LOG("Disabling system input muting\n");
+    TEST_MUSTPASS(volume->SetSystemInputMute(false));
+    // This is needed to avoid error using pulse
+    SLEEP(100);
+    TEST_MUSTPASS(volume->GetSystemInputMute(inputMute));
+    TEST_MUSTPASS(inputMute);
+    SLEEP(1000);
+#endif // #if (!defined(MAC_IPHONE) && !defined(ANDROID))
+
+#if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    // Test Input & Output levels
+    TEST_LOG("Testing input & output levels for 10 seconds (dT=1 second)\n");
+    TEST_LOG("Speak in microphone to vary the levels...\n");
+    unsigned int inputLevel(0);
+    unsigned int outputLevel(0);
+    unsigned int inputLevelFullRange(0);
+    unsigned int outputLevelFullRange(0);
+
+    for (int t = 0; t < 5; t++)
+    {
+        SLEEP(1000);
+        TEST_MUSTPASS(volume->GetSpeechInputLevel(inputLevel));
+        TEST_MUSTPASS(volume->GetSpeechOutputLevel(0, outputLevel));
+        TEST_MUSTPASS(volume->GetSpeechInputLevelFullRange(
+            inputLevelFullRange));
+        TEST_MUSTPASS(volume->GetSpeechOutputLevelFullRange(
+            0, outputLevelFullRange));
+        TEST_LOG("    warped levels (0-9)    : in=%5d, out=%5d\n",
+                 inputLevel, outputLevel);
+        TEST_LOG("    linear levels (0-32768): in=%5d, out=%5d\n",
+                 inputLevelFullRange, outputLevelFullRange);
+    }
+#endif // #if (!defined(MAC_IPHONE) && !defined(ANDROID))
+
+    if (file)
+    {
+        TEST_LOG("==> Start playing a file as microphone again \n");
+        TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0,
+                                                         micFile,
+                                                         true,
+                                                         true));
+        SLEEP(1000);
+    }
+
+#if !defined(MAC_IPHONE)
+    // Channel scaling test
+    TEST_LOG("Channel scaling\n");
+    float scaling = -1.0;
+    TEST_MUSTPASS(volume->GetChannelOutputVolumeScaling(0, scaling));
+    TEST_MUSTPASS(1.0 != scaling);
+    TEST_MUSTPASS(volume->SetChannelOutputVolumeScaling(0, (float)0.1));
+    TEST_MUSTPASS(volume->GetChannelOutputVolumeScaling(0, scaling));
+    TEST_MUSTPASS(!((scaling > 0.099) && (scaling < 0.101)));
+    SLEEP(1000);
+    TEST_MUSTPASS(volume->SetChannelOutputVolumeScaling(0, (float)1.0));
+    TEST_MUSTPASS(volume->GetChannelOutputVolumeScaling(0, scaling));
+    TEST_MUSTPASS(1.0 != scaling);
+#endif // #if !defined(MAC_IPHONE)
+
+#if !defined(MAC_IPHONE) && !defined(ANDROID)
+    // Channel panning test
+    TEST_LOG("Channel panning\n");
+    float left = -1.0, right = -1.0;
+    TEST_MUSTPASS(volume->GetOutputVolumePan(0, left, right));
+    TEST_MUSTPASS(!((left == 1.0) && (right == 1.0)));
+    TEST_LOG("Panning to left\n");
+    TEST_MUSTPASS(volume->SetOutputVolumePan(0, (float)0.8, (float)0.1));
+    TEST_MUSTPASS(volume->GetOutputVolumePan(0, left, right));
+    TEST_MUSTPASS(!((left > 0.799) && (left < 0.801)));
+    TEST_MUSTPASS(!((right > 0.099) && (right < 0.101)));
+    SLEEP(1000);
+    TEST_LOG("Back to center\n");
+    TEST_MUSTPASS(volume->SetOutputVolumePan(0, (float)1.0, (float)1.0));
+    SLEEP(1000);
+    left = -1.0; right = -1.0;
+    TEST_MUSTPASS(volume->GetOutputVolumePan(0, left, right));
+    TEST_MUSTPASS(!((left == 1.0) && (right == 1.0)));
+    TEST_LOG("Panning channel to right\n");
+    TEST_MUSTPASS(volume->SetOutputVolumePan(0, (float)0.1, (float)0.8));
+    SLEEP(100);
+    TEST_MUSTPASS(volume->GetOutputVolumePan(0, left, right));
+    TEST_MUSTPASS(!((left > 0.099) && (left < 0.101)));
+    TEST_MUSTPASS(!((right > 0.799) && (right < 0.801)));
+    SLEEP(1000);
+    TEST_LOG("Channel back to center\n");
+    TEST_MUSTPASS(volume->SetOutputVolumePan(0, (float)1.0, (float)1.0));
+    SLEEP(1000);
+#else
+    TEST_LOG("Skipping stereo tests\n");
+#endif // #if !defined(MAC_IPHONE) && !defined(ANDROID)
+
+#else
+    TEST_LOG("\n\n+++ Volume tests NOT ENABLED +++\n");
+#endif // #ifdef _TEST_VOLUME_
+
+    ///////
+    // AudioProcessing
+
+#ifdef _TEST_AUDIO_PROCESSING_
+    TEST_LOG("\n\n+++ AudioProcessing tests +++\n\n");
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    bool test;
+    TEST_LOG("AGC calls\n");
+#if (defined(MAC_IPHONE) || defined(ANDROID))
+    TEST_LOG("Must be OFF by default\n");
+    test = true;
+    AgcModes agcMode = kAgcAdaptiveAnalog;
+    TEST_MUSTPASS(apm->GetAgcStatus(test, agcMode));
+    TEST_MUSTPASS(test);
+    TEST_MUSTPASS(kAgcAdaptiveDigital != agcMode);
+#else
+    TEST_LOG("Must be ON by default\n");
+    test = false;
+    AgcModes agcMode = kAgcAdaptiveAnalog;
+    TEST_MUSTPASS(apm->GetAgcStatus(test, agcMode));
+    TEST_MUSTPASS(!test);
+    TEST_MUSTPASS(kAgcAdaptiveAnalog != agcMode);
+
+    TEST_LOG("Turn off AGC\n");
+    // must set value in first call!
+    TEST_MUSTPASS(apm->SetAgcStatus(false, kAgcDefault));
+    TEST_LOG("Should be OFF now\n");
+    TEST_MUSTPASS(apm->GetAgcStatus(test, agcMode));
+    TEST_MUSTPASS(test);
+    TEST_MUSTPASS(kAgcAdaptiveAnalog != agcMode);
+#endif // #if (defined(MAC_IPHONE) || defined(ANDROID))
+
+    TEST_LOG("Turn ON AGC\n");
+#if (defined(MAC_IPHONE) || defined(ANDROID))
+    TEST_MUSTPASS(apm->SetAgcStatus(true, kAgcAdaptiveDigital));
+#else
+    TEST_MUSTPASS(apm->SetAgcStatus(true));
+#endif
+    TEST_LOG("Should be ON now\n");
+    TEST_MUSTPASS(apm->GetAgcStatus(test, agcMode));
+    TEST_MUSTPASS(!test);
+#if (defined(MAC_IPHONE) || defined(ANDROID))
+    TEST_MUSTPASS(kAgcAdaptiveDigital != agcMode);
+#else
+    TEST_MUSTPASS(kAgcAdaptiveAnalog != agcMode);
+#endif
+
+#if (defined(MAC_IPHONE) || defined(ANDROID))
+    TEST_LOG("Testing Type settings\n");
+    // Should fail
+    TEST_MUSTPASS(!apm->SetAgcStatus(true, kAgcAdaptiveAnalog));
+    // Should fail
+    TEST_MUSTPASS(apm->SetAgcStatus(true, kAgcFixedDigital));
+    // Should fail
+    TEST_MUSTPASS(apm->SetAgcStatus(true, kAgcAdaptiveDigital));
+
+    TEST_LOG("Turn off AGC\n");
+    TEST_MUSTPASS(apm->SetAgcStatus(false));
+    TEST_LOG("Should be OFF now\n");
+    TEST_MUSTPASS(apm->GetAgcStatus(test, agcMode));
+    TEST_MUSTPASS(test);
+    TEST_MUSTPASS(kAgcAdaptiveDigital != agcMode);
+#else
+    TEST_LOG("Testing Mode settings\n");
+    TEST_MUSTPASS(apm->SetAgcStatus(true, kAgcFixedDigital));
+    TEST_MUSTPASS(apm->GetAgcStatus(test, agcMode));
+    TEST_MUSTPASS(kAgcFixedDigital != agcMode);
+    TEST_MUSTPASS(apm->SetAgcStatus(true, kAgcAdaptiveDigital));
+    TEST_MUSTPASS(apm->GetAgcStatus(test, agcMode));
+    TEST_MUSTPASS(kAgcAdaptiveDigital != agcMode);
+    TEST_MUSTPASS(apm->SetAgcStatus(true, kAgcAdaptiveAnalog));
+    TEST_MUSTPASS(apm->GetAgcStatus(test, agcMode));
+    TEST_MUSTPASS(kAgcAdaptiveAnalog != agcMode);
+#endif // #if (defined(MAC_IPHONE) || defined(ANDROID))
+
+    TEST_LOG("rxAGC calls\n");
+    // Note: the following tests have not been verified on iPhone, Android or
+    // WinCE; you may run into issues on those platforms.
+
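+    // Receive-side (rx) AGC is applied per channel to the received (far-end)
+    // signal, independently of the near-end AGC tested above.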
+    bool rxAGCTemp(false);
+    AgcModes rxAGCModeTemp(kAgcAdaptiveAnalog);
+    // Store current state
+    TEST_MUSTPASS(apm->GetAgcStatus(rxAGCTemp, rxAGCModeTemp));
+    TEST_LOG("Turn off near-end AGC\n");
+    TEST_MUSTPASS(apm->SetAgcStatus(false));
+
+    TEST_LOG("rxAGC Must be OFF by default\n");
+    test = true;
+    AgcModes rxAGCMode = kAgcAdaptiveDigital;
+    TEST_MUSTPASS(apm->GetRxAgcStatus(0, test, rxAGCMode));
+    TEST_MUSTPASS(test);
+    TEST_MUSTPASS(kAgcAdaptiveDigital != rxAGCMode);
+
+    TEST_LOG("Turn off rxAGC\n");
+    // must set value in first call!
+    TEST_MUSTPASS(apm->SetRxAgcStatus(0, false, kAgcDefault));
+    TEST_LOG("Should be OFF now\n");
+    TEST_MUSTPASS(apm->GetRxAgcStatus(0, test, rxAGCMode));
+    TEST_MUSTPASS(test);
+    TEST_MUSTPASS(kAgcAdaptiveDigital != rxAGCMode);
+
+    TEST_LOG("Turn ON AGC\n");
+    TEST_MUSTPASS(apm->SetRxAgcStatus(0, true));
+    TEST_LOG("Should be ON now\n");
+    TEST_MUSTPASS(apm->GetRxAgcStatus(0, test, agcMode));
+    TEST_MUSTPASS(!test);
+    TEST_MUSTPASS(kAgcAdaptiveDigital != agcMode);
+
+    TEST_LOG("Testing Type settings\n");
+    // Should fail
+    TEST_MUSTPASS(!apm->SetRxAgcStatus(0, true, kAgcAdaptiveAnalog));
+    TEST_MUSTPASS(apm->SetRxAgcStatus(0, true, kAgcFixedDigital));
+    TEST_MUSTPASS(apm->GetRxAgcStatus(0, test, agcMode));
+    TEST_MUSTPASS(kAgcFixedDigital != agcMode);
+    TEST_MUSTPASS(apm->SetRxAgcStatus(0, true, kAgcAdaptiveDigital));
+    TEST_MUSTPASS(apm->GetRxAgcStatus(0, test, agcMode));
+    TEST_MUSTPASS(kAgcAdaptiveDigital != agcMode);
+
+    TEST_LOG("Turn off AGC\n");
+    TEST_MUSTPASS(apm->SetRxAgcStatus(0, false));
+    TEST_LOG("Should be OFF now\n");
+    TEST_MUSTPASS(apm->GetRxAgcStatus(0, test, agcMode));
+    TEST_MUSTPASS(test);
+    TEST_MUSTPASS(kAgcAdaptiveDigital != agcMode);
+
+    // Restore the previously saved near-end AGC state
+    TEST_MUSTPASS(apm->SetAgcStatus(rxAGCTemp, rxAGCModeTemp));
+
+#else
+    TEST_LOG("Skipping AGC tests - WEBRTC_VOICE_ENGINE_AGC not defined \n");
+#endif  // #ifdef WEBRTC_VOICE_ENGINE_AGC
+
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+    TEST_LOG("EC calls\n");
+    TEST_LOG("Must be OFF by default\n");
+#if (defined(MAC_IPHONE) || defined(ANDROID))
+    const EcModes ecModeDefault = kEcAecm;
+#else
+    const EcModes ecModeDefault = kEcAec;
+#endif
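+    // AECM is the mobile-optimized echo control used by default on
+    // iPhone/Android; the full AEC is the default elsewhere. AECM also has
+    // operating modes and an optional comfort noise generation (CNG) flag,
+    // which are verified below.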
+    test = true;
+    EcModes ecMode = kEcAec;
+    AecmModes aecmMode = kAecmSpeakerphone;
+    bool enabledCNG(false);
+    TEST_MUSTPASS(apm->GetEcStatus(test, ecMode));
+    TEST_MUSTPASS(test);
+    TEST_MUSTPASS(ecModeDefault != ecMode);
+    TEST_MUSTPASS(apm->GetAecmMode(aecmMode, enabledCNG));
+    TEST_LOG("default AECM: mode=%d CNG: mode=%d\n",aecmMode, enabledCNG);
+    TEST_MUSTPASS(kAecmSpeakerphone != aecmMode);
+    TEST_MUSTPASS(enabledCNG != true);
+    TEST_MUSTPASS(apm->SetAecmMode(kAecmQuietEarpieceOrHeadset, false));
+    TEST_MUSTPASS(apm->GetAecmMode(aecmMode, enabledCNG));
+    TEST_LOG("change AECM to mode=%d CNG to false\n",aecmMode);
+    TEST_MUSTPASS(aecmMode != kAecmQuietEarpieceOrHeadset);
+    TEST_MUSTPASS(enabledCNG != false);
+
+    TEST_LOG("Turn ON EC\n");
+    TEST_MUSTPASS(apm->SetEcStatus(true, ecModeDefault));
+    TEST_LOG("Should be ON now\n");
+    TEST_MUSTPASS(apm->GetEcStatus(test, ecMode));
+    TEST_MUSTPASS(!test);
+    TEST_MUSTPASS(ecModeDefault != ecMode);
+
+#if (!defined(MAC_IPHONE) && !defined(ANDROID))
+    TEST_MUSTPASS(apm->SetEcStatus(true, kEcAec));
+    TEST_MUSTPASS(apm->GetEcStatus(test, ecMode));
+    TEST_MUSTPASS(kEcAec != ecMode);
+
+    TEST_MUSTPASS(apm->SetEcStatus(true, kEcConference));
+    TEST_MUSTPASS(apm->GetEcStatus(test, ecMode));
+    TEST_MUSTPASS(kEcAec != ecMode);
+
+
+    // The sample frequency used by AudioProcessing is 32 kHz, so it won't
+    // work to activate AECM.
+    TEST_MUSTPASS(apm->SetEcStatus(true, kEcAecm));
+    TEST_MUSTPASS(apm->GetEcStatus(test, ecMode));
+    TEST_MUSTPASS(kEcAecm != ecMode);
+#endif
+
+    // set kEcAecm mode
+    TEST_LOG("Testing AECM Mode settings\n");
+    TEST_MUSTPASS(apm->SetEcStatus(true, kEcAecm));
+    TEST_MUSTPASS(apm->GetEcStatus(test, ecMode));
+    TEST_LOG("EC: enabled=%d, ECmode=%d\n", test, ecMode);
+    TEST_MUSTPASS(test != true);
+    TEST_MUSTPASS(ecMode != kEcAecm);
+
+    // AECM mode, get and set 
+    TEST_MUSTPASS(apm->GetAecmMode(aecmMode, enabledCNG));
+    TEST_MUSTPASS(aecmMode != kAecmQuietEarpieceOrHeadset);
+    TEST_MUSTPASS(enabledCNG != false);
+    TEST_MUSTPASS(apm->SetAecmMode(kAecmEarpiece, true));
+    TEST_MUSTPASS(apm->GetAecmMode(aecmMode, enabledCNG));
+    TEST_LOG("AECM: mode=%d CNG: mode=%d\n",aecmMode, enabledCNG);
+    TEST_MUSTPASS(aecmMode != kAecmEarpiece);
+    TEST_MUSTPASS(enabledCNG != true);
+    TEST_MUSTPASS(apm->SetAecmMode(kAecmEarpiece, false));
+    TEST_MUSTPASS(apm->GetAecmMode(aecmMode, enabledCNG));
+    TEST_LOG("AECM: mode=%d CNG: mode=%d\n",aecmMode, enabledCNG);
+    TEST_MUSTPASS(aecmMode != kAecmEarpiece);
+    TEST_MUSTPASS(enabledCNG != false);
+    TEST_MUSTPASS(apm->SetAecmMode(kAecmLoudEarpiece, true));
+    TEST_MUSTPASS(apm->GetAecmMode(aecmMode, enabledCNG));
+    TEST_LOG("AECM: mode=%d CNG: mode=%d\n",aecmMode, enabledCNG);
+    TEST_MUSTPASS(aecmMode != kAecmLoudEarpiece);
+    TEST_MUSTPASS(enabledCNG != true);
+    TEST_MUSTPASS(apm->SetAecmMode(kAecmSpeakerphone, false));
+    TEST_MUSTPASS(apm->GetAecmMode(aecmMode, enabledCNG));
+    TEST_LOG("AECM: mode=%d CNG: mode=%d\n",aecmMode, enabledCNG);
+    TEST_MUSTPASS(aecmMode != kAecmSpeakerphone);
+    TEST_MUSTPASS(enabledCNG != false);
+    TEST_MUSTPASS(apm->SetAecmMode(kAecmLoudSpeakerphone, true));
+    TEST_MUSTPASS(apm->GetAecmMode(aecmMode, enabledCNG));
+    TEST_LOG("AECM: mode=%d CNG: mode=%d\n",aecmMode, enabledCNG);
+    TEST_MUSTPASS(aecmMode != kAecmLoudSpeakerphone);
+    TEST_MUSTPASS(enabledCNG != true);
+
+    TEST_LOG("Turn OFF AEC\n");
+    TEST_MUSTPASS(apm->SetEcStatus(false));
+    TEST_LOG("Should be OFF now\n");
+    TEST_MUSTPASS(apm->GetEcStatus(test, ecMode));
+    TEST_MUSTPASS(test);
+#else
+    TEST_LOG("Skipping echo cancellation tests -"
+        " WEBRTC_VOICE_ENGINE_ECHO not defined \n");
+#endif  // #ifdef WEBRTC_VOICE_ENGINE_ECHO
+
+#ifdef WEBRTC_VOICE_ENGINE_NR
+    TEST_LOG("NS calls\n");
+    TEST_LOG("Must be OFF by default\n");
+
+    NsModes nsModeDefault = kNsModerateSuppression;
+
+    test = true;
+    NsModes nsMode = kNsVeryHighSuppression;
+    TEST_MUSTPASS(apm->GetNsStatus(test, nsMode));
+    TEST_MUSTPASS(test);
+    TEST_MUSTPASS(nsModeDefault != nsMode);
+
+    TEST_LOG("Turn ON NS\n");
+    TEST_MUSTPASS(apm->SetNsStatus(true));
+    TEST_LOG("Should be ON now\n");
+    TEST_MUSTPASS(apm->GetNsStatus(test, nsMode));
+    TEST_MUSTPASS(!test);
+    TEST_MUSTPASS(nsModeDefault != nsMode);
+
+    TEST_LOG("Testing Mode settings\n");
+    TEST_MUSTPASS(apm->SetNsStatus(true, kNsLowSuppression));
+    TEST_MUSTPASS(apm->GetNsStatus(test, nsMode));
+    TEST_MUSTPASS(kNsLowSuppression != nsMode);
+    TEST_MUSTPASS(apm->SetNsStatus(true, kNsModerateSuppression));
+    TEST_MUSTPASS(apm->GetNsStatus(test, nsMode));
+    TEST_MUSTPASS(kNsModerateSuppression != nsMode);
+    TEST_MUSTPASS(apm->SetNsStatus(true, kNsHighSuppression));
+    TEST_MUSTPASS(apm->GetNsStatus(test, nsMode));
+    TEST_MUSTPASS(kNsHighSuppression != nsMode);
+    TEST_MUSTPASS(apm->SetNsStatus(true, kNsVeryHighSuppression));
+    TEST_MUSTPASS(apm->GetNsStatus(test, nsMode));
+    TEST_MUSTPASS(kNsVeryHighSuppression != nsMode);
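+    // kNsConference is expected to map to high suppression internally.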
+    TEST_MUSTPASS(apm->SetNsStatus(true, kNsConference));
+    TEST_MUSTPASS(apm->GetNsStatus(test, nsMode));
+    TEST_MUSTPASS(kNsHighSuppression != nsMode);
+    TEST_MUSTPASS(apm->SetNsStatus(true, kNsDefault));
+    TEST_MUSTPASS(apm->GetNsStatus(test, nsMode));
+    TEST_MUSTPASS(nsModeDefault != nsMode);
+
+    TEST_LOG("Turn OFF NS\n");
+    TEST_MUSTPASS(apm->SetNsStatus(false));
+    TEST_LOG("Should be OFF now\n");
+    TEST_MUSTPASS(apm->GetNsStatus(test, nsMode));
+    TEST_MUSTPASS(test);
+
+
+    TEST_LOG("rxNS calls\n");
+    TEST_LOG("rxNS Must be OFF by default\n");
+
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, test, nsMode));
+    TEST_MUSTPASS(test);
+    TEST_MUSTPASS(nsModeDefault != nsMode);
+
+    TEST_LOG("Turn ON rxNS\n");
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, true));
+    TEST_LOG("Should be ON now\n");
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, test, nsMode));
+    TEST_MUSTPASS(!test);
+    TEST_MUSTPASS(nsModeDefault != nsMode);
+
+    TEST_LOG("Testing Mode settings\n");
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, true, kNsLowSuppression));
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, test, nsMode));
+    TEST_MUSTPASS(kNsLowSuppression != nsMode);
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, true, kNsModerateSuppression));
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, test, nsMode));
+    TEST_MUSTPASS(kNsModerateSuppression != nsMode);
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, true, kNsHighSuppression));
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, test, nsMode));
+    TEST_MUSTPASS(kNsHighSuppression != nsMode);
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, true, kNsVeryHighSuppression));
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, test, nsMode));
+    TEST_MUSTPASS(kNsVeryHighSuppression != nsMode);
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, true, kNsConference));
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, test, nsMode));
+    TEST_MUSTPASS(kNsHighSuppression != nsMode);
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, true, kNsDefault));
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, test, nsMode));
+    TEST_MUSTPASS(nsModeDefault != nsMode);
+
+    TEST_LOG("Turn OFF NS\n");
+    TEST_MUSTPASS(apm->SetRxNsStatus(0, false));
+    TEST_LOG("Should be OFF now\n");
+    TEST_MUSTPASS(apm->GetRxNsStatus(0, test, nsMode));
+    TEST_MUSTPASS(test);
+
+#else
+    TEST_LOG("Skipping NS tests - WEBRTC_VOICE_ENGINE_NR not defined \n");
+#endif  // #ifdef WEBRTC_VOICE_ENGINE_NR
+
+    // TODO(xians): enable the metrics test when APM is ready.
+    /*
+#if (!defined(MAC_IPHONE) && !defined(ANDROID) && defined(WEBRTC_VOICE_ENGINE_NR))
+    TEST_LOG("Speech, Noise and Echo Metric calls\n");
+    TEST_MUSTPASS(apm->GetMetricsStatus(enabled));   // check default
+    TEST_MUSTPASS(enabled != false);
+    TEST_MUSTPASS(apm->SetMetricsStatus(true));      // enable metrics
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+    // must enable AEC to get valid echo metrics
+    TEST_MUSTPASS(apm->SetEcStatus(true, kEcAec));
+#endif
+    TEST_MUSTPASS(apm->GetMetricsStatus(enabled));
+    TEST_MUSTPASS(enabled != true);
+
+    TEST_LOG("Speak into microphone and check metrics for 10 seconds...\n");
+    int speech_tx, speech_rx;
+    int noise_tx, noise_rx;
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+    int ERLE, ERL, RERL, A_NLP;
+#endif
+    for (int t = 0; t < 5; t++)
+    {
+        SLEEP(2000);
+        TEST_MUSTPASS(apm->GetSpeechMetrics(speech_tx, speech_rx));
+        TEST_LOG("    Speech: Tx=%5d, Rx=%5d [dBm0]\n", speech_tx, speech_rx);
+        TEST_MUSTPASS(apm->GetNoiseMetrics(noise_tx, noise_rx));
+        TEST_LOG("    Noise : Tx=%5d, Rx=%5d [dBm0]\n", noise_tx, noise_rx);
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+        TEST_MUSTPASS(apm->GetEchoMetrics(ERL, ERLE, RERL, A_NLP));
+        TEST_LOG("    Echo  : ERL=%5d, ERLE=%5d, RERL=%5d, A_NLP=%5d [dB]\n",
+                 ERL, ERLE, RERL, A_NLP);
+#endif
+    }
+    TEST_MUSTPASS(apm->SetMetricsStatus(false));     // disable metrics
+#else
+    TEST_LOG("Skipping apm metrics tests - MAC_IPHONE/ANDROID defined \n");
+#endif // #if (!defined(MAC_IPHONE) && !d...
+*/
+    // VAD/DTX indication
+    TEST_LOG("Get voice activity indication \n");
+    if (codec)
+    {
+        bool v = true, dummy2;
+        VadModes dummy1;
+        TEST_MUSTPASS(codec->GetVADStatus(0, v, dummy1, dummy2));
+        TEST_MUSTPASS(v); // Make sure VAD is disabled
+    }
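+    // VoiceActivityIndicator() returns 1 when voice activity is detected on
+    // the channel and 0 during silence.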
+    TEST_MUSTPASS(1 != apm->VoiceActivityIndicator(0));
+    if (codec && volume)
+    {
+        TEST_LOG ("RX VAD detections may vary depending on current signal"
+            " and mic input \n");
+#if !defined(ANDROID) && !defined(MAC_IPHONE)
+        RxCallback rxc;
+        TEST_MUSTPASS(apm->RegisterRxVadObserver(0, rxc));
+#endif
+        TEST_MUSTPASS(codec->SetVADStatus(0, true));
+        TEST_MUSTPASS(volume->SetInputMute(0, true));
+        if (file)
+        {
+            TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+        }
+        SLEEP(500); // After sleeping we should have detected silence
+        TEST_MUSTPASS(0 != apm->VoiceActivityIndicator(0));
+#if !defined(ANDROID) && !defined(MAC_IPHONE)
+        TEST_MUSTPASS(0 != rxc._vadDecision);
+#endif
+        if (file)
+        {
+            TEST_LOG("Start playing a file as microphone again \n");
+            TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0,
+                                                             micFile,
+                                                             true,
+                                                             true));
+        }
+        else
+        {
+            TEST_LOG("==> Make sure you talk into the microphone \n");
+        }
+        TEST_MUSTPASS(codec->SetVADStatus(0, false));
+        TEST_MUSTPASS(volume->SetInputMute(0, false));
+        SLEEP(500); // Sleep time selected by looking in mic play file, after
+                    // sleep we should have detected voice
+        TEST_MUSTPASS(1 != apm->VoiceActivityIndicator(0));
+#if !defined(ANDROID) && !defined(MAC_IPHONE)
+        TEST_MUSTPASS(1 != rxc._vadDecision);
+        TEST_LOG("Disabling RX VAD detection, make sure you see no "
+            "detections\n");
+        TEST_MUSTPASS(apm->DeRegisterRxVadObserver(0));
+        SLEEP(2000);
+#endif
+    }
+    else
+    {
+        TEST_LOG("Skipping voice activity indicator tests - codec and"
+            " volume APIs not available \n");
+    }
+
+#else
+    TEST_LOG("\n\n+++ AudioProcessing tests NOT ENABLED +++\n");
+#endif  // #ifdef _TEST_AUDIO_PROCESSING_
+
+    ////////
+    // File
+
+#ifdef _TEST_FILE_
+    TEST_LOG("\n\n+++ File tests +++\n\n");
+
+    // Test of UTF-8 using the Swedish letters åäö.
+
+    char fileName[64];
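+    // UTF-8 byte sequences: 0xC3 0xA5 = 'å', 0xC3 0xA4 = 'ä', 0xC3 0xB6 = 'ö'.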
+    fileName[0] = (char)0xc3;
+    fileName[1] = (char)0xa5;
+    fileName[2] = (char)0xc3;
+    fileName[3] = (char)0xa4;
+    fileName[4] = (char)0xc3;
+    fileName[5] = (char)0xb6;
+    fileName[6] = '.';
+    fileName[7] = 'p';
+    fileName[8] = 'c';
+    fileName[9] = 'm';
+    fileName[10] = 0;
+
+    // Test of UTF-8 using the Japanese Hiragana "ぁあ" (letter small A and
+    // letter A).
+/*    fileName[0] = (char)0xe3;
+    fileName[1] = (char)0x81;
+    fileName[2] = (char)0x81;
+    fileName[3] = (char)0xe3;
+    fileName[4] = (char)0x81;
+    fileName[5] = (char)0x82;
+    fileName[6] = '.';
+    fileName[7] = 'p';
+    fileName[8] = 'c';
+    fileName[9] = 'm';
+    fileName[10] = 0;
+*/
+
+    // Part of the Cyrillic alphabet
+    // Ф    Х   Ѡ   Ц   Ч   Ш   Щ   Ъ   ЪІ  Ь   Ѣ
+
+    const char* recName = GetFilename(fileName);
+    // Generated with WideCharToMultiByte (see the commented-out snippet below):
+#if _WIN32
+/*   char tempFileNameUTF8[200];
+     int err = WideCharToMultiByte(CP_UTF8,0,L"åäö", -1, tempFileNameUTF8,
+        sizeof(tempFileNameUTF8), NULL, NULL);
+*/
+#endif
+
+    //Stop the current file
+    TEST_LOG("Stop playing file as microphone \n");
+    TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+    TEST_LOG("==> Talk into the microphone \n");
+    SLEEP(1000);
+    TEST_LOG("Record mic for 3 seconds in PCM format\n");
+    TEST_MUSTPASS(file->StartRecordingMicrophone(recName));
+    SLEEP(3000);
+    TEST_MUSTPASS(file->StopRecordingMicrophone());
+    TEST_LOG("Play out the recorded file...\n");
+    TEST_MUSTPASS(file->StartPlayingFileLocally(0, recName));
+    SLEEP(2000);
+#ifndef _INSTRUMENTATION_TESTING_
+    TEST_LOG("After 2 seconds we should still be playing\n");
+    TEST_MUSTPASS(!file->IsPlayingFileLocally(0));
+#endif
+    TEST_LOG("Set scaling\n"); 
+    TEST_MUSTPASS(file->ScaleLocalFilePlayout(0,(float)0.11));
+    SLEEP(1100);
+    TEST_LOG("After 3.1 seconds we should NOT be playing\n");
+    TEST_MUSTPASS(file->IsPlayingFileLocally(0));
+
+    CodecInst codec;
+    TEST_LOG("Record speaker for 3 seconds to wav file\n");
+    memset(&codec, 0, sizeof(CodecInst));
+    strcpy(codec.plname,"pcmu");
+    codec.plfreq=8000;
+    codec.channels=1;
+    codec.pacsize=160;
+    codec.pltype=0;
+    codec.rate=64000;
+    TEST_MUSTPASS(file->StartRecordingPlayout(0,recName,&codec));
+    SLEEP(3000);
+    TEST_MUSTPASS(file->StopRecordingPlayout(0));
+
+    TEST_LOG("Play file as mic, looping for 3 seconds\n");
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0,
+                                                     recName,
+                                                     true,
+                                                     false,
+                                                     kFileFormatWavFile));
+    SLEEP(3000);
+    TEST_LOG("After 3 seconds we should still be playing\n");
+    TEST_MUSTPASS(!file->IsPlayingFileAsMicrophone(0));
+    SLEEP(600);
+    TEST_LOG("After 3.6 seconds we should still be playing\n");
+    TEST_MUSTPASS(!file->IsPlayingFileAsMicrophone(0));
+
+    TEST_LOG("Set scaling\n");
+    TEST_MUSTPASS(file->ScaleFileAsMicrophonePlayout(0,(float)0.11));
+    SLEEP(200);
+
+    TEST_LOG("Stop playing file as microphone\n");
+    TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+
+    TEST_LOG("==> Start playing a file as microphone again \n");
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, micFile, true , true));
+#else
+    TEST_LOG("\n\n+++ File tests NOT ENABLED +++\n");
+#endif  // #ifdef _TEST_FILE_
+
+#ifdef _XTENDED_TEST_FILE_
+    // Create unique trace files for this test
+    TEST_MUSTPASS(base->SetTraceFileName(GetFilename("VoEFile_trace.txt")));
+    TEST_MUSTPASS(base->SetDebugTraceFileName(GetFilename(
+        "VoEFile_trace_debug.txt")));
+    // turn off default AGC during these tests
+    TEST_MUSTPASS(apm->SetAgcStatus(false));
+    int res = xtend.TestFile(file);
+#ifndef MAC_IPHONE
+    TEST_MUSTPASS(apm->SetAgcStatus(true)); // restore AGC state
+#endif
+    TEST_MUSTPASS(base->Terminate());
+    return res;
+#endif
+
+    ////////////
+    // Network
+
+#ifdef _TEST_NETWORK_
+    TEST_LOG("\n\n+++ Network tests +++\n\n");
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    int sourceRtpPort = 1234;
+    int sourceRtcpPort = 1235;
+
+    int filterPort = -1;
+    int filterPortRTCP = -1;
+    char sourceIp[32] = "127.0.0.1";
+    char filterIp[64] = {0};
+
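+    // The test runs in loopback on 127.0.0.1, so the remote source should be
+    // seen on RTP port 8000 and RTCP port 8001 (RTP port + 1).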
+    SLEEP(200); // Make sure we have received packets
+    
+    TEST_MUSTPASS(netw->GetSourceInfo(0,
+                                      sourceRtpPort,
+                                      sourceRtcpPort,
+                                      sourceIp));
+
+    TEST_LOG("sourceIp = %s, sourceRtpPort = %d, sourceRtcpPort = %d\n",
+             sourceIp, sourceRtpPort, sourceRtcpPort);
+    TEST_MUSTPASS(8000 != sourceRtpPort);
+    TEST_MUSTPASS(8001 != sourceRtcpPort);
+
+    TEST_MUSTPASS(netw->GetSourceFilter(0,
+                                        filterPort,
+                                        filterPortRTCP,
+                                        filterIp));
+    TEST_MUSTPASS(0 != filterPort);
+    TEST_MUSTPASS(0 != filterPortRTCP);
+    TEST_MUSTPASS(_stricmp(filterIp, ""));
+
+    TEST_LOG("Set filter port to %d => should hear audio\n", sourceRtpPort);
+    TEST_MUSTPASS(netw->SetSourceFilter(0,
+                                        sourceRtpPort,
+                                        sourceRtcpPort,
+                                        "0.0.0.0"));
+    TEST_MUSTPASS(netw->GetSourceFilter(0,
+                                        filterPort,
+                                        filterPortRTCP,
+                                        filterIp));
+    TEST_MUSTPASS(sourceRtpPort != filterPort);
+    TEST_MUSTPASS(sourceRtcpPort != filterPortRTCP);
+    TEST_MUSTPASS(_stricmp(filterIp, "0.0.0.0"));
+    SLEEP(1000);
+    TEST_LOG("Set filter port to %d => should *not* hear audio\n",
+             sourceRtpPort+10);
+    TEST_MUSTPASS(netw->SetSourceFilter(0, sourceRtpPort+10));
+    TEST_MUSTPASS(netw->GetSourceFilter(0,
+                                        filterPort,
+                                        filterPortRTCP,
+                                        filterIp));
+    TEST_MUSTPASS(sourceRtpPort+10 != filterPort);
+    SLEEP(1000);
+    TEST_LOG("Disable port filter => should hear audio again\n");
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0));
+    SLEEP(1000);
+
+    if(rtp_rtcp)
+    {
+        TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Tomas"));
+    }
+  
+    TEST_LOG("Set filter IP to %s => should hear audio\n", sourceIp);
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, sourceRtcpPort+10, sourceIp));
+    TEST_MUSTPASS(netw->GetSourceFilter(0,
+                                        filterPort,
+                                        filterPortRTCP,
+                                        filterIp));
+    TEST_MUSTPASS(_stricmp(filterIp, sourceIp));
+    SLEEP(1000);
+    TEST_LOG("Set filter IP to 10.10.10.10 => should *not* hear audio\n");
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, sourceRtcpPort+10,
+                                        "10.10.10.10"));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, filterPort, filterPortRTCP,
+                                        filterIp));
+    TEST_MUSTPASS(_stricmp(filterIp, "10.10.10.10"));
+    SLEEP(1000);
+    TEST_LOG("Disable IP filter => should hear audio again\n");
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, sourceRtcpPort+10, "0.0.0.0"));
+    SLEEP(1000);
+    TEST_LOG("Set filter IP to 10.10.10.10 => should *not* hear audio\n");
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, sourceRtcpPort+10,
+                                        "10.10.10.10"));
+    SLEEP(1000);
+
+    if(rtp_rtcp)
+    {
+        char tmpStr[64];
+        SLEEP(2000);
+        TEST_LOG("Checking RTCP port filter with CNAME...\n");
+        TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+        TEST_MUSTPASS(!_stricmp("Tomas", tmpStr));
+        TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Niklas"));
+    }
+    else
+    {
+        TEST_LOG("Skipping RTCP port filter test since there is no RTP/RTCP "
+            "interface!\n");
+    }
+
+    TEST_LOG("Disable IP filter => should hear audio again\n");
+    TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));
+    TEST_MUSTPASS(netw->GetSourceFilter(0, filterPort, filterPortRTCP,
+                                        filterIp));
+    TEST_MUSTPASS(_stricmp(filterIp, ""));
+    SLEEP(1000);
+
+    TEST_LOG("Wait 2 seconds for packet timeout...\n");
+    TEST_LOG("You should see runtime error %d\n", VE_RECEIVE_PACKET_TIMEOUT);
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(netw->SetPacketTimeoutNotification(0, true, 2));
+    SLEEP(3000);
+
+ #if !defined(_INSTRUMENTATION_TESTING_)
+    TEST_LOG("obs.code is %d\n", obs.code);
+    TEST_MUSTPASS(obs.code != VE_RECEIVE_PACKET_TIMEOUT);
+ #endif
+    obs.code=-1;
+    TEST_MUSTPASS(base->StartSend(0));
+    if (file)
+    {
+        TEST_LOG("Start playing a file as microphone again \n");
+        TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0,
+                                                         micFile,
+                                                         true,
+                                                         true));
+    }
+    TEST_LOG("You should see runtime error %d\n", VE_PACKET_RECEIPT_RESTARTED);
+    SLEEP(1000);
+ #if !defined(_INSTRUMENTATION_TESTING_)
+    TEST_MUSTPASS(obs.code != VE_PACKET_RECEIPT_RESTARTED);
+ #endif
+
+ #if !defined(_INSTRUMENTATION_TESTING_)
+    TEST_LOG("Disabling observer, no runtime error should be seen...\n");
+    TEST_MUSTPASS(base->DeRegisterVoiceEngineObserver());
+    obs.code = -1;
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(netw->SetPacketTimeoutNotification(0, true, 2));
+    SLEEP(2500);
+    TEST_MUSTPASS(obs.code != -1);
+    // disable notifications to avoid additional 8082 callbacks
+    TEST_MUSTPASS(netw->SetPacketTimeoutNotification(0, false, 2));
+    TEST_MUSTPASS(base->StartSend(0));
+    if (file)
+    {
+        TEST_LOG("Start playing a file as microphone again \n");
+        TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0,
+                                                         micFile,
+                                                         true,
+                                                         true));
+    }
+    SLEEP(1000);
+///    TEST_MUSTPASS(obs.code != -1);
+    TEST_LOG("Enabling observer again\n");
+    TEST_MUSTPASS(base->RegisterVoiceEngineObserver(obs));
+ #endif
+
+    TEST_LOG("Enable dead-or-alive callbacks for 4 seconds (dT=1sec)...\n");
+    TEST_LOG("You should see ALIVE messages\n");
+
+    MyDeadOrAlive obs;
+    TEST_MUSTPASS(netw->RegisterDeadOrAliveObserver(0, obs));
+    TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, true, 1));
+    SLEEP(4000);
+
+    // stop sending and flush dead-or-alive states
+    if (rtp_rtcp)
+    {
+        TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, false));
+    }
+    TEST_MUSTPASS(base->StopSend(0));
+    SLEEP(500);
+
+    TEST_LOG("Disable sending for 4 seconds (dT=1sec)...\n");
+    TEST_LOG("You should see DEAD messages (one ALIVE message might"
+        " sneak in if you are unlucky)\n");
+    SLEEP(4000);
+    TEST_LOG("Disable dead-or-alive callbacks.\n");
+    TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, false));
+
+    TEST_LOG("Enabling external transport\n");
+    TEST_MUSTPASS(base->StopReceive(0));
+
+    // recreate the channel to ensure that we can switch from transport
+    // to external transport
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->CreateChannel());
+ 
+    TEST_MUSTPASS(netw->RegisterExternalTransport(0, ch0transport));
+
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    if (file)
+    {
+        TEST_LOG("Start playing a file as microphone again using"
+            " external transport\n");
+        TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0,
+                                                         micFile,
+                                                         true,
+                                                         true));
+    }
+    SLEEP(4000);
+
+    TEST_LOG("Disabling external transport\n");
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+
+    TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+
+    TEST_MUSTPASS(base->SetSendDestination(0, 8000, "127.0.0.1"));
+    TEST_MUSTPASS(base->SetLocalReceiver(0, 8000));
+
+    TEST_MUSTPASS(base->StartReceive(0));
+    TEST_MUSTPASS(base->StartSend(0));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    if (file)
+    {
+        TEST_LOG("Start playing a file as microphone again using transport\n");
+        TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, micFile, true,
+                                                         true));
+    }
+    SLEEP(2000);
+#else
+    TEST_LOG("Skipping network tests - "
+        "WEBRTC_EXTERNAL_TRANSPORT is defined \n");
+#endif // #ifndef WEBRTC_EXTERNAL_TRANSPORT
+#else
+    TEST_LOG("\n\n+++ Network tests NOT ENABLED +++\n");
+#endif  // #ifdef _TEST_NETWORK_
+
+    ///////////////
+    // CallReport
+
+#ifdef _TEST_CALL_REPORT_
+    TEST_LOG("\n\n+++ CallReport tests +++\n\n");
+#if (defined(WEBRTC_VOICE_ENGINE_ECHO) && defined(WEBRTC_VOICE_ENGINE_NR))
+    // TODO(xians): enable the tests when APM is ready.
+    /*
+    TEST(ResetCallReportStatistics);ANL();
+    TEST_MUSTPASS(!report->ResetCallReportStatistics(-2));
+    TEST_MUSTPASS(!report->ResetCallReportStatistics(1));
+    TEST_MUSTPASS(report->ResetCallReportStatistics(0));
+    TEST_MUSTPASS(report->ResetCallReportStatistics(-1));
+
+    bool onOff;
+    LevelStatistics stats;
+    TEST_MUSTPASS(apm->GetMetricsStatus(onOff));
+    TEST_MUSTPASS(onOff != false);
+    // All values should be -100 dBm0 when metrics are disabled
+    TEST(GetSpeechAndNoiseSummary);ANL();
+    TEST_MUSTPASS(report->GetSpeechAndNoiseSummary(stats));
+    TEST_MUSTPASS(stats.noise_rx.min != -100);
+    TEST_MUSTPASS(stats.noise_rx.max != -100);
+    TEST_MUSTPASS(stats.noise_rx.average != -100);
+    TEST_MUSTPASS(stats.noise_tx.min != -100);
+    TEST_MUSTPASS(stats.noise_tx.max != -100);
+    TEST_MUSTPASS(stats.noise_tx.average != -100);
+    TEST_MUSTPASS(stats.speech_rx.min != -100);
+    TEST_MUSTPASS(stats.speech_rx.max != -100);
+    TEST_MUSTPASS(stats.speech_rx.average != -100);
+    TEST_MUSTPASS(stats.speech_tx.min != -100);
+    TEST_MUSTPASS(stats.speech_tx.max != -100);
+    TEST_MUSTPASS(stats.speech_tx.average != -100);
+    TEST_MUSTPASS(apm->SetMetricsStatus(true));
+    SLEEP(3000);
+    // All values should *not* be -100 dBm0 when metrics are enabled
+    // (check Rx side only since user might be silent)
+    TEST_MUSTPASS(report->GetSpeechAndNoiseSummary(stats));
+    TEST_MUSTPASS(stats.noise_rx.min == -100);
+    TEST_MUSTPASS(stats.noise_rx.max == -100);
+    TEST_MUSTPASS(stats.noise_rx.average == -100);
+    TEST_MUSTPASS(stats.speech_rx.min == -100);
+    TEST_MUSTPASS(stats.speech_rx.max == -100);
+    TEST_MUSTPASS(stats.speech_rx.average == -100);
+
+    EchoStatistics echo;
+    TEST(GetEchoMetricSummary);ANL();
+    // all outputs will be -100 in loopback (skip further tests)
+    TEST_MUSTPASS(report->GetEchoMetricSummary(echo));
+
+    StatVal delays;
+    TEST(GetRoundTripTimeSummary);ANL();
+    rtp_rtcp->SetRTCPStatus(0, false);
+    // All values should be -1 since RTCP is off
+    TEST_MUSTPASS(report->GetRoundTripTimeSummary(0, delays));
+    TEST_MUSTPASS(delays.min != -1);
+    TEST_MUSTPASS(delays.max != -1);
+    TEST_MUSTPASS(delays.average != -1);
+    rtp_rtcp->SetRTCPStatus(0, true);
+    SLEEP(5000); // gives time for RTCP
+    TEST_MUSTPASS(report->GetRoundTripTimeSummary(0, delays));
+    TEST_MUSTPASS(delays.min == -1);
+    TEST_MUSTPASS(delays.max == -1);
+    TEST_MUSTPASS(delays.average == -1);
+    rtp_rtcp->SetRTCPStatus(0, false);
+
+    int nDead;
+    int nAlive;
+    // -1 will be returned since dead-or-alive is not active
+    TEST(GetDeadOrAliveSummary);ANL();
+    TEST_MUSTPASS(report->GetDeadOrAliveSummary(0, nDead, nAlive) != -1);
+    // we don't need these callbacks any longer
+    TEST_MUSTPASS(netw->DeRegisterDeadOrAliveObserver(0));
+    TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, true, 1));
+    SLEEP(2000);
+    // All results should be >= 0 since dead-or-alive is active
+    TEST_MUSTPASS(report->GetDeadOrAliveSummary(0, nDead, nAlive));
+    TEST_MUSTPASS(nDead == -1);
+    TEST_MUSTPASS(nAlive == -1);
+    TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, false));
+
+    TEST(WriteReportToFile);ANL();
+    TEST_MUSTPASS(!report->WriteReportToFile(NULL));
+    TEST_MUSTPASS(report->WriteReportToFile("call_report.txt"));
+    */
+#else
+    TEST_LOG("Skipping CallReport tests since both EC and NS are required\n");
+#endif
+#else
+    TEST_LOG("\n\n+++ CallReport tests NOT ENABLED +++\n");
+#endif // #ifdef _TEST_CALL_REPORT_
+
+    //////////////
+    // Video Sync
+
+#ifdef _TEST_VIDEO_SYNC_
+    TEST_LOG("\n\n+++ Video sync tests +++\n\n");
+
+    unsigned int val;
+    TEST_MUSTPASS(vsync->GetPlayoutTimestamp(0, val));
+    TEST_LOG("Playout timestamp = %lu\n", (long unsigned int)val);
+
+    TEST_LOG("Init timestamp and sequence number manually\n");
+    TEST_MUSTPASS(!vsync->SetInitTimestamp(0, 12345));
+    TEST_MUSTPASS(!vsync->SetInitSequenceNumber(0, 123));
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(vsync->SetInitTimestamp(0, 12345));
+    TEST_MUSTPASS(vsync->SetInitSequenceNumber(0, 123));
+    TEST_MUSTPASS(base->StartSend(0));
+    if (file)
+    {
+        TEST_LOG("Start playing a file as microphone again \n");
+        TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0,
+                                                         micFile,
+                                                         true,
+                                                         true));
+    }
+    SLEEP(3000);
+
+    TEST_LOG("Check delay estimates during 15 seconds, verify that "
+        "they stabilize during this time\n");
+    int valInt = -1;
+    for (int i = 0; i < 15; i++)
+    {
+        TEST_MUSTPASS(vsync->GetDelayEstimate(0, valInt));
+        TEST_LOG("Delay estimate = %d ms\n", valInt);
+#if defined(MAC_IPHONE)
+        TEST_MUSTPASS(valInt <= 30);    
+#else
+        TEST_MUSTPASS(valInt <= 45); // 45=20+25 => can't be this low
+#endif
+        SLEEP(1000);
+    }
+
+    TEST_LOG("Setting NetEQ min delay to 500 milliseconds and repeat "
+        "the test above\n");
+    TEST_MUSTPASS(vsync->SetMinimumPlayoutDelay(0, 500));
+    for (int i = 0; i < 15; i++)
+    {
+        TEST_MUSTPASS(vsync->GetDelayEstimate(0, valInt));
+        TEST_LOG("Delay estimate = %d ms\n", valInt);
+        TEST_MUSTPASS(valInt <= 45);
+        SLEEP(1000);
+    }
+
+    TEST_LOG("Setting NetEQ min delay to 0 milliseconds and repeat"
+        " the test above\n");
+    TEST_MUSTPASS(vsync->SetMinimumPlayoutDelay(0, 0));
+    for (int i = 0; i < 15; i++)
+    {
+        TEST_MUSTPASS(vsync->GetDelayEstimate(0, valInt));
+        TEST_LOG("Delay estimate = %d ms\n", valInt);
+        TEST_MUSTPASS(valInt <= 45);
+        SLEEP(1000);
+    }
+
+#if (defined(_WIN32) || (defined(WEBRTC_LINUX) && !defined(ANDROID)))
+    valInt = -1;
+    TEST_MUSTPASS(vsync->GetPlayoutBufferSize(valInt));
+    TEST_LOG("Soundcard buffer size = %d ms\n", valInt);
+#endif
+#else
+    TEST_LOG("\n\n+++ Video sync tests NOT ENABLED +++\n");
+#endif  // #ifdef _TEST_VIDEO_SYNC_
+
+    //////////////
+    // Encryption
+
+#ifdef _TEST_ENCRYPT_
+    TEST_LOG("\n\n+++ Encryption tests +++\n\n");
+
+#ifdef WEBRTC_SRTP
+    TEST_LOG("SRTP tests:\n");
+
+    unsigned char encrKey[30] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 0,
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0};
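+    // The EnableSRTPSend/EnableSRTPReceive arguments as used here are, in
+    // order: cipher type, cipher key length (30), authentication type, auth
+    // key length (20), auth tag length (4), protection level and the key.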
+  
+    TEST_LOG("Enable SRTP encryption and decryption, you should still hear"
+        " the voice\n");
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0,
+                                          kCipherAes128CounterMode,
+                                          30,
+                                          kAuthHmacSha1,
+        20, 4, kEncryptionAndAuthentication, encrKey));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0,
+                                             kCipherAes128CounterMode,
+                                             30,
+                                             kAuthHmacSha1,
+        20, 4, kEncryptionAndAuthentication, encrKey));
+    SLEEP(2000);
+
+    TEST_LOG("Disabling decryption, you should hear nothing or garbage\n");     
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    SLEEP(2000);
+
+    TEST_LOG("Enable decryption again, you should hear the voice again\n");     
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0,
+                                             kCipherAes128CounterMode,
+                                             30,
+                                             kAuthHmacSha1,
+        20, 4, kEncryptionAndAuthentication, encrKey));
+    SLEEP(2000);
+
+    TEST_LOG("Disabling encryption and enabling decryption, you should"
+        " hear nothing\n");
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    SLEEP(2000);
+
+    TEST_LOG("Back to normal\n");
+    // both SRTP sides are now inactive
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    SLEEP(2000);
+
+    TEST_LOG("Enable SRTP and SRTCP encryption and decryption,"
+        " you should still hear the voice\n");
+    TEST_MUSTPASS(encrypt->EnableSRTPSend(0,
+                                          kCipherAes128CounterMode,
+                                          30,
+                                          kAuthHmacSha1,
+        20, 4, kEncryptionAndAuthentication, encrKey, true));
+    TEST_MUSTPASS(encrypt->EnableSRTPReceive(0,
+                                             kCipherAes128CounterMode,
+                                             30,
+                                             kAuthHmacSha1,
+        20, 4, kEncryptionAndAuthentication, encrKey, true));
+    SLEEP(2000);
+
+    TEST_LOG("Back to normal\n");
+    TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+    // both SRTP sides are now inactive
+    TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+    SLEEP(2000);
+
+#else
+    TEST_LOG("Skipping SRTP tests - WEBRTC_SRTP not defined \n");
+#endif // #ifdef WEBRTC_SRTP
+
+    TEST_LOG("\nExternal encryption tests:\n");
+    my_encryption * encObj = new my_encryption;
+    TEST_MUSTPASS(encrypt->RegisterExternalEncryption(0, *encObj));
+    TEST_LOG("Encryption enabled but you should still hear the voice\n");
+    SLEEP(2000);
+    TEST_LOG("Removing encryption object and deleting it\n");
+    TEST_MUSTPASS(encrypt->DeRegisterExternalEncryption(0));
+    delete encObj;
+    SLEEP(2000);
+#else
+    TEST_LOG("\n\n+++ Encryption tests NOT ENABLED +++\n");
+#endif // #ifdef _TEST_ENCRYPT_
+
+    //////////////////
+    // External media
+
+#ifdef _TEST_XMEDIA_
+    TEST_LOG("\n\n+++ External media tests +++\n\n");
+
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    TEST_LOG("Stop playing file as microphone \n");
+    TEST_LOG("==> Talk into the microphone \n");
+    TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+
+    TEST_LOG("Enabling external playout\n");
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(xmedia->SetExternalPlayoutStatus(true));
+    TEST_MUSTPASS(base->StartPlayout(0));
+    TEST_MUSTPASS(base->StartSend(0));
+
+    TEST_LOG("Writing 2 secs of play data to vector\n");
+    int getLen;
+    WebRtc_Word16 speechData[32000];
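+    // 200 iterations x 160 samples (10 ms @ 16 kHz) = 2 seconds of audio,
+    // which exactly fills speechData[32000].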
+    for (int i = 0; i < 200; i++)
+    {
+        TEST_MUSTPASS(xmedia->ExternalPlayoutGetData(speechData+i*160, 
+                                                     16000, 
+                                                     100, 
+                                                     getLen));
+        TEST_MUSTPASS(160 != getLen);
+        SLEEP(10);
+    }
+
+    TEST_LOG("Disabling external playout\n");
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(xmedia->SetExternalPlayoutStatus(false));
+    TEST_MUSTPASS(base->StartPlayout(0));
+
+    TEST_LOG("Enabling external recording\n");
+    TEST_MUSTPASS(xmedia->SetExternalRecordingStatus(true));
+    TEST_MUSTPASS(base->StartSend(0));
+
+    TEST_LOG("Inserting record data from vector\n");
+    for (int i = 0; i < 200; i++)
+    {
+        TEST_MUSTPASS(xmedia->ExternalRecordingInsertData(speechData+i*160, 
+                                                          160, 
+                                                          16000, 
+                                                          20));
+        SLEEP(10);
+    }
+
+    TEST_LOG("Disabling external recording\n");
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(xmedia->SetExternalRecordingStatus(false));
+    TEST_MUSTPASS(base->StartSend(0));
+
+    TEST_LOG("==> Start playing a file as microphone again \n");
+    TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, micFile, true , true));
+#else
+    TEST_LOG("Skipping external rec and playout tests - \
+             WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT not defined \n");
+#endif // WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+
+    TEST_LOG("Enabling playout external media processing => "
+             "played audio should now be affected \n");
+    TEST_MUSTPASS(xmedia->RegisterExternalMediaProcessing(
+        -1, kPlaybackAllChannelsMixed, mobj));
+    SLEEP(2000);
+    TEST_LOG("Back to normal again \n");
+    TEST_MUSTPASS(xmedia->DeRegisterExternalMediaProcessing(
+        -1, kPlaybackAllChannelsMixed));
+    SLEEP(2000);
+    // Note that we must do per channel here because PlayFileAsMicrophone
+    // is only done on ch 0.
+    TEST_LOG("Enabling recording external media processing => "
+             "played audio should now be affected \n");
+    TEST_MUSTPASS(xmedia->RegisterExternalMediaProcessing(
+        0, kRecordingPerChannel, mobj));
+    SLEEP(2000);
+    TEST_LOG("Back to normal again \n");
+    TEST_MUSTPASS(xmedia->DeRegisterExternalMediaProcessing(
+        0, kRecordingPerChannel));
+    SLEEP(2000);
+    TEST_LOG("Enabling recording external media processing => "
+             "speak and make sure that voice is affected \n");
+    TEST_MUSTPASS(xmedia->RegisterExternalMediaProcessing(
+        -1, kRecordingAllChannelsMixed, mobj));
+    SLEEP(2000);
+    TEST_LOG("Back to normal again \n");
+    TEST_MUSTPASS(xmedia->DeRegisterExternalMediaProcessing(
+        -1, kRecordingAllChannelsMixed));
+    SLEEP(2000);
+#else
+    TEST_LOG("\n\n+++ External media tests NOT ENABLED +++\n");
+#endif // #ifdef _TEST_XMEDIA_
+
+    /////////////////////
+    // NetEQ statistics
+
+#ifdef _TEST_NETEQ_STATS_
+    TEST_LOG("\n\n+++ NetEQ statistics tests +++\n\n");
+
+#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+    NetworkStatistics nStats;
+    TEST_MUSTPASS(neteqst->GetNetworkStatistics(0, nStats));
+    TEST_LOG("\nNetwork statistics: \n");
+    TEST_LOG("    currentAccelerateRate     = %hu \n",
+             nStats.currentAccelerateRate);
+    TEST_LOG("    currentBufferSize         = %hu \n",
+             nStats.currentBufferSize);
+    TEST_LOG("    currentDiscardRate        = %hu \n",
+             nStats.currentDiscardRate);
+    TEST_LOG("    currentExpandRate         = %hu \n",
+             nStats.currentExpandRate);
+    TEST_LOG("    currentPacketLossRate     = %hu \n",
+             nStats.currentPacketLossRate);
+    TEST_LOG("    currentPreemptiveRate     = %hu \n",
+             nStats.currentPreemptiveRate);
+    TEST_LOG("    preferredBufferSize       = %hu \n",
+             nStats.preferredBufferSize);
+
+    JitterStatistics jStats;
+    TEST_MUSTPASS(neteqst->GetJitterStatistics(0, jStats));
+    TEST_LOG("\nJitter statistics: \n");
+    TEST_LOG("    jbMinSize                 = %u \n",
+             jStats.jbMinSize);
+    TEST_LOG("    jbMaxSize                 = %u \n",
+             jStats.jbMaxSize);
+    TEST_LOG("    jbAvgSize                 = %u \n",
+             jStats.jbAvgSize);
+    TEST_LOG("    jbChangeCount             = %u \n",
+             jStats.jbChangeCount);
+    TEST_LOG("    lateLossMs                = %u \n",
+             jStats.lateLossMs);
+    TEST_LOG("    accelerateMs              = %u \n",
+             jStats.accelerateMs);
+    TEST_LOG("    flushedMs                 = %u \n",
+             jStats.flushedMs);
+    TEST_LOG("    generatedSilentMs         = %u \n",
+             jStats.generatedSilentMs);
+    TEST_LOG("    interpolatedVoiceMs       = %u \n",
+             jStats.interpolatedVoiceMs);
+    TEST_LOG("    interpolatedSilentMs      = %u \n",
+             jStats.interpolatedSilentMs);
+    TEST_LOG("    countExpandMoreThan120ms  = %u \n",
+             jStats.countExpandMoreThan120ms);
+    TEST_LOG("    countExpandMoreThan250ms  = %u \n",
+             jStats.countExpandMoreThan250ms);
+    TEST_LOG("    countExpandMoreThan500ms  = %u \n",
+             jStats.countExpandMoreThan500ms);
+    TEST_LOG("    countExpandMoreThan2000ms = %u \n",
+             jStats.countExpandMoreThan2000ms);
+    TEST_LOG("    longestExpandDurationMs   = %u \n",
+             jStats.longestExpandDurationMs);
+    TEST_LOG("    countIAT500ms             = %u \n",
+             jStats.countIAT500ms);
+    TEST_LOG("    countIAT1000ms            = %u \n",
+             jStats.countIAT1000ms);
+    TEST_LOG("    countIAT2000ms            = %u \n",
+             jStats.countIAT2000ms);
+    TEST_LOG("    longestIATms              = %u \n",
+             jStats.longestIATms);
+    TEST_LOG("    minPacketDelayMs          = %u \n",
+             jStats.minPacketDelayMs);
+    TEST_LOG("    maxPacketDelayMs          = %u \n",
+             jStats.maxPacketDelayMs);
+    TEST_LOG("    avgPacketDelayMs          = %u \n",
+             jStats.avgPacketDelayMs);
+
+    unsigned short preferredBufferSize;
+    TEST_MUSTPASS(neteqst->GetPreferredBufferSize(0, preferredBufferSize));
+    TEST_MUSTPASS(preferredBufferSize != nStats.preferredBufferSize);
+
+    TEST_MUSTPASS(neteqst->ResetJitterStatistics(0));
+#else
+    TEST_LOG("Skipping NetEQ statistics tests - "
+        "WEBRTC_VOICE_ENGINE_NETEQ_STATS_API not defined \n");
+#endif // #ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+#else
+    TEST_LOG("\n\n+++ NetEQ statistics tests NOT ENABLED +++\n");
+#endif // #ifdef _TEST_NETEQ_STATS_
+
+ 
+    //////////////////
+    // Stop streaming
+
+    TEST_LOG("\n\n+++ Stop streaming +++\n\n");
+
+    TEST_LOG("Stop playout, sending and listening \n");
+    TEST_MUSTPASS(base->StopPlayout(0));
+    TEST_MUSTPASS(base->StopSend(0));
+    TEST_MUSTPASS(base->StopReceive(0));
+
+// Exit:
+
+    TEST_LOG("Delete channel and terminate VE \n");
+    TEST_MUSTPASS(base->DeleteChannel(0));
+    TEST_MUSTPASS(base->Terminate());
+
+    return 0;
+}
+
+int runAutoTest(TestType testType, ExtendedSelection extendedSel)
+{
+    SubAPIManager apiMgr;
+    apiMgr.DisplayStatus();
+
+#ifdef MAC_IPHONE
+    // Write mic file path to buffer
+    TEST_LOG("Get mic file path \n");
+    if (0 != GetResource("audio_long16.pcm", micFile, 256))
+    {
+        TEST_LOG("Failed get mic file path! \n");
+    }
+#endif
+
+    ////////////////////////////////////
+    // Create VoiceEngine and sub API:s
+
+    voetest::VoETestManager tm;
+    tm.GetInterfaces();
+
+    //////////////////////
+    // Run standard tests
+
+    int mainRet(-1);
+    if (testType == Standard)
+    {
+        mainRet = tm.DoStandardTest();
+
+        ////////////////////////////////
+        // Create configuration summary
+
+        TEST_LOG("\n\n+++ Creating configuration summary file +++\n");
+        createSummary(tm.VoiceEnginePtr());
+    }
+    else if (testType == Extended)
+    {
+        VoEExtendedTest xtend(tm);
+
+        mainRet = 0;
+        while (extendedSel != XSEL_None)
+        {
+            if (extendedSel == XSEL_Base || extendedSel == XSEL_All)
+            {
+                if ((mainRet = xtend.TestBase()) == -1)
+                    break;
+                xtend.TestPassed("Base");
+            }
+            if (extendedSel == XSEL_CallReport || extendedSel == XSEL_All)
+            {
+                if ((mainRet = xtend.TestCallReport()) == -1)
+                    break;
+                xtend.TestPassed("CallReport");
+            }
+            if (extendedSel == XSEL_Codec || extendedSel == XSEL_All)
+            {
+                if ((mainRet = xtend.TestCodec()) == -1)
+                    break;
+                xtend.TestPassed("Codec");
+            }
+            if (extendedSel == XSEL_DTMF || extendedSel == XSEL_All)
+            {
+                if ((mainRet = xtend.TestDtmf()) == -1)
+                    break;
+                xtend.TestPassed("Dtmf");
+            }
+            if (extendedSel == XSEL_Encryption || extendedSel == XSEL_All)
+            {
+                if ((mainRet = xtend.TestEncryption()) == -1)
+                    break;
+                xtend.TestPassed("Encryption");
+            }
+            if (extendedSel == XSEL_ExternalMedia || extendedSel == XSEL_All)
+            {
+                if ((mainRet = xtend.TestExternalMedia()) == -1)
+                    break;
+                xtend.TestPassed("ExternalMedia");
+            }
+            if (extendedSel == XSEL_File || extendedSel == XSEL_All)
+            {
+                if ((mainRet = xtend.TestFile()) == -1)
+                    break;
+                xtend.TestPassed("File");
+            }
+            if (extendedSel == XSEL_Hardware || extendedSel == XSEL_All)
+            {
+                if ((mainRet = xtend.TestHardware()) == -1)
+                    break;
+                xtend.TestPassed("Hardware");
+            }
+            if (extendedSel == XSEL_NetEqStats || extendedSel == XSEL_All)
+            {
+                if ((mainRet = xtend.TestNetEqStats()) == -1)
+                    break;
+                xtend.TestPassed("NetEqStats");
+            }
+            if (extendedSel == XSEL_Network || extendedSel == XSEL_All)
+            {
+                if ((mainRet = xtend.TestNetwork()) == -1)
+                    break;
+                xtend.TestPassed("Network");
+            }
+            if (extendedSel == XSEL_RTP_RTCP || extendedSel == XSEL_All)
+            {
+                if ((mainRet = xtend.TestRTP_RTCP()) == -1)
+                    break;
+                xtend.TestPassed("RTP_RTCP");
+            }
+            if (extendedSel == XSEL_VideoSync || extendedSel == XSEL_All)
+            {
+                if ((mainRet = xtend.TestVideoSync()) == -1)
+                    break;
+                xtend.TestPassed("VideoSync");
+            }
+            if (extendedSel == XSEL_VolumeControl || extendedSel == XSEL_All)
+            {
+                if ((mainRet = xtend.TestVolumeControl()) == -1)
+                    break;
+                xtend.TestPassed("VolumeControl");
+            }
+            if (extendedSel == XSEL_AudioProcessing || extendedSel == XSEL_All)
+            {
+                if ((mainRet = xtend.TestAPM()) == -1)
+                    break;
+                xtend.TestPassed("AudioProcessing");
+            }
+            apiMgr.GetExtendedMenuSelection(extendedSel);
+        }  // while (extendedSel != XSEL_None)
+    }
+    else if (testType == Stress)
+    {
+        VoEStressTest stressTest(tm);
+        mainRet = stressTest.DoTest();
+    }
+    else if (testType == Unit)
+    {
+        VoEUnitTest unitTest(tm);
+        mainRet = unitTest.DoTest();
+    }
+    else if (testType == CPU)
+    {
+        VoECpuTest cpuTest(tm);
+        mainRet = cpuTest.DoTest();
+    }
+    else
+    {
+        // Should never end up here
+        TEST_LOG("INVALID SELECTION \n");
+    }
+
+
+    //////////////////
+    // Release/Delete
+
+    int releaseOK = tm.ReleaseInterfaces();
+
+    if ((0 == mainRet) && (releaseOK != -1))
+    {
+        TEST_LOG("\n\n*** All tests passed *** \n\n");
+    }
+    else
+    {
+        TEST_LOG("\n\n*** Test failed! *** \n");
+    }
+
+    return 0;
+}
+
+void createSummary(VoiceEngine* ve)
+{
+    int len;
+    char str[256];
+
+#ifdef MAC_IPHONE
+    char summaryFilename[256];
+    GetDocumentsDir(summaryFilename, 256);
+    strcat(summaryFilename, "/summary.txt");
+#endif
+
+    VoEBase* base = VoEBase::GetInterface(ve);
+    FILE* stream = fopen(summaryFilename, "wt");
+
+    sprintf(str, "WebRTc VoiceEngine ");
+#if defined(_WIN32)
+    strcat(str, "Win");
+#elif defined(WEBRTC_LINUX) && defined(WEBRTC_TARGET_PC) && !defined(ANDROID)
+    strcat(str, "Linux");
+#elif defined(WEBRTC_MAC) && !defined(MAC_IPHONE)
+    strcat(str, "Mac");
+#elif defined(ANDROID)
+    strcat(str, "Android");
+#elif defined(MAC_IPHONE)
+    strcat(str, "iPhone");
+#endif
+    // Add for other platforms as needed
+
+    fprintf(stream, "%s\n", str);
+    len = (int)strlen(str);
+    for (int i=0; i<len; i++)
+    {
+        fprintf(stream, "=");
+    }
+    fprintf(stream, "\n\n");
+
+    char version[1024];
+    char veVersion[24];
+    base->GetVersion(version);
+    // find first NL <=> end of VoiceEngine version string
+    int pos = (int)strcspn(version, "\n");
+    strncpy(veVersion, version, pos);
+    veVersion[pos] = '\0';
+    sprintf(str, "Version:                    %s\n", veVersion);
+    fprintf(stream, "%s\n", str);
+
+    sprintf(str, "Build date & time:          %s\n", BUILDDATE " " BUILDTIME);
+    fprintf(stream, "%s\n", str);
+
+    strcpy(str, "G.711 A-law");
+    fprintf(stream, "\nSupported codecs:           %s\n", str); 
+    strcpy(str, "                            G.711 mu-law");
+    fprintf(stream, "%s\n", str);
+#ifdef WEBRTC_CODEC_EG711
+    strcpy(str, "                            Enhanced G.711 A-law");
+    fprintf(stream, "%s\n", str);
+    strcpy(str, "                            Enhanced G.711 mu-law");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_IPCMWB
+    strcpy(str, "                            iPCM-wb");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_ILBC
+    strcpy(str, "                            iLBC");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+    strcpy(str, "                            iSAC");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_ISACLC
+    strcpy(str, "                            iSAC-LC");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_G722
+    strcpy(str, "                            G.722");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_G722_1
+    strcpy(str, "                            G.722.1");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_G722_1C
+    strcpy(str, "                            G.722.1C");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_G723
+    strcpy(str, "                            G.723");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_G726
+    strcpy(str, "                            G.726");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_G729
+    strcpy(str, "                            G.729");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_G729_1
+    strcpy(str, "                            G.729.1");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_GSMFR
+    strcpy(str, "                            GSM-FR");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_GSMAMR
+    strcpy(str, "                            AMR");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_GSMAMRWB
+    strcpy(str, "                            AMR-WB");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_GSMEFR
+    strcpy(str, "                            GSM-EFR");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_SPEEX
+    strcpy(str, "                            Speex");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_SILK
+    strcpy(str, "                            Silk");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_CODEC_PCM16
+    strcpy(str, "                            L16");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef NETEQFIX_VOXWARE_SC3
+    strcpy(str, "                            Voxware SC3");
+    fprintf(stream, "%s\n", str);
+#endif
+    // Always included
+    strcpy(str, "                            AVT (RFC2833)");
+    fprintf(stream, "%s\n", str);
+#ifdef WEBRTC_CODEC_RED
+    strcpy(str, "                            RED (forward error correction)");
+    fprintf(stream, "%s\n", str);
+#endif
+
+    fprintf(stream, "\nEcho Control:               ");
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+    fprintf(stream, "Yes\n");
+#else
+    fprintf(stream, "No\n");
+#endif
+
+    fprintf(stream, "\nAutomatic Gain Control:     ");
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    fprintf(stream, "Yes\n");
+#else
+    fprintf(stream, "No\n");
+#endif
+
+    fprintf(stream, "\nNoise Reduction:            ");
+#ifdef WEBRTC_VOICE_ENGINE_NR
+    fprintf(stream, "Yes\n");
+#else
+    fprintf(stream, "No\n");
+#endif
+
+    fprintf(stream, "\nSRTP:                       ");
+#ifdef WEBRTC_SRTP
+    fprintf(stream, "Yes\n");
+#else
+    fprintf(stream, "No\n");
+#endif
+
+    fprintf(stream, "\nExternal transport only:    ");
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+    fprintf(stream, "Yes\n");
+#else
+    fprintf(stream, "No\n");
+#endif
+
+    fprintf(stream, "\nTelephone event detection:  ");
+#ifdef WEBRTC_DTMF_DETECTION
+    fprintf(stream, "Yes\n");
+#else
+    fprintf(stream, "No\n");
+#endif
+
+    strcpy(str, "VoEBase");
+    fprintf(stream, "\nSupported sub-APIs:         %s\n", str); 
+#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
+    strcpy(str, "                            VoECodec");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
+    strcpy(str, "                            VoEDtmf");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_FILE_API
+    strcpy(str, "                            VoEFile");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
+    strcpy(str, "                            VoEHardware");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
+    strcpy(str, "                            VoENetwork");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+    strcpy(str, "                            VoERTP_RTCP");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+    strcpy(str, "                            VoEVolumeControl");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+    strcpy(str, "                            VoEAudioProcessing");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+    strcpy(str, "                            VoeExternalMedia");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+    strcpy(str, "                            VoENetEqStats");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+    strcpy(str, "                            VoEEncryption");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+    strcpy(str, "                            VoECallReport");
+    fprintf(stream, "%s\n", str);
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+    strcpy(str, "                            VoEVideoSync");
+    fprintf(stream, "%s\n", str);
+#endif
+
+    fclose(stream);
+    base->Release();
+}
+
+/*********Knowledge Base******************/
+
+// An example of creating a thread and calling VoiceEngine APIs from that thread.
+// Uses ThreadWrapper, a generic thread class available on all platforms.
+#ifdef THEADTEST
+//Definition of Thread Class
+class ThreadTest 
+{
+public:
+    ThreadTest(
+        VoEBase* base);
+    ~ThreadTest() 
+    {
+        delete _myThread;
+    }
+    void Stop();
+private:
+    static bool StartSend(
+        void* obj);
+    bool StartSend();
+
+    ThreadWrapper* _myThread;
+    VoEBase* _base;
+
+    bool _stopped;
+};
+
+// Constructor from which StartSend is invoked on a separate thread.
+ThreadTest::ThreadTest(
+    VoEBase* base)
+    :
+    _stopped(false),
+    _base(base)
+{
+    //Thread Creation
+    _myThread = ThreadWrapper::CreateThread(StartSend, this, kLowPriority);
+    unsigned int id  = 0;
+    //Starting the thread
+    _myThread->Start(id);
+}
+
+// Static trampoline that forwards to the member StartSend(); avoids exposing a
+// non-static thread callback.
+bool
+ThreadTest::StartSend(
+    void* obj)
+{
+    return ((ThreadTest*)obj)->StartSend(); 
+}
+
+
+bool
+ThreadTest::StartSend()
+{
+    _myThread->SetNotAlive();  //Ensures this function is called only once.
+    _base->StartSend(0);
+    return true;
+}
+
+void ThreadTest::Stop()
+{
+    _stopped = true;
+}
+
+//  Use the following to invoke ThreadTest from the main function:
+//  ThreadTest* threadtest = new ThreadTest(base);
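+//
+//  Minimal follow-up sketch (assumption: base->Init() has been called and
+//  channel 0 exists before the helper is created); the destructor deletes the
+//  underlying ThreadWrapper:
+//      threadtest->Stop();
+//      delete threadtest;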
+#endif
+
+// An example of creating a thread and calling VoiceEngine APIs from that thread.
+// Specific to the Windows platform.
+#ifdef THREAD_TEST_WINDOWS
+// Thread declaration. Needs to be added to the class that controls the main code.
+/**
+private:
+    static unsigned int WINAPI StartSend(void* obj);
+    unsigned int WINAPI StartSend();
+**/
+
+//Thread Definition
+unsigned int WINAPI mainTest::StartSend(void *obj)
+{
+    return ((mainTest*)obj)->StartSend();   
+}
+unsigned int WINAPI mainTest::StartSend()
+{
+    //base
+    base->StartSend(0);
+
+    //  TEST_MUSTPASS(base->StartSend(0));
+    TEST_LOG("hi hi hi");
+    return 0;
+}
+
+// Thread invocation from the main code.
+/*****
+    unsigned int threadID=0;
+    if ((HANDLE)_beginthreadex(NULL,
+                               0,
+                               StartSend,
+                               (void*)this,
+                               0,
+                                &threadID) == NULL)
+        return false;
+****/
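+// Note (assumption): the handle returned by _beginthreadex() should eventually
+// be closed with CloseHandle() once the thread has finished; the snippet above
+// omits that for brevity.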
+
+#endif
+
+}  // namespace voetest
+
+
+
+// ----------------------------------------------------------------------------
+//                                       main
+// ----------------------------------------------------------------------------
+
+using namespace voetest;
+
+#if !defined(MAC_IPHONE) && !defined(ANDROID)
+int main(int , char** )
+{
+    SubAPIManager apiMgr;
+    apiMgr.DisplayStatus();
+
+    printf("----------------------------\n");
+    printf("Select type of test\n\n");
+    printf(" (0)  Quit\n");
+    printf(" (1)  Standard test\n");
+    printf(" (2)  Extended test(s)...\n");
+    printf(" (3)  Stress test(s)...\n");
+    printf(" (4)  Unit test(s)...\n");
+    printf(" (5)  CPU & memory reference test [Windows]...\n");
+    printf("\n: ");
+
+    int selection(0);
+
+    dummy = scanf("%d", &selection);
+
+    ExtendedSelection extendedSel(XSEL_Invalid);
+
+    enum TestType testType(Invalid);
+
+    switch (selection)
+    {
+    case 0:
+        return 0;
+    case 1:
+        testType = Standard;
+        break;
+    case 2:
+        testType = Extended;
+        while (!apiMgr.GetExtendedMenuSelection(extendedSel))
+            ;
+        break;
+    case 3:
+        testType = Stress;
+        break;
+    case 4:
+        testType = Unit;
+        break;
+    case 5:
+        testType = CPU;
+        break;
+    default:
+        TEST_LOG("Invalid selection!\n");
+        return 0;
+    }
+
+    // runAutoTest() is the actual test entry point; it can also be called
+    // from other entry functions (e.g. on mobile platforms).
+    int retVal = runAutoTest(testType, extendedSel);
+
+    return retVal;
+}
+#endif  // !defined(MAC_IPHONE) && !defined(ANDROID)
diff --git a/voice_engine/main/test/auto_test/voe_standard_test.h b/voice_engine/main/test/auto_test/voe_standard_test.h
new file mode 100644
index 0000000..4c17bfa
--- /dev/null
+++ b/voice_engine/main/test/auto_test/voe_standard_test.h
@@ -0,0 +1,375 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_STANDARD_TEST_H
+#define WEBRTC_VOICE_ENGINE_VOE_STANDARD_TEST_H
+
+#include "voe_test_defines.h"
+#include "voe_test_interface.h"
+
+#include "voe_errors.h"
+#include "voe_base.h"
+#include "voe_file.h"
+#include "voe_dtmf.h"
+#include "voe_rtp_rtcp.h"
+#include "voe_audio_processing.h"
+#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+#include "voe_call_report.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
+#include "voe_codec.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+#include "voe_encryption.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+#include "voe_external_media.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
+#include "voe_hardware.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
+#include "voe_network.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+#include "voe_video_sync.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+#include "voe_volume_control.h"
+#endif
+
+#ifdef _TEST_NETEQ_STATS_
+namespace webrtc
+{
+class CriticalSectionWrapper;
+class ThreadWrapper;
+class VoENetEqStats;
+}
+#endif
+
+#if defined(ANDROID)
+extern char mobileLogMsg[640];
+#endif
+
+namespace voetest
+{
+
+void createSummary(VoiceEngine* ve);
+void prepareDelivery();
+
+class MyRTPObserver: public VoERTPObserver
+{
+public:
+    MyRTPObserver();
+    ~MyRTPObserver();
+    virtual void OnIncomingCSRCChanged(const int channel,
+                                       const unsigned int CSRC,
+                                       const bool added);
+    virtual void OnIncomingSSRCChanged(const int channel,
+                                       const unsigned int SSRC);
+    void Reset();
+public:
+    unsigned int _SSRC[2];
+    unsigned int _CSRC[2][2]; // stores 2 CSRCs for each channel
+    bool _added[2][2];
+    int _size[2];
+};
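+
+// Sketch (assumption): an observer like the one above is typically registered
+// per channel via VoERTP_RTCP::RegisterRTPObserver(channel, observer) and
+// removed with DeRegisterRTPObserver(channel); see voe_rtp_rtcp.h for the
+// exact signatures.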
+
+class MyTraceCallback: public TraceCallback
+{
+public:
+    void Print(const TraceLevel level, const char *traceString,
+               const int length);
+};
+
+class MyDeadOrAlive: public VoEConnectionObserver
+{
+public:
+    void OnPeriodicDeadOrAlive(const int channel, const bool alive);
+};
+
+class ErrorObserver: public VoiceEngineObserver
+{
+public:
+    ErrorObserver();
+    void CallbackOnError(const int channel, const int errCode);
+public:
+    int code;
+};
+
+class RtcpAppHandler: public VoERTCPObserver
+{
+public:
+    void OnApplicationDataReceived(const int channel,
+                                   const unsigned char subType,
+                                   const unsigned int name,
+                                   const unsigned char* data,
+                                   const unsigned short dataLengthInBytes);
+    void Reset();
+    ~RtcpAppHandler()
+    {
+    };
+    unsigned short _lengthBytes;
+    unsigned char _data[256];
+    unsigned char _subType;
+    unsigned int _name;
+};
+
+class DtmfCallback: public VoETelephoneEventObserver
+{
+public:
+    int counter;
+    DtmfCallback()
+    {
+        counter = 0;
+    }
+    virtual void OnReceivedTelephoneEventInband(const int channel,
+                                                const unsigned char eventCode,
+                                                const bool endOfEvent)
+    {
+        char msg[128];
+        if (endOfEvent)
+            sprintf(msg, "(event=%d, [END])", eventCode);
+        else
+            sprintf(msg, "(event=%d, [START])", eventCode);
+        TEST_LOG("%s", msg);
+        if (!endOfEvent)
+            counter++; // count start of event only
+        fflush(NULL);
+    }
+
+    virtual void OnReceivedTelephoneEventOutOfBand(
+        const int channel,
+        const unsigned char eventCode,
+        const bool endOfEvent)
+    {
+        char msg[128];
+        if (endOfEvent)
+            sprintf(msg, "(event=%d, [END])", eventCode);
+        else
+            sprintf(msg, "(event=%d, [START])", eventCode);
+        TEST_LOG("%s", msg);
+        if (!endOfEvent)
+            counter++; // count start of event only
+        fflush(NULL);
+    }
+};
+
+class my_encryption: public Encryption
+{
+    void encrypt(int channel_no, unsigned char * in_data,
+                 unsigned char * out_data, int bytes_in, int * bytes_out);
+    void decrypt(int channel_no, unsigned char * in_data,
+                 unsigned char * out_data, int bytes_in, int * bytes_out);
+    void encrypt_rtcp(int channel_no, unsigned char * in_data,
+                      unsigned char * out_data, int bytes_in, int * bytes_out);
+    void decrypt_rtcp(int channel_no, unsigned char * in_data,
+                      unsigned char * out_data, int bytes_in, int * bytes_out);
+};
+
+class RxCallback: public VoERxVadCallback
+{
+public:
+    RxCallback() :
+        _vadDecision(-1)
+    {
+    };
+
+    virtual void OnRxVad(int, int vadDecision)
+    {
+        char msg[128];
+        sprintf(msg, "RX VAD detected decision %d \n", vadDecision);
+        TEST_LOG("%s", msg);
+        _vadDecision = vadDecision;
+    }
+
+    int _vadDecision;
+};
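+
+// Sketch (assumption): the callback above is hooked up per channel with
+// VoEAudioProcessing::RegisterRxVadObserver(channel, callback) and removed
+// with DeRegisterRxVadObserver(channel); see voe_audio_processing.h for the
+// exact signatures.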
+
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+class MyMedia: public VoEMediaProcess
+{
+public:
+    virtual void Process(const int channel, const ProcessingTypes type,
+                         WebRtc_Word16 audio_10ms[], const int length,
+                         const int samplingFreqHz, const bool stereo);
+private:
+    int f;
+};
+#endif
+
+class SubAPIManager
+{
+public:
+    SubAPIManager() :
+        _base(true),
+        _callReport(false),
+        _codec(false),
+        _dtmf(false),
+        _encryption(false),
+        _externalMedia(false),
+        _file(false),
+        _hardware(false),
+        _netEqStats(false),
+        _network(false),
+        _rtp_rtcp(false),
+        _videoSync(false),
+        _volumeControl(false),
+        _apm(false),
+        _xsel(XSEL_Invalid)
+    {
+#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+        _callReport = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
+        _codec = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
+        _dtmf = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+        _encryption = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+        _externalMedia = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_FILE_API
+        _file = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
+        _hardware = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+        _netEqStats = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
+        _network = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+        _rtp_rtcp = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+        _videoSync = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+        _volumeControl = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+        _apm = true;
+#endif
+    };
+
+    void DisplayStatus() const;
+    bool GetExtendedMenuSelection(ExtendedSelection& sel);
+
+private:
+    bool _base, _callReport, _codec, _dtmf, _encryption;
+    bool _externalMedia, _file, _hardware;
+    bool _netEqStats, _network, _rtp_rtcp, _videoSync, _volumeControl, _apm;
+    ExtendedSelection _xsel;
+};
+
+class VoETestManager
+{
+public:
+    VoETestManager();
+    ~VoETestManager();
+
+    void GetInterfaces();
+    int ReleaseInterfaces();
+    int DoStandardTest();
+
+    VoiceEngine* VoiceEnginePtr() const
+    {
+        return ve;
+    };
+    VoEBase* BasePtr() const
+    {
+        return base;
+    };
+    VoECodec* CodecPtr() const
+    {
+        return codec;
+    };
+    VoEVolumeControl* VolumeControlPtr() const
+    {
+        return volume;
+    };
+    VoEDtmf* DtmfPtr() const
+    {
+        return dtmf;
+    };
+    VoERTP_RTCP* RTP_RTCPPtr() const
+    {
+        return rtp_rtcp;
+    };
+    VoEAudioProcessing* APMPtr() const
+    {
+        return apm;
+    };
+    VoENetwork* NetworkPtr() const
+    {
+        return netw;
+    };
+    VoEFile* FilePtr() const
+    {
+        return file;
+    };
+    VoEHardware* HardwarePtr() const
+    {
+        return hardware;
+    };
+    VoEVideoSync* VideoSyncPtr() const
+    {
+        return vsync;
+    };
+    VoEEncryption* EncryptionPtr() const
+    {
+        return encrypt;
+    };
+    VoEExternalMedia* ExternalMediaPtr() const
+    {
+        return xmedia;
+    };
+    VoECallReport* CallReportPtr() const
+    {
+        return report;
+    };
+#ifdef _TEST_NETEQ_STATS_
+    VoENetEqStats* NetEqStatsPtr() const
+    {
+        return neteqst;
+    };
+#endif
+
+private:
+    VoiceEngine* ve;
+    VoEBase* base;
+    VoECodec* codec;
+    VoEVolumeControl* volume;
+    VoEDtmf* dtmf;
+    VoERTP_RTCP* rtp_rtcp;
+    VoEAudioProcessing* apm;
+    VoENetwork* netw;
+    VoEFile* file;
+    VoEHardware* hardware;
+    VoEVideoSync* vsync;
+    VoEEncryption* encrypt;
+    VoEExternalMedia* xmedia;
+    VoECallReport* report;
+#ifdef _TEST_NETEQ_STATS_
+    VoENetEqStats* neteqst;
+#endif
+    int instanceCount;
+};
+
+} // namespace voetest
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_STANDARD_TEST_H
diff --git a/voice_engine/main/test/auto_test/voe_stress_test.cc b/voice_engine/main/test/auto_test/voe_stress_test.cc
new file mode 100644
index 0000000..ba98b06
--- /dev/null
+++ b/voice_engine/main/test/auto_test/voe_stress_test.cc
@@ -0,0 +1,454 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+//       Some ideas of improvements:
+//       Break out common init and maybe terminate to separate function(s).
+//       How much trace should we have enabled?
+//       API error counter, to print info and return -1 if any error.
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <cassert>
+#if defined(_WIN32)
+ #include <conio.h>
+#endif
+
+#include "voe_stress_test.h"
+#include "voe_standard_test.h"
+
+#include "../../source/voice_engine_defines.h"  // defines build macros
+
+#include "thread_wrapper.h"
+
+using namespace webrtc;
+
+namespace voetest {
+
+#define VALIDATE_STRESS(expr)                                   \
+    if (expr)                                                   \
+    {                                                           \
+        printf("Error at line: %i, %s \n", __LINE__, #expr);    \
+        printf("Error code: %i \n", base->LastError());  \
+    }
+
+#ifdef _WIN32
+ // Pause if supported
+ #define PAUSE_OR_SLEEP(x) PAUSE;
+#else
+ // Sleep a bit instead if pause not supported
+ #define PAUSE_OR_SLEEP(x) SLEEP(x);
+#endif
+
+extern char* GetFilename(char* filename);
+extern const char* GetFilename(const char* filename);
+extern int GetResource(char* resource, char* dest, int destLen);
+extern char* GetResource(char* resource);
+extern const char* GetResource(const char* resource);
+
+
+const char* VoEStressTest::_key = "====YUtFWRAAAAADBtIHgAAAAAEAAAAcAAAAAQBHU0ds"
+    "b2JhbCBJUCBTb3VuZAAC\nAAAAIwAAAExpY2Vuc2VkIHRvIE5vcnRlbCBOZXR3cm9rcwAAAAA"
+    "xAAAAZxZ7/u0M\niFYyTwSwko5Uutf7mh8S0O4rYZYTFidbzQeuGonuL17F/2oD/2pfDp3jL4"
+    "Rf3z/A\nnlJsEJgEtASkDNFuwLILjGY0pzjjAYQp3pCl6z6k2MtE06AirdjGLYCjENpq/opX"
+    "\nOrs3sIuwdYK5va/aFcsjBDmlsGCUM48RDYG9s23bIHYafXUC4ofOaubbZPWiPTmL\nEVJ8WH"
+    "4F9pgNjALc14oJXfON7r/3\n=EsLx";
+
+
+int VoEStressTest::DoTest()
+{
+    int test(-1);
+    while (test != 0)
+    {
+        test = MenuSelection();
+        switch (test)
+        {
+            case 0:
+                // Quit stress test
+                break;
+            case 1:
+                // All tests
+                StartStopTest();
+                CreateDeleteChannelsTest();
+                MultipleThreadsTest();
+                break;
+            case 2:
+                StartStopTest();
+                break;
+            case 3:
+                CreateDeleteChannelsTest();
+                break;
+            case 4:
+                MultipleThreadsTest();
+                break;
+            default:
+                // Should not be possible
+                printf("Invalid selection! (Test code error)\n");
+                assert(false);
+        } // switch
+    } // while
+
+    return 0;
+}
+
+
+int VoEStressTest::MenuSelection()
+{
+    printf("------------------------------------------------\n");
+    printf("Select stress test\n\n");
+    printf(" (0)  Quit\n");
+    printf(" (1)  All\n");
+    printf("- - - - - - - - - - - - - - - - - - - - - - - - \n");
+    printf(" (2)  Start/stop\n");
+    printf(" (3)  Create/delete channels\n");
+    printf(" (4)  Multiple threads\n");
+
+    const int maxMenuSelection = 4;
+    int selection(-1);
+    int dummy(0);
+
+    while ((selection < 0) || (selection > maxMenuSelection))
+    {
+        printf("\n: ");
+        dummy = scanf("%d", &selection);
+        if ((selection < 0) || (selection > maxMenuSelection))
+        {
+            printf("Invalid selection!\n");
+        }
+    }
+
+    return selection;
+}
+
+
+int VoEStressTest::StartStopTest()
+{
+    printf("------------------------------------------------\n");
+    printf("Running start/stop test\n");
+    printf("------------------------------------------------\n");
+
+    printf("\nNOTE: this thest will fail after a while if Core audio is used\n");
+    printf("because MS returns AUDCLNT_E_CPUUSAGE_EXCEEDED (VoE Error 10013).\n");
+
+    // Get sub-API pointers
+    VoEBase* base = _mgr.BasePtr();
+
+    // Set trace
+//     VALIDATE_STRESS(base->SetTraceFileName(
+//         GetFilename("VoEStressTest_StartStop_trace.txt")));
+//     VALIDATE_STRESS(base->SetDebugTraceFileName(
+//         GetFilename("VoEStressTest_StartStop_trace_debug.txt")));
+//     VALIDATE_STRESS(base->SetTraceFilter(kTraceStateInfo |
+//         kTraceWarning | kTraceError |
+//         kTraceCritical | kTraceApiCall |
+//         kTraceMemory | kTraceInfo));
+
+    VALIDATE_STRESS(base->Init());
+    VALIDATE_STRESS(base->CreateChannel());
+
+
+    ///////////// Start test /////////////
+
+    int numberOfLoops(2000);
+    int loopSleep(200);
+    int i(0);
+    int markInterval(20);
+
+    printf("Running %d loops with %d ms sleep. Mark every %d loop. \n",
+        numberOfLoops, loopSleep, markInterval);
+    printf("Test will take approximately %d minutes. \n",
+           numberOfLoops*loopSleep/1000/60+1);
+
+    for (i=0; i<numberOfLoops; ++i)
+    {
+        VALIDATE_STRESS(base->SetLocalReceiver(0, 4800));
+        VALIDATE_STRESS(base->SetSendDestination(0, 4800, "127.0.0.1"));
+        VALIDATE_STRESS(base->StartReceive(0));
+        VALIDATE_STRESS(base->StartPlayout(0));
+        VALIDATE_STRESS(base->StartSend(0));
+        if (!(i % markInterval)) MARK();
+        SLEEP(loopSleep);
+        VALIDATE_STRESS(base->StopSend(0));
+        VALIDATE_STRESS(base->StopPlayout(0));
+        VALIDATE_STRESS(base->StopReceive(0));
+    }
+    ANL();
+
+    VALIDATE_STRESS(base->SetLocalReceiver(0, 4800));
+    VALIDATE_STRESS(base->SetSendDestination(0, 4800, "127.0.0.1"));
+    VALIDATE_STRESS(base->StartReceive(0));
+    VALIDATE_STRESS(base->StartPlayout(0));
+    VALIDATE_STRESS(base->StartSend(0));
+    printf("Verify that audio is good. \n");
+    PAUSE_OR_SLEEP(20000);
+    VALIDATE_STRESS(base->StopSend(0));
+    VALIDATE_STRESS(base->StopPlayout(0));
+    VALIDATE_STRESS(base->StopReceive(0));
+
+    ///////////// End test /////////////
+
+
+    // Terminate
+    VALIDATE_STRESS(base->DeleteChannel(0));
+    VALIDATE_STRESS(base->Terminate());
+
+    printf("Test finished \n");
+
+    return 0;
+}
+
+
+int VoEStressTest::CreateDeleteChannelsTest()
+{
+    printf("------------------------------------------------\n");
+    printf("Running create/delete channels test\n");
+    printf("------------------------------------------------\n");
+
+    // Get sub-API pointers
+    VoEBase* base = _mgr.BasePtr();
+
+    // Set trace
+//     VALIDATE_STRESS(base->SetTraceFileName(
+//          GetFilename("VoEStressTest_CreateChannels_trace.txt")));
+//     VALIDATE_STRESS(base->SetDebugTraceFileName(
+//          GetFilename("VoEStressTest_CreateChannels_trace_debug.txt")));
+//     VALIDATE_STRESS(base->SetTraceFilter(kTraceStateInfo |
+//         kTraceWarning | kTraceError |
+//         kTraceCritical | kTraceApiCall |
+//         kTraceMemory | kTraceInfo));
+
+    VALIDATE_STRESS(base->Init());
+
+    ///////////// Start test /////////////
+
+    int numberOfLoops(10000);
+    int loopSleep(10);
+    int i(0);
+    int markInterval(200);
+
+    printf("Running %d loops with %d ms sleep. Mark every %d loop. \n",
+        numberOfLoops, loopSleep, markInterval);
+    printf("Test will take approximately %d minutes. \n",
+           numberOfLoops * loopSleep / 1000 / 60 + 1);
+
+    //       Some possible extensions include:
+    //       Different sleep times (fixed or random) or zero.
+    //       Start call on all or some channels.
+    //       Two parts: first have a slight overweight to creating channels,
+    //       then to deleting. (To ensure we hit max channels and go to zero.)
+    //       Make sure audio is OK after test has finished.
+
+    // Set up, start with maxChannels/2 channels
+    const int maxChannels = base->MaxNumOfChannels();
+    VALIDATE_STRESS(maxChannels < 1); // Should always have at least one channel
+    bool* channelState = new bool[maxChannels];
+    memset(channelState, 0, maxChannels*sizeof(bool));
+    int channel(0);
+    int noOfActiveChannels(0);
+    for (i=0; i<(maxChannels/2); ++i)
+    {
+        channel = base->CreateChannel();
+        VALIDATE_STRESS(channel < 0);
+        if (channel >= 0)
+        {
+            channelState[channel] = true;
+            ++noOfActiveChannels;
+        }
+    }
+    srand((unsigned int)time(NULL));
+    bool action(false);
+    double rnd(0.0);
+    int res(0);
+
+    // Create/delete channels at random (roughly 50/50 create vs. delete)
+    for (i=0; i<numberOfLoops; ++i)
+    {
+        // Randomize action (create or delete channel)
+        action = rand() <= (RAND_MAX / 2);
+        if (action)
+        {
+            if (noOfActiveChannels < maxChannels)
+            {
+                // Create new channel
+                channel = base->CreateChannel();
+                VALIDATE_STRESS(channel < 0);
+                if (channel >= 0)
+                {
+                    channelState[channel] = true;
+                    ++noOfActiveChannels;
+                }
+            }
+        }
+        else
+        {
+            if (noOfActiveChannels > 0)
+            {
+                // Delete a random channel among those created, in [0, maxChannels - 1]
+                do
+                {
+                    rnd = static_cast<double>(rand());
+                    channel = static_cast<int>(rnd /
+                        (static_cast<double>(RAND_MAX) + 1.0f) * maxChannels);
+                } while (!channelState[channel]); // Must find a created channel
+
+                res = base->DeleteChannel(channel);
+                VALIDATE_STRESS(0 != res);
+                if (0 == res)
+                {
+                    channelState[channel] = false;
+                    --noOfActiveChannels;
+                }
+            }
+        }
+
+        if (!(i % markInterval)) MARK();
+        SLEEP(loopSleep);
+    }
+    ANL();
+
+    delete [] channelState;
+
+    ///////////// End test /////////////
+
+
+    // Terminate
+    VALIDATE_STRESS(base->Terminate()); // Deletes all channels
+
+    printf("Test finished \n");
+
+    return 0;
+}
+
+
+int VoEStressTest::MultipleThreadsTest()
+{
+    printf("------------------------------------------------\n");
+    printf("Running multiple threads test\n");
+    printf("------------------------------------------------\n");
+
+    // Get sub-API pointers
+    VoEBase* base = _mgr.BasePtr();
+
+    // Set trace
+//     VALIDATE_STRESS(base->SetTraceFileName(
+//        GetFilename("VoEStressTest_MultipleThreads_trace.txt")));
+//     VALIDATE_STRESS(base->SetDebugTraceFileName(
+//        GetFilename("VoEStressTest_MultipleThreads_trace_debug.txt")));
+//     VALIDATE_STRESS(base->SetTraceFilter(kTraceStateInfo |
+//        kTraceWarning | kTraceError |
+//        kTraceCritical | kTraceApiCall |
+//        kTraceMemory | kTraceInfo));
+
+    // Init
+    VALIDATE_STRESS(base->Init());
+    VALIDATE_STRESS(base->CreateChannel());
+
+
+    ///////////// Start test /////////////
+
+    int numberOfLoops(10000);
+    int loopSleep(0);
+    int i(0);
+    int markInterval(1000);
+
+    printf("Running %d loops with %d ms sleep. Mark every %d loop. \n",
+        numberOfLoops, loopSleep, markInterval);
+    printf("Test will take approximately %d minutes. \n",
+           numberOfLoops * loopSleep / 1000 / 60 + 1);
+
+    srand((unsigned int)time(NULL));
+    int rnd(0);
+
+    // Start extra thread
+    const char* threadName = "StressTest Extra API Thread";
+    _ptrExtraApiThread = ThreadWrapper::CreateThread(
+        RunExtraApi, this, kNormalPriority, threadName);
+    unsigned int id(0);
+    VALIDATE_STRESS(!_ptrExtraApiThread->Start(id));
+
+    //       Some possible extensions include:
+    //       Add more API calls to randomize
+    //       More threads
+    //       Different sleep times (fixed or random).
+    //       Make sure audio is OK after test has finished.
+
+    // Call random API functions here and in extra thread, ignore any error
+    for (i=0; i<numberOfLoops; ++i)
+    {
+        // This part should be equal to the marked part in the extra thread
+        // --- BEGIN ---
+        rnd = rand();
+        if (rnd < (RAND_MAX / 2))
+        {
+            // Start playout
+            base->StartPlayout(0);
+        }
+        else
+        {
+            // Stop playout
+            base->StopPlayout(0);
+        }
+        // --- END ---
+
+        if (!(i % markInterval)) MARK();
+        SLEEP(loopSleep);
+    }
+    ANL();
+
+    // Stop extra thread
+    VALIDATE_STRESS(!_ptrExtraApiThread->Stop());
+    delete _ptrExtraApiThread;
+
+    ///////////// End test /////////////
+
+    // Terminate
+    VALIDATE_STRESS(base->Terminate()); // Deletes all channels
+
+    printf("Test finished \n");
+
+    return 0;
+}
+
+
+// Thread functions
+
+bool VoEStressTest::RunExtraApi(void* ptr)
+{
+    return static_cast<VoEStressTest*>(ptr)->ProcessExtraApi();
+}
+
+bool VoEStressTest::ProcessExtraApi()
+{
+    // Prepare
+    VoEBase* base = _mgr.BasePtr();
+    int rnd(0);
+
+    // Call random API function, ignore any error
+
+    // This part should be equal to the marked part in the main thread
+    // --- BEGIN ---
+    rnd = rand();
+    if (rnd < (RAND_MAX / 2))
+    {
+        // Start playout
+        base->StartPlayout(0);
+    }
+    else
+    {
+        // Stop playout
+        base->StopPlayout(0);
+    }
+    // --- END ---
+
+    return true;
+}
+
+}  //  namespace voetest
diff --git a/voice_engine/main/test/auto_test/voe_stress_test.h b/voice_engine/main/test/auto_test/voe_stress_test.h
new file mode 100644
index 0000000..69b309f
--- /dev/null
+++ b/voice_engine/main/test/auto_test/voe_stress_test.h
@@ -0,0 +1,50 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_STRESS_TEST_H
+#define WEBRTC_VOICE_ENGINE_VOE_STRESS_TEST_H
+
+namespace webrtc
+{
+class ThreadWrapper;
+}
+
+using namespace webrtc;
+
+namespace voetest
+{
+
+class VoETestManager;
+
+class VoEStressTest
+{
+public:
+    VoEStressTest(VoETestManager& mgr) : _mgr(mgr), _ptrExtraApiThread(NULL) {};
+    ~VoEStressTest() {};
+    int DoTest();
+
+private:
+    int MenuSelection();
+    int StartStopTest();
+    int CreateDeleteChannelsTest();
+    int MultipleThreadsTest();
+
+    static bool RunExtraApi(void* ptr);
+    bool ProcessExtraApi();
+
+    VoETestManager& _mgr;
+    static const char* _key;
+
+    ThreadWrapper* _ptrExtraApiThread;
+};
+
+}  //  namespace voetest
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_STRESS_TEST_H
diff --git a/voice_engine/main/test/auto_test/voe_test_defines.h b/voice_engine/main/test/auto_test/voe_test_defines.h
new file mode 100644
index 0000000..896110f
--- /dev/null
+++ b/voice_engine/main/test/auto_test/voe_test_defines.h
@@ -0,0 +1,185 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_TEST_DEFINES_H
+#define WEBRTC_VOICE_ENGINE_VOE_TEST_DEFINES_H
+
+// Read WEBRTC_VOICE_ENGINE_XXX_API compiler flags
+#include "engine_configurations.h"
+
+#ifdef ANDROID
+ #include <android/log.h>
+ #define ANDROID_LOG_TAG "VoiceEngine Auto Test"
+ #define TEST_LOG(...) \
+    __android_log_print(ANDROID_LOG_DEBUG, ANDROID_LOG_TAG, __VA_ARGS__)
+ #define TEST_LOG_ERROR(...) \
+    __android_log_print(ANDROID_LOG_ERROR, ANDROID_LOG_TAG, __VA_ARGS__)
+#else
+ #define TEST_LOG printf
+ #define TEST_LOG_ERROR printf
+#endif
+
+// Select the tests to execute; the list order below is the same as the order
+// in which they will be executed. Note that all settings below are overridden
+// by the sub-API settings in engine_configurations.h.
+#define _TEST_BASE_
+#define _TEST_RTP_RTCP_
+#define _TEST_HARDWARE_
+#define _TEST_CODEC_
+#define _TEST_DTMF_
+#define _TEST_VOLUME_
+#define _TEST_AUDIO_PROCESSING_
+#define _TEST_FILE_
+#define _TEST_NETWORK_
+#define _TEST_CALL_REPORT_
+#define _TEST_VIDEO_SYNC_
+#define _TEST_ENCRYPT_
+#define _TEST_NETEQ_STATS_
+#define _TEST_XMEDIA_
+
+#define TESTED_AUDIO_LAYER kAudioPlatformDefault
+//#define TESTED_AUDIO_LAYER kAudioLinuxPulse
+
+// #define _ENABLE_VISUAL_LEAK_DETECTOR_ // Enables VLD to find memory leaks
+// #define _ENABLE_IPV6_TESTS_      // Enables IPv6 tests in network xtest
+// #define _USE_EXTENDED_TRACE_     // Adds unique trace files for extended test
+// #define _MEMORY_TEST_
+
+// Enable this when running instrumentation of some kind to exclude tests
+// that will not pass due to slowed down execution.
+// #define _INSTRUMENTATION_TESTING_
+
+// Exclude (override) API tests given preprocessor settings in
+// engine_configurations.h
+#ifndef WEBRTC_VOICE_ENGINE_CODEC_API
+#undef _TEST_CODEC_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+#undef _TEST_VOLUME_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_DTMF_API
+#undef _TEST_DTMF_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+#undef _TEST_RTP_RTCP_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+#undef _TEST_AUDIO_PROCESSING_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_FILE_API
+#undef _TEST_FILE_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+#undef _TEST_VIDEO_SYNC_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+#undef _TEST_ENCRYPT_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_HARDWARE_API
+#undef _TEST_HARDWARE_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+#undef _TEST_XMEDIA_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_NETWORK_API
+#undef _TEST_NETWORK_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+#undef _TEST_NETEQ_STATS_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+#undef _TEST_CALL_REPORT_
+#endif
+
+// Some parts can cause problems while running Insure
+#ifdef __INSURE__
+#define _INSTRUMENTATION_TESTING_
+#undef WEBRTC_SRTP
+#endif
+
+// Time in ms to test each packet size for each codec
+#define CODEC_TEST_TIME 400
+
+#define MARK() TEST_LOG("."); fflush(NULL);             // Add test marker
+#define ANL() TEST_LOG("\n")                            // Add New Line
+#define AOK() TEST_LOG("[Test is OK]"); fflush(NULL);   // Add OK
+
+#if defined(_WIN32)
+ #define PAUSE                                      \
+    {                                               \
+        TEST_LOG("Press any key to continue...");   \
+        _getch();                                   \
+        TEST_LOG("\n");                             \
+    }
+#else
+ #define PAUSE                                          \
+    {                                                   \
+        TEST_LOG("Continuing (pause not supported)\n"); \
+    }
+#endif
+
+#define TEST(s)                         \
+    {                                   \
+        TEST_LOG("Testing: %s", #s);    \
+    }                                   \
+
+#ifdef _INSTRUMENTATION_TESTING_
+// Don't stop execution if error occurs
+#define TEST_MUSTPASS(expr)                                               \
+    {                                                                     \
+        if ((expr))                                                       \
+        {                                                                 \
+            TEST_LOG_ERROR("Error at line:%i, %s \n",__LINE__, #expr);    \
+            TEST_LOG_ERROR("Error code: %i\n",base->LastError());  \
+        }                                                                 \
+    }
+#define TEST_ERROR(code)                                                \
+    {                                                                   \
+        int err = base->LastError();                             \
+        if (err != code)                                                \
+        {                                                               \
+            TEST_LOG_ERROR("Invalid error code (%d, should be %d) at line %d\n",
+                code, err, __LINE__); \
+        }                                                               \
+    }
+#else
+#define TEST_MUSTPASS(expr)                                              \
+    {                                                                    \
+        if ((expr))                                                      \
+        {                                                                \
+            TEST_LOG_ERROR("\nError at line:%i, %s \n",__LINE__, #expr); \
+            TEST_LOG_ERROR("Error code: %i\n",base->LastError()); \
+            PAUSE                                                        \
+            return -1;                                               \
+        }                                                                \
+    }
+#define TEST_ERROR(code)                                                \
+    {                                                                   \
+        int err = base->LastError();                                    \
+        if (err != code)                                                \
+        {                                                               \
+            TEST_LOG_ERROR("Invalid error code (%d, should be %d) at line %d\n", \
+                           err, code, __LINE__);                       \
+            PAUSE                                                       \
+            return -1;                                                  \
+        }                                                               \
+    }
+#endif  // #ifdef _INSTRUMENTATION_TESTING_
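+
+// Usage sketch for the macros above (assumes a VoEBase* named base in scope,
+// as in the auto tests; VE_SOME_EXPECTED_ERROR is a placeholder, not a real
+// error constant):
+//
+//     TEST(Init);
+//     TEST_MUSTPASS(base->Init());         // logs and returns -1 if Init() fails
+//     TEST_ERROR(VE_SOME_EXPECTED_ERROR);  // compares against base->LastError()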
+
+#define EXCLUDE()                                                   \
+    {                                                               \
+        TEST_LOG("\n>>> Excluding test at line: %i <<<\n\n",__LINE__);  \
+    }
+
+#define INCOMPLETE()                                                \
+    {                                                               \
+        TEST_LOG("\n>>> Incomplete test at line: %i <<<\n\n",__LINE__);  \
+    }
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_TEST_DEFINES_H
diff --git a/voice_engine/main/test/auto_test/voe_test_interface.h b/voice_engine/main/test/auto_test/voe_test_interface.h
new file mode 100644
index 0000000..88a3841
--- /dev/null
+++ b/voice_engine/main/test/auto_test/voe_test_interface.h
@@ -0,0 +1,97 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ *  Interface for starting test
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_TEST_INTERFACE_H
+#define WEBRTC_VOICE_ENGINE_VOE_TEST_INTERFACE_H
+
+#include "common_types.h"
+
+namespace webrtc
+{
+class CriticalSectionWrapper;
+class EventWrapper;
+class ThreadWrapper;
+class VoENetwork;
+}
+
+using namespace webrtc;
+
+namespace voetest {
+
+// TestType enumerator
+enum TestType
+{
+    Invalid = -1,
+    Standard = 0,
+    Extended = 1,
+    Stress   = 2,
+    Unit     = 3,
+    CPU      = 4
+};
+
+// ExtendedSelection enumerator
+enum ExtendedSelection
+{
+    XSEL_Invalid = -1,
+    XSEL_None = 0,
+    XSEL_All,
+    XSEL_Base,
+    XSEL_CallReport,
+    XSEL_Codec,
+    XSEL_DTMF,
+    XSEL_Encryption,
+    XSEL_ExternalMedia,
+    XSEL_File,
+    XSEL_Hardware,
+    XSEL_NetEqStats,
+    XSEL_Network,
+    XSEL_RTP_RTCP,
+    XSEL_VideoSync,
+    XSEL_VolumeControl,
+    XSEL_AudioProcessing,
+};
+
+// ----------------------------------------------------------------------------
+//  External transport (Transport)
+// ----------------------------------------------------------------------------
+
+class my_transportation : public Transport
+{
+public:
+    my_transportation(VoENetwork* ptr);
+    virtual ~my_transportation();
+    VoENetwork* myNetw;
+    int SendPacket(int channel,const void *data,int len);
+    int SendRTCPPacket(int channel, const void *data, int len);
+    void SetDelayStatus(bool enabled, unsigned int delayInMs = 100);
+private:
+    static bool Run(void* ptr);
+    bool Process();
+private:
+    ThreadWrapper* _thread;
+    CriticalSectionWrapper* _lock;
+    EventWrapper* _event;
+private:
+    unsigned char _packetBuffer[1612];
+    int _length;
+    int _channel;
+    bool _delayIsEnabled;
+    int _delayTimeInMs;
+};
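+
+// Sketch (assumption): a my_transportation instance is registered per channel
+// with VoENetwork::RegisterExternalTransport(channel, transportObj) before the
+// call starts and removed with DeRegisterExternalTransport(channel); see
+// voe_network.h for the exact signatures.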
+
+// Main test function
+int runAutoTest(TestType testType, ExtendedSelection extendedSel);
+
+}  //  namespace voetest
+#endif // WEBRTC_VOICE_ENGINE_VOE_TEST_INTERFACE_H
diff --git a/voice_engine/main/test/auto_test/voe_unit_test.cc b/voice_engine/main/test/auto_test/voe_unit_test.cc
new file mode 100644
index 0000000..c41575e
--- /dev/null
+++ b/voice_engine/main/test/auto_test/voe_unit_test.cc
@@ -0,0 +1,1152 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <cassert>
+#if defined(_WIN32)
+ #include <conio.h>
+#endif
+
+#include "voe_unit_test.h"
+
+#include "../../source/voice_engine_defines.h"
+#include "thread_wrapper.h"
+
+using namespace webrtc;
+
+namespace voetest {
+
+#ifdef MAC_IPHONE
+extern char micFile[256];
+#else
+extern const char* micFile;
+#endif
+
+#define CHECK(expr)                                             \
+    if (expr)                                                   \
+    {                                                           \
+        printf("Error at line: %i, %s \n", __LINE__, #expr);    \
+        printf("Error code: %i \n", base->LastError());  \
+        PAUSE                                                   \
+        return -1;                                              \
+    }
+
+extern char* GetFilename(char* filename);
+extern const char* GetFilename(const char* filename);
+extern int GetResource(char* resource, char* dest, int destLen);
+extern char* GetResource(char* resource);
+extern const char* GetResource(const char* resource);
+
+const char* VoEUnitTest::_key = "====YUtFWRAAAAADBtIHgAAAAAEAAAAcAAAAAQBHU0dsb2"
+    "JhbCBJUCBTb3VuZAAC\nAAAAIwAAAExpY2Vuc2VkIHRvIE5vcnRlbCBOZXR3cm9rcwAAAAAxA"
+    "AAAZxZ7/u0M\niFYyTwSwko5Uutf7mh8S0O4rYZYTFidbzQeuGonuL17F/2oD/2pfDp3jL4Rf"
+    "3z/A\nnlJsEJgEtASkDNFuwLILjGY0pzjjAYQp3pCl6z6k2MtE06AirdjGLYCjENpq/opX\nO"
+    "rs3sIuwdYK5va/aFcsjBDmlsGCUM48RDYG9s23bIHYafXUC4ofOaubbZPWiPTmL\nEVJ8WH4F"
+    "9pgNjALc14oJXfON7r/3\n=EsLx";
+
+// ----------------------------------------------------------------------------
+//                       >>>  R E A D M E  F I R S T <<<
+// ----------------------------------------------------------------------------
+
+// 1) The user must ensure that the following codecs are included in VoE:
+//
+// - L16
+// - G.729
+// - G.722.1C
+
+// 2) It is also possible to modify the simulation time for each individual test
+//
+const int dTBetweenEachTest = 4000;
+
+// ----------------------------------------------------------------------------
+//                                  Encrypt
+// ----------------------------------------------------------------------------
+
+void VoEUnitTest::encrypt(int channel_no,
+                          unsigned char * in_data,
+                          unsigned char * out_data,
+                          int bytes_in,
+                          int * bytes_out)
+{
+    int i;
+
+    if (!_extOnOff)
+    {
+        // no stereo emulation <=> pure bypass
+        for (i = 0; i < bytes_in; i++)
+            out_data[i] = in_data[i];
+        *bytes_out = bytes_in;
+    }
+    else if (_extOnOff && (_extBitsPerSample == 16))
+    {
+        // stereo emulation (sample based, 2 bytes per sample)
+
+        const int nBytesPayload = bytes_in-12;
+
+        // RTP header (first 12 bytes)
+        memcpy(out_data, in_data, 12);
+
+        // skip RTP header
+        short* ptrIn = (short*) &in_data[12];
+        short* ptrOut = (short*) &out_data[12];
+
+        // network byte order
+        for (i = 0; i < nBytesPayload/2; i++)
+        {
+            // produce two output samples for each input sample
+            *ptrOut++ = *ptrIn; // left sample
+            *ptrOut++ = *ptrIn; // right sample
+            ptrIn++;
+        }
+
+        *bytes_out = 12 + 2*nBytesPayload;
+    }
+    else if (_extOnOff && (_extBitsPerSample == 8))
+    {
+        // stereo emulation (sample based, 1 byte per sample)
+
+        const int nBytesPayload = bytes_in-12;
+
+        // RTP header (first 12 bytes)
+        memcpy(out_data, in_data, 12);
+
+        // skip RTP header
+        unsigned char* ptrIn = (unsigned char*) &in_data[12];
+        unsigned char* ptrOut = (unsigned char*) &out_data[12];
+
+        // network byte order
+        for (i = 0; i < nBytesPayload; i++)
+        {
+            // produce two output samples for each input sample
+            *ptrOut++ = *ptrIn; // left sample
+            *ptrOut++ = *ptrIn; // right sample
+            ptrIn++;
+        }
+
+        *bytes_out = 12 + 2*nBytesPayload;
+    }
+    else if (_extOnOff && (_extBitsPerSample == -1))
+    {
+        // stereo emulation (frame based)
+
+        const int nBytesPayload = bytes_in-12;
+
+        // RTP header (first 12 bytes)
+        memcpy(out_data, in_data, 12);
+
+        // skip RTP header
+        unsigned char* ptrIn = (unsigned char*) &in_data[12];
+        unsigned char* ptrOut = (unsigned char*) &out_data[12];
+
+        // left channel
+        for (i = 0; i < nBytesPayload; i++)
+        {
+            *ptrOut++ = *ptrIn++;
+        }
+
+        ptrIn = (unsigned char*) &in_data[12];
+
+        // right channel
+        for (i = 0; i < nBytesPayload; i++)
+        {
+            *ptrOut++ = *ptrIn++;
+        }
+
+        *bytes_out = 12 + 2*nBytesPayload;
+    }
+}
+
+void VoEUnitTest::decrypt(int channel_no,
+                          unsigned char * in_data,
+                          unsigned char * out_data,
+                          int bytes_in,
+                          int * bytes_out)
+{
+    int i;
+    for (i = 0; i < bytes_in; i++)
+        out_data[i] = in_data[i];
+    *bytes_out = bytes_in;
+}
+    
+void VoEUnitTest::encrypt_rtcp(int channel_no,
+                               unsigned char * in_data,
+                               unsigned char * out_data,
+                               int bytes_in,
+                               int * bytes_out)
+{
+    int i;
+    for (i = 0; i < bytes_in; i++)
+        out_data[i] = in_data[i];
+    *bytes_out = bytes_in;
+}
+
+void VoEUnitTest::decrypt_rtcp(int channel_no,
+                               unsigned char * in_data,
+                               unsigned char * out_data,
+                               int bytes_in,
+                               int * bytes_out)
+{
+    int i;
+    for (i = 0; i < bytes_in; i++)
+        out_data[i] = in_data[i];
+    *bytes_out = bytes_in;
+}
+
+void VoEUnitTest::SetStereoExternalEncryption(int channel,
+                                              bool onOff,
+                                              int bitsPerSample)
+{
+    _extOnOff = onOff;
+    _extChannel = channel;
+    _extBitsPerSample = bitsPerSample;
+}
+
+// VoEVEMediaProcess
+MyMedia mpobj;
+
+// ----------------------------------------------------------------------------
+//                               VoEUnitTest
+// ----------------------------------------------------------------------------
+
+VoEUnitTest::VoEUnitTest(VoETestManager& mgr) :
+    _mgr(mgr),
+    _extOnOff(false),
+    _extBitsPerSample(-1),
+    _extChannel(-1)
+{
+    for (int i = 0; i < 32; i++)
+    {
+        _listening[i] = false;
+        _playing[i] = false;
+        _sending[i] = false;
+    }
+}
+
+// ----------------------------------------------------------------------------
+//  DoTest
+// ----------------------------------------------------------------------------
+
+int VoEUnitTest::DoTest()
+{
+    int test(-1);
+    int ret(0);
+    while ((test != 0) && (ret != -1))
+    {
+        test = MenuSelection();
+        switch (test)
+        {
+            case 0:
+                // Quit stress test
+                break;
+            case 1:
+                // "All" currently consists of the mixer test only
+                ret = MixerTest();
+                break;
+            case 2:
+                ret = MixerTest();
+                break;
+            default:
+                // Should not be possible
+                printf("Invalid selection! (Test code error)\n");
+                assert(false);
+        } // switch
+    } // while
+
+    return ret;
+}
+
+// ----------------------------------------------------------------------------
+//  MenuSelection
+// ----------------------------------------------------------------------------
+
+int VoEUnitTest::MenuSelection()
+{
+    printf("------------------------------------------------\n");
+    printf("Select unit test\n\n");
+    printf(" (0)  Quit\n");
+    printf(" (1)  All\n");
+    printf("- - - - - - - - - - - - - - - - - - - - - - - - \n");
+    printf(" (2)  Mixer\n");
+
+    const int maxMenuSelection = 2;
+    int selection(-1);
+    int dummy(0);
+
+    while ((selection < 0) || (selection > maxMenuSelection))
+    {
+        printf("\n: ");
+        dummy = scanf("%d", &selection);
+        if ((selection < 0) || (selection > maxMenuSelection))
+        {
+            printf("Invalid selection!\n");
+        }
+    }
+
+    return selection;
+}
+
+// ----------------------------------------------------------------------------
+//  StartMedia
+// ----------------------------------------------------------------------------
+
+int VoEUnitTest::StartMedia(int channel, 
+                            int rtpPort,
+                            bool listen,
+                            bool playout,
+                            bool send,
+                            bool fileAsMic,
+                            bool localFile)
+{
+    VoEBase* base = _mgr.BasePtr();
+    VoEFile* file = _mgr.FilePtr();
+    VoECodec* codec = _mgr.CodecPtr();
+
+    _listening[channel] = false;
+    _playing[channel] = false;
+    _sending[channel] = false;
+
+    CHECK(base->SetLocalReceiver(channel, rtpPort));
+    CHECK(base->SetSendDestination(channel, rtpPort, "127.0.0.1"));
+    if (listen)
+    {
+        _listening[channel] = true;
+        CHECK(base->StartReceive(channel));
+    }
+    if (playout)
+    {
+        _playing[channel] = true;
+        CHECK(base->StartPlayout(channel));
+    }
+    if (send)
+    {
+        _sending[channel] = true;
+        CHECK(base->StartSend(channel));
+    }
+    if (fileAsMic)
+    {
+        // Play a file as the microphone signal, mixed with the real
+        // microphone, so that SWB can be tested as well.
+        const bool mixWithMic(true);
+        CHECK(file->StartPlayingFileAsMicrophone(channel, micFile,
+                                                 true, mixWithMic));
+    }
+    if (localFile)
+    {
+        CHECK(file->StartPlayingFileLocally(channel,
+                                            GetResource("audio_short16.pcm"),
+                                            false,
+                                            kFileFormatPcm16kHzFile));
+    }
+
+    return 0;
+}
+
+// ----------------------------------------------------------------------------
+//  StopMedia
+// ----------------------------------------------------------------------------
+
+int VoEUnitTest::StopMedia(int channel)
+{
+    VoEBase* base = _mgr.BasePtr();
+    VoEFile* file = _mgr.FilePtr();
+
+    if (file->IsPlayingFileAsMicrophone(channel))
+    {
+        CHECK(file->StopPlayingFileAsMicrophone(channel));
+    }
+    if (file->IsPlayingFileLocally(channel))
+    {
+        CHECK(file->StopPlayingFileLocally(channel));
+    }
+    if (_listening[channel])
+    {
+        _listening[channel] = false;
+        CHECK(base->StopReceive(channel));
+    }
+    if (_playing[channel])
+    {
+        _playing[channel] = false;
+        CHECK(base->StopPlayout(channel));
+    }
+    if (_sending[channel])
+    {
+        _sending[channel] = false;
+        CHECK(base->StopSend(channel));
+    }
+
+    return 0;
+}
+
+void VoEUnitTest::Sleep(unsigned int timeMillisec, bool addMarker)
+{
+    if (addMarker)
+    {
+        float dtSec = (float) timeMillisec / 1000.0f;
+        printf("[dT=%.1f]", dtSec);
+        fflush(NULL);
+    }
+    ::Sleep(timeMillisec);
+}
+
+void VoEUnitTest::Wait()
+{
+#if defined(_WIN32)
+    printf("\npress any key..."); fflush(NULL);
+    _getch();
+#endif
+}
+
+void VoEUnitTest::Test(const char* msg)
+{
+    printf("%s\n", msg);
+    fflush(NULL);
+}
+
+int VoEUnitTest::MixerTest()
+{
+    // Set up test parameters first
+    //
+    const int testTime(dTBetweenEachTest);
+
+    printf("\n\n================================================\n");
+    printf(" Mixer Unit Test\n");
+    printf("================================================\n\n");
+
+    // Get sub-API pointers
+    //
+    VoEBase* base = _mgr.BasePtr();
+    VoECodec* codec = _mgr.CodecPtr();
+    VoEFile* file = _mgr.FilePtr();
+    VoEVolumeControl* volume = _mgr.VolumeControlPtr();
+    VoEEncryption* encrypt = _mgr.EncryptionPtr();
+    VoEDtmf* dtmf = _mgr.DtmfPtr();
+    VoEExternalMedia* xmedia = _mgr.ExternalMediaPtr();
+
+    // Set trace
+    //
+    VoiceEngine::SetTraceFile(GetFilename("UnitTest_Mixer_trace.txt"));
+    VoiceEngine::SetTraceFilter(kTraceStateInfo |
+                                kTraceWarning |
+                                kTraceError |
+                                kTraceCritical |
+                                kTraceApiCall |
+                                kTraceMemory |
+                                kTraceInfo);
+
+    // Init
+    //
+    CHECK(base->Init());
+
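+    // The aggregate initializers below follow the CodecInst field order
+    // {pltype, plname, plfreq, pacsize, channels, rate} (see the per-field
+    // assignments further down in this test).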
+    // 8 kHz
+    CodecInst l16_8 = { 123, "L16", 8000, 160, 1, 128000 };
+    CodecInst pcmu_8 = { 0, "pcmu", 8000, 160, 1, 64000 };
+    CodecInst g729_8 = { 18, "g729", 8000, 160, 1, 8000 };
+
+    // 16 kHz
+    CodecInst ipcmwb_16 = { 97, "ipcmwb", 16000, 320, 1, 80000 };
+    CodecInst l16_16 = { 124, "L16", 16000, 320, 1, 256000 };
+
+    // 32 kHz
+    CodecInst l16_32 = { 125, "L16", 32000, 320, 1, 512000 };
+    CodecInst g722_1c_32 = { 126, "G7221", 32000, 640, 1, 32000 };// 20ms@32kHz
+
+    // ------------------------
+    // Verify mixing frequency
+    // ------------------------
+
+    base->CreateChannel();
+
+    Test(">> Verify correct mixing frequency:\n");
+
+    Test("(ch 0) Sending file at 8kHz <=> mixing at 8kHz...");
+    CHECK(StartMedia(0, 12345, true, true, true, true, false));
+    Sleep(testTime);
+
+    Test("(ch 0) Sending file at 16kHz <=> mixing at 16kHz...");
+    CHECK(codec->SetSendCodec(0, ipcmwb_16));
+    Sleep(testTime);
+
+    Test("(ch 0) Sending speech at 32kHz <=> mixing at 32Hz...");
+    CHECK(codec->SetSendCodec(0, l16_32));
+    Sleep(testTime);
+
+    Test("(ch 0) Sending file at 8kHz <=> mixing at 8kHz...");
+    CHECK(codec->SetSendCodec(0, pcmu_8));
+    Sleep(testTime);
+
+    Test("(ch 0) Playing 16kHz file locally <=> mixing at 16kHz...");
+    CHECK(file->StartPlayingFileLocally(0, GetResource("audio_long16.pcm"),
+                                        false, kFileFormatPcm16kHzFile));
+    Sleep(testTime);
+    CHECK(file->StopPlayingFileLocally(0));
+
+    base->CreateChannel();
+
+    Test("(ch 0) Sending file at 8kHz <=> mixing at 8kHz...");
+    CHECK(codec->SetSendCodec(0, pcmu_8));
+    Sleep(testTime);
+
+    Test("(ch 0) Sending speech at 32kHz <=> mixing at 32Hz...");
+    CHECK(codec->SetSendCodec(0, l16_32));
+    Sleep(testTime);
+
+    Test("(ch 1) Playing 16kHz file locally <=> mixing at 32kHz...");
+    CHECK(StartMedia(1, 54321, false, true, false, false, true));
+    Sleep(testTime);
+
+    CHECK(StopMedia(1));
+    CHECK(StopMedia(0));
+
+    base->DeleteChannel(1);
+    base->DeleteChannel(0);
+    ANL();
+
+    // -------------------------
+    // Verify stereo mode mixing
+    // -------------------------
+
+    base->CreateChannel();
+    base->CreateChannel();
+
+    // SetOutputVolumePan
+    //
+    // Ensure that all cases sound OK and that the mixer changes state between
+    // mono and stereo as it should. A debugger is required to trace the state
+    // transitions.
+
+    Test(">> Verify correct mixing in stereo using SetOutputVolumePan():\n");
+
+    Test("(ch 0) Playing 16kHz file locally <=> mixing in mono @ 16kHz...");
+    CHECK(StartMedia(0, 12345, false, true, false, false, true));
+    Sleep(testTime);
+    Test("Panning volume to the left <=> mixing in stereo @ 16kHz...");
+    CHECK(volume->SetOutputVolumePan(-1, 1.0, 0.0));
+    Sleep(testTime);
+    Test("Panning volume to the right <=> mixing in stereo @ 16kHz...");
+    CHECK(volume->SetOutputVolumePan(-1, 0.0, 1.0));
+    Sleep(testTime);
+    Test("Back to center volume again <=> mixing in mono @ 16kHz...");
+    CHECK(volume->SetOutputVolumePan(-1, 1.0, 1.0));
+    Sleep(testTime);
+    Test("(ch 1) Playing 16kHz file locally <=> mixing in mono @ 16kHz...");
+    CHECK(StartMedia(1, 54321, false, true, false, false, true));
+    Sleep(testTime);
+    Test("Panning volume to the left <=> mixing in stereo @ 16kHz...");
+    CHECK(volume->SetOutputVolumePan(-1, 1.0, 0.0));
+    Sleep(testTime);
+    Test("Back to center volume again <=> mixing in mono @ 16kHz...");
+    CHECK(volume->SetOutputVolumePan(-1, 1.0, 1.0));
+    Sleep(testTime);
+    Test("(ch 1) Stopped playing file <=> mixing in mono @ 16kHz...");
+    CHECK(StopMedia(1));
+    Sleep(testTime);
+    CHECK(StopMedia(0));
+    Test("(ch 0) Sending file at 8kHz <=> mixing at 8kHz...");
+    CHECK(StartMedia(0, 12345, true, true, true, true, false));
+    Sleep(testTime);
+    Test("(ch 0) Sending speech at 32kHz <=> mixing at 32kHz...");
+    CHECK(codec->SetSendCodec(0, l16_32));
+    Sleep(testTime);
+    Test("Panning volume to the right <=> mixing in stereo @ 32kHz...");
+    CHECK(volume->SetOutputVolumePan(-1, 0.0, 1.0));
+    Sleep(testTime);
+    Test("Back to center volume again <=> mixing in mono @ 32kHz...");
+    CHECK(volume->SetOutputVolumePan(-1, 1.0, 1.0));
+    Sleep(testTime);
+    CHECK(StopMedia(0));
+    ANL();
+
+    base->DeleteChannel(0);
+    base->DeleteChannel(1);
+
+    // SetChannelOutputVolumePan
+    //
+    // Ensure that all cases sound OK and that the mixer changes state between
+    // mono and stereo as it should. A debugger is required to trace the state
+    // transitions.
+
+    base->CreateChannel();
+    base->CreateChannel();
+
+    Test(">> Verify correct mixing in stereo using"
+        " SetChannelOutputVolumePan():\n");
+
+    Test("(ch 0) Playing 16kHz file locally <=> mixing in mono @ 16kHz...");
+    CHECK(StartMedia(0, 12345, false, true, false, false, true));
+    Sleep(testTime);
+    Test("(ch 0) Panning channel volume to the left <=> mixing in stereo @ "
+        "16kHz...");
+    CHECK(volume->SetOutputVolumePan(0, 1.0, 0.0));
+    Sleep(testTime);
+    Test("(ch 0) Panning channel volume to the right <=> mixing in stereo"
+        " @ 16kHz...");
+    CHECK(volume->SetOutputVolumePan(0, 0.0, 1.0));
+    Sleep(testTime);
+    Test("(ch 0) Back to center volume again <=> mixing in mono @"
+        " 16kHz...");
+    CHECK(volume->SetOutputVolumePan(0, 1.0, 1.0));
+    Sleep(testTime);
+    Test("(ch 1) Playing 16kHz file locally <=> mixing in mono @ 16kHz...");
+    CHECK(StartMedia(1, 54321, false, true, false, false, true));
+    Sleep(testTime);
+    Test("(ch 1) Panning channel volume to the left <=> mixing in stereo "
+        "@ 16kHz...");
+    CHECK(volume->SetOutputVolumePan(1, 1.0, 0.0));
+    Sleep(testTime);
+    Test("(ch 1) Back to center volume again <=> mixing in mono @ 16kHz...");
+    CHECK(volume->SetOutputVolumePan(1, 1.0, 1.0));
+    Sleep(testTime);
+    Test("(ch 1) Stopped playing file <=> mixing in mono @ 16kHz...");
+    CHECK(StopMedia(1));
+    Sleep(testTime);
+    CHECK(StopMedia(0));
+    ANL();
+
+    base->DeleteChannel(0);
+    base->DeleteChannel(1);
+
+    // Emulate stereo-encoding using Encryption
+    //
+    // Modify the transmitted RTP stream by using external encryption.
+    // Supports frame based and sample based "stereo-encoding schemes".
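+    // The payload duplication itself is implemented in VoEUnitTest::encrypt()
+    // and is configured per scheme via SetStereoExternalEncryption().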
+
+    base->CreateChannel();
+
+    Test(">> Verify correct mixing in stereo using emulated stereo input:\n");
+
+    // enable external encryption
+    CHECK(encrypt->RegisterExternalEncryption(0, *this));
+    Test("(ch 0) External Encryption is now enabled:");
+
+    Test("(ch 0) Sending file at 8kHz <=> mixing in mono @ 8kHz...");
+    CHECK(StartMedia(0, 12345, true, true, true, true, false));
+    Sleep(testTime);
+
+    // switch to 16kHz (L16) sending codec
+    CHECK(codec->SetSendCodec(0, l16_16));
+    Test("(ch 0) Sending file at 16kHz (L16) <=> mixing in mono @ 16kHz...");
+    Sleep(testTime);
+
+    // register L16 as 2-channel codec on receiving side =>
+    // should sound bad since RTP module splits all received packets in half
+    // (sample based)
+    CHECK(base->StopPlayout(0));
+    CHECK(base->StopReceive(0));
+    l16_16.channels = 2;
+    CHECK(codec->SetRecPayloadType(0, l16_16));
+    CHECK(base->StartReceive(0));
+    CHECK(base->StartPlayout(0));
+    Test("(ch 0) 16kHz L16 is now registered as 2-channel codec on RX side => "
+        "should sound bad...");
+    Sleep(testTime);
+
+    // emulate sample-based stereo encoding
+    Test("(ch 0) Emulate sample-based stereo encoding on sending side => "
+        "should sound OK...");
+    SetStereoExternalEncryption(0, true, 16);
+    Sleep(testTime);
+    Test("(ch 0) Stop emulating sample-based stereo encoding on sending side =>"
+        " should sound bad...");
+    SetStereoExternalEncryption(0, false, 16);
+    Sleep(testTime);
+    Test("(ch 0) Emulate sample-based stereo encoding on sending side => "
+        "should sound OK...");
+    SetStereoExternalEncryption(0, true, 16);
+    Sleep(testTime);
+
+    // switch to 32kHz (L16) sending codec and disable stereo encoding
+    CHECK(codec->SetSendCodec(0, l16_32));
+    SetStereoExternalEncryption(0, false, 16);
+    Test("(ch 0) Sending file and spech at 32kHz (L16) <=> mixing in mono @ "
+        "32kHz...");
+    Sleep(testTime);
+
+    // register L16 32kHz as 2-channel codec on receiving side 
+    CHECK(base->StopPlayout(0));
+    CHECK(base->StopReceive(0));
+    l16_32.channels = 2;
+    CHECK(codec->SetRecPayloadType(0, l16_32));
+    CHECK(base->StartReceive(0));
+    CHECK(base->StartPlayout(0));
+    Test("(ch 0) 32kHz L16 is now registered as 2-channel codec on RX side =>"
+        " should sound bad...");
+    Sleep(testTime);
+
+    // emulate sample-based stereo encoding
+    Test("(ch 0) Emulate sample-based stereo encoding on sending side =>"
+        " should sound OK...");
+    SetStereoExternalEncryption(0, true, 16);
+    Sleep(testTime);
+
+    StopMedia(0);
+    l16_32.channels = 1;
+
+    // disable external encryption
+    CHECK(encrypt->DeRegisterExternalEncryption(0));
+    ANL();
+
+    base->DeleteChannel(0);
+
+    // ------------------
+    // Verify put-on-hold
+    // ------------------
+
+    base->CreateChannel();
+    base->CreateChannel();
+
+    Test(">> Verify put-on-hold functionality:\n");
+
+    Test("(ch 0) Sending at 8kHz...");
+    CHECK(StartMedia(0, 12345, true, true, true, true, false));
+    Sleep(testTime);
+
+    CHECK(base->SetOnHoldStatus(0, true, kHoldPlayOnly));
+    Test("(ch 0) Playout is now on hold...");
+    Sleep(testTime);
+    CHECK(base->SetOnHoldStatus(0, false, kHoldPlayOnly));
+    Test("(ch 0) Playout is now enabled again...");
+    Sleep(testTime);
+
+    Test("(ch 0) Sending at 16kHz...");
+    l16_16.channels = 1;
+    CHECK(codec->SetSendCodec(0, l16_16));
+    Sleep(testTime);
+
+    CHECK(base->SetOnHoldStatus(0, true, kHoldPlayOnly));
+    Test("(ch 0) Playout is now on hold...");
+    Sleep(testTime);
+    CHECK(base->SetOnHoldStatus(0, false, kHoldPlayOnly));
+    Test("(ch 0) Playout is now enabled again...");
+    Sleep(testTime);
+
+    Test("(ch 0) Perform minor panning to the left to force mixing in"
+        " stereo...");
+    CHECK(volume->SetOutputVolumePan(0, (float)1.0, (float)0.7));
+    Sleep(testTime);
+
+    CHECK(base->SetOnHoldStatus(0, true, kHoldPlayOnly));
+    Test("(ch 0) Playout is now on hold...");
+    Sleep(testTime);
+    CHECK(base->SetOnHoldStatus(0, false, kHoldPlayOnly));
+    Test("(ch 0) Playout is now enabled again...");
+    Sleep(testTime);
+
+    Test("(ch 0) Back to center volume again...");
+    CHECK(volume->SetOutputVolumePan(0, 1.0, 1.0));
+    Sleep(testTime);
+
+    Test("(ch 1) Add 16kHz local file to the mixer...");
+    CHECK(StartMedia(1, 54321, false, true, false, false, true));
+    Sleep(testTime);
+
+    CHECK(base->SetOnHoldStatus(0, true, kHoldPlayOnly));
+    Test("(ch 0) Playout is now on hold...");
+    Sleep(testTime);
+    CHECK(base->SetOnHoldStatus(1, true, kHoldPlayOnly));
+    Test("(ch 1) Playout is now on hold => should be silent...");
+    Sleep(testTime);
+    CHECK(base->SetOnHoldStatus(0, false, kHoldPlayOnly));
+    Test("(ch 0) Playout is now enabled again...");
+    CHECK(base->SetOnHoldStatus(1, false, kHoldPlayOnly));
+    Test("(ch 1) Playout is now enabled again...");
+    Sleep(testTime);
+    StopMedia(1);
+    Test("(ch 1) Stopped playing file...");
+    Sleep(testTime);
+    StopMedia(0);
+    ANL();
+
+    base->DeleteChannel(0);
+    base->DeleteChannel(1);
+
+    // -----------------------------------
+    // Verify recording of playout to file
+    // -----------------------------------
+
+    // StartRecordingPlayout
+    //
+    // Verify that the correct set of signals is recorded in the mixer.
+    // Record each channel and all channels (-1) to ensure that post and pre
+    // mixing recording works.
+
+    base->CreateChannel();
+    base->CreateChannel();
+
+    Test(">> Verify file-recording functionality:\n");
+
+    Test("(ch 0) Sending at 8kHz...");
+    CHECK(StartMedia(0, 12345, true, true, true, true, false));
+    Sleep(testTime);
+
+    Test("(ch 0) Recording of playout to 16kHz PCM file...");
+    CHECK(file->StartRecordingPlayout(
+        0, GetFilename("RecordedPlayout16kHz.pcm"), NULL));
+    Sleep(testTime);
+    CHECK(file->StopRecordingPlayout(0));
+
+    Test("(ch 0) Playing out the recorded file...");
+    CHECK(volume->SetInputMute(0, true));
+    CHECK(file->StartPlayingFileLocally(
+        0, GetFilename("RecordedPlayout16kHz.pcm")));
+    Sleep(testTime);
+    CHECK(file->StopPlayingFileLocally(0));
+    CHECK(volume->SetInputMute(0, false));
+
+    CHECK(codec->SetSendCodec(0, l16_16));
+    Test("(ch 0) Sending at 16kHz (L16)...");
+    Sleep(testTime);
+
+    Test("(ch 0) Recording of playout to 16kHz PCM file...");
+    CHECK(file->StartRecordingPlayout(
+        0, GetFilename("RecordedPlayout16kHz.pcm"), NULL));
+    Sleep(testTime);
+    CHECK(file->StopRecordingPlayout(0));
+
+    Test("(ch 0) Playing out the recorded file...");
+    CHECK(volume->SetInputMute(0, true));
+    CHECK(file->StartPlayingFileLocally(
+        0, GetFilename("RecordedPlayout16kHz.pcm")));
+    Sleep(testTime);
+    CHECK(file->StopPlayingFileLocally(0));
+    CHECK(volume->SetInputMute(0, false));
+
+    CHECK(codec->SetSendCodec(0, l16_32));
+    Test("(ch 0) Sending at 32kHz (L16)...");
+    Sleep(testTime);
+
+    Test("(ch 0) Recording of playout to 16kHz PCM file...");
+    CHECK(file->StartRecordingPlayout(
+        0, GetFilename("RecordedPlayout16kHz.pcm"), NULL));
+    Sleep(testTime);
+    CHECK(file->StopRecordingPlayout(0));
+
+    Test("(ch 0) Playing out the recorded file...");
+    CHECK(volume->SetInputMute(0, true));
+    CHECK(file->StartPlayingFileLocally(
+        0, GetFilename("RecordedPlayout16kHz.pcm")));
+    Sleep(testTime);
+    CHECK(file->StopPlayingFileLocally(0));
+    CHECK(volume->SetInputMute(0, false));
+
+    Test("(ch 0) Sending at 16kHz without file as mic but file added on the"
+        " playout side instead...");
+    CHECK(StopMedia(0));
+    CHECK(StartMedia(0, 12345, false, true, false, false, true));
+    CHECK(codec->SetSendCodec(0, l16_16));
+    Sleep(testTime);
+
+    Test("(ch 0) Recording of playout to 16kHz PCM file...");
+    CHECK(file->StartRecordingPlayout(
+        0, GetFilename("RecordedPlayout16kHz.pcm"), NULL));
+    Sleep(testTime);
+    CHECK(file->StopRecordingPlayout(0));
+    CHECK(file->StopPlayingFileLocally(0));
+
+    Test("(ch 0) Playing out the recorded file...");
+    CHECK(file->StartPlayingFileLocally(
+        0, GetFilename("RecordedPlayout16kHz.pcm")));
+    Sleep(testTime);
+    CHECK(file->StopPlayingFileLocally(0));
+
+    CHECK(StopMedia(0));
+    CHECK(StopMedia(1));
+
+    Test("(ch 0) Sending at 16kHz...");
+    CHECK(StartMedia(0, 12345, true, true, true, false, false));
+    CHECK(codec->SetSendCodec(0, l16_16));
+    Test("(ch 1) Adding playout file...");
+    CHECK(StartMedia(1, 33333, false, true, false, false, true));
+    Sleep(testTime);
+
+    Test("(ch -1) Speak while recording all channels to add mixer input on "
+        "channel 0...");
+    CHECK(file->StartRecordingPlayout(
+        -1, GetFilename("RecordedPlayout16kHz.pcm"), NULL));
+    Sleep(testTime);
+    CHECK(file->StopRecordingPlayout(-1));
+    CHECK(file->StopPlayingFileLocally(1));
+
+    Test("(ch 0) Playing out the recorded file...");
+    CHECK(volume->SetInputMute(0, true));
+    CHECK(file->StartPlayingFileLocally(
+        0, GetFilename("RecordedPlayout16kHz.pcm")));
+    Sleep(testTime);
+    CHECK(file->StopPlayingFileLocally(0));
+    CHECK(volume->SetInputMute(0, false));
+
+    CHECK(StopMedia(0));
+    CHECK(StopMedia(1));
+    ANL();
+
+    // StartRecordingPlayoutStereo
+
+    Test(">> Verify recording of playout in stereo:\n");
+
+    Test("(ch 0) Sending at 32kHz...");
+    CHECK(codec->SetSendCodec(0, l16_16));
+    CHECK(StartMedia(0, 12345, true, true, true, true, false));
+    Sleep(testTime);
+
+    Test("Modified master balance (L=10%%, R=100%%) to force stereo mixing...");
+    CHECK(volume->SetOutputVolumePan(-1, (float)0.1, (float)1.0));
+    Sleep(testTime);
+
+    /*
+     Test("Recording of left and right channel playout to two 16kHz PCM "
+         "files...");
+     file->StartRecordingPlayoutStereo(
+         GetFilename("RecordedPlayout_Left_16kHz.pcm"),
+         GetFilename("RecordedPlayout_Right_16kHz.pcm"), StereoBoth);
+     Sleep(testTime);
+     Test("Back to center volume again...");
+     CHECK(volume->SetOutputVolumePan(-1, (float)1.0, (float)1.0));
+     */
+
+    Test("(ch 0) Playing out the recorded file for the left channel (10%%)...");
+    CHECK(volume->SetInputMute(0, true));
+    CHECK(file->StartPlayingFileLocally(
+        0, GetFilename("RecordedPlayout_Left_16kHz.pcm")));
+    Sleep(testTime);
+    CHECK(file->StopPlayingFileLocally(0));
+
+    Test("(ch 0) Playing out the recorded file for the right channel (100%%) =>"
+        " should sound louder than the left channel...");
+    CHECK(file->StartPlayingFileLocally(
+        0, GetFilename("RecordedPlayout_Right_16kHz.pcm")));
+    Sleep(testTime);
+    CHECK(file->StopPlayingFileLocally(0));
+    CHECK(volume->SetInputMute(0, false));
+
+    base->DeleteChannel(0);
+    base->DeleteChannel(1);
+    ANL();
+
+    // ---------------------------
+    // Verify inserted Dtmf tones
+    // ---------------------------
+
+    Test(">> Verify Dtmf feedback functionality:\n");
+
+    base->CreateChannel();
+
+    for (int i = 0; i < 2; i++)
+    {
+        if (i == 0)
+            Test("Dtmf direct feedback is now enabled...");
+        else
+            Test("Dtmf direct feedback is now disabled...");
+
+        CHECK(dtmf->SetDtmfFeedbackStatus(true, (i==0)));
+
+        Test("(ch 0) Sending at 32kHz using G.722.1C...");
+        CHECK(codec->SetRecPayloadType(0, g722_1c_32));
+        CHECK(codec->SetSendCodec(0, g722_1c_32));
+        CHECK(StartMedia(0, 12345, true, true, true, false, false));
+        Sleep(500);
+
+        Test("(ch 0) Sending outband Dtmf events => ensure that they are added"
+            " to the mixer...");
+        // ensure that receiver will not play out outband Dtmf
+        CHECK(dtmf->SetSendTelephoneEventPayloadType(0, 118));
+        CHECK(dtmf->SendTelephoneEvent(0, 9, true, 390));
+        Sleep(500);
+        CHECK(dtmf->SendTelephoneEvent(0, 1, true, 390));
+        Sleep(500);
+        CHECK(dtmf->SendTelephoneEvent(0, 5, true, 390));
+        Sleep(500);
+        Sleep(testTime - 1500);
+
+        Test("(ch 0) Changing codec to 8kHz PCMU...");
+        CHECK(codec->SetSendCodec(0, pcmu_8));
+        Sleep(500);
+
+        Test("(ch 0) Sending outband Dtmf events => ensure that they are added"
+            " to the mixer...");
+        CHECK(dtmf->SendTelephoneEvent(0, 9, true, 390));
+        Sleep(500);
+        CHECK(dtmf->SendTelephoneEvent(0, 1, true, 390));
+        Sleep(500);
+        CHECK(dtmf->SendTelephoneEvent(0, 5, true, 390));
+        Sleep(500);
+        Sleep(testTime - 1500);
+
+        Test("(ch 0) Changing codec to 16kHz L16...");
+        CHECK(codec->SetSendCodec(0, l16_16));
+        Sleep(500);
+
+        Test("(ch 0) Sending outband Dtmf events => ensure that they are added"
+             " to the mixer...");
+        CHECK(dtmf->SendTelephoneEvent(0, 9, true, 390));
+        Sleep(500);
+        CHECK(dtmf->SendTelephoneEvent(0, 1, true, 390));
+        Sleep(500);
+        CHECK(dtmf->SendTelephoneEvent(0, 5, true, 390));
+        Sleep(500);
+        Sleep(testTime - 1500);
+
+        StopMedia(0);
+        ANL();
+    }
+
+    base->DeleteChannel(0);
+
+    // ---------------------------
+    // Verify external processing
+    // --------------------------
+
+    base->CreateChannel();
+
+    Test(">> Verify external media processing:\n");
+
+    Test("(ch 0) Playing 16kHz file locally <=> mixing in mono @ 16kHz...");
+    CHECK(StartMedia(0, 12345, false, true, false, false, true));
+    Sleep(testTime);
+    Test("Enabling playout external media processing => played audio should "
+        "now be affected");
+    CHECK(xmedia->RegisterExternalMediaProcessing(
+        0, kPlaybackAllChannelsMixed, mpobj));
+    Sleep(testTime);
+    Test("(ch 0) Sending speech at 32kHz <=> mixing at 32kHz...");
+    CHECK(codec->SetSendCodec(0, l16_32));
+    Sleep(testTime);
+    printf("Back to normal again\n");
+    CHECK(xmedia->DeRegisterExternalMediaProcessing(0,
+                                                    kPlaybackAllChannelsMixed));
+    Sleep(testTime);
+    printf("Enabling playout external media processing on ch 0 => "
+        "played audio should now be affected\n");
+    CHECK(xmedia->RegisterExternalMediaProcessing(0, kPlaybackPerChannel,
+                                                  mpobj));
+    Sleep(testTime);
+    Test("Panning volume to the right <=> mixing in stereo @ 32kHz...");
+    CHECK(volume->SetOutputVolumePan(-1, 0.0, 1.0));
+    Sleep(testTime);
+    Test("Back to center volume again <=> mixing in mono @ 32kHz...");
+    CHECK(volume->SetOutputVolumePan(-1, 1.0, 1.0));
+    Sleep(testTime);
+    printf("Back to normal again\n");
+    CHECK(xmedia->DeRegisterExternalMediaProcessing(0, kPlaybackPerChannel));
+    Sleep(testTime);
+    CHECK(StopMedia(0));
+    ANL();
+
+    base->DeleteChannel(0);
+
+    // --------------------------------------------------
+    // Extended tests of emulated stereo encoding schemes
+    // --------------------------------------------------
+
+    CodecInst PCMU;
+    CodecInst G729;
+    CodecInst L16_8;
+    CodecInst L16_16;
+    CodecInst L16_32;
+
+    base->CreateChannel();
+
+    Test(">> Verify emulated stereo encoding for differenct codecs:\n");
+
+    // enable external encryption
+    CHECK(encrypt->RegisterExternalEncryption(0, *this));
+    Test("(ch 0) External Encryption is now enabled:");
+
+    // register all codecs on the receiving side
+    strcpy(PCMU.plname, "PCMU");
+    PCMU.channels = 2;
+    PCMU.pacsize = 160;
+    PCMU.plfreq = 8000;
+    PCMU.pltype = 125;
+    PCMU.rate = 64000;
+    CHECK(codec->SetRecPayloadType(0, PCMU));
+
+    strcpy(G729.plname, "G729");
+    G729.channels = 2;
+    G729.pacsize = 160;
+    G729.plfreq = 8000;
+    G729.pltype = 18;
+    G729.rate = 8000;
+    CHECK(codec->SetRecPayloadType(0, G729));
+
+    strcpy(L16_8.plname, "L16");
+    L16_8.channels = 2;
+    L16_8.pacsize = 160;
+    L16_8.plfreq = 8000;
+    L16_8.pltype = 120;
+    L16_8.rate = 128000;
+    CHECK(codec->SetRecPayloadType(0, L16_8));
+
+    strcpy(L16_16.plname, "L16");
+    L16_16.channels = 2;
+    L16_16.pacsize = 320;
+    L16_16.plfreq = 16000;
+    L16_16.pltype = 121;
+    L16_16.rate = 256000;
+    CHECK(codec->SetRecPayloadType(0, L16_16));
+
+    // NOTE - we cannot send larger than 1500 bytes per RTP packet
+    strcpy(L16_32.plname, "L16");
+    L16_32.channels = 2;
+    L16_32.pacsize = 320;
+    L16_32.plfreq = 32000;
+    L16_32.pltype = 122;
+    L16_32.rate = 512000;
+    CHECK(codec->SetRecPayloadType(0, L16_32));
+
+    // sample-based, 8-bits per sample
+
+    Test("(ch 0) Sending using G.711 (sample based, 8 bits/sample)...");
+    PCMU.channels = 1;
+    CHECK(codec->SetSendCodec(0, PCMU));
+    SetStereoExternalEncryption(0, true, 8);
+    CHECK(StartMedia(0, 12345, true, true, true, true, false));
+    Sleep(testTime);
+
+    // sample-based, 16-bits per sample
+
+    Test("(ch 0) Sending using L16 8kHz (sample based, 16 bits/sample)...");
+    L16_8.channels = 1;
+    CHECK(codec->SetSendCodec(0, L16_8));
+    SetStereoExternalEncryption(0, true, 16);
+    Sleep(testTime);
+
+    Test("(ch 0) Sending using L16 16kHz (sample based, 16 bits/sample)...");
+    L16_16.channels = 1;
+    CHECK(codec->SetSendCodec(0, L16_16));
+    Sleep(testTime);
+
+    Test("(ch 0) Sending using L16 32kHz (sample based, 16 bits/sample)...");
+    L16_32.channels = 1;
+    CHECK(codec->SetSendCodec(0, L16_32));
+    Sleep(testTime);
+
+    Test("(ch 0) Sending using G.729 (frame based)...");
+    G729.channels = 1;
+    CHECK(codec->SetSendCodec(0, G729));
+    Sleep(testTime);
+
+    StopMedia(0);
+
+    // disable external encryption
+    CHECK(encrypt->DeRegisterExternalEncryption(0));
+
+    base->DeleteChannel(0);
+
+    // ------------------------------------------------------------------------
+
+    CHECK(base->Terminate());
+
+    printf("\n\n------------------------------------------------\n");
+    printf(" Test passed!\n");
+    printf("------------------------------------------------\n\n");
+
+    return 0;
+}
+
+}  // namespace voetest
diff --git a/voice_engine/main/test/auto_test/voe_unit_test.h b/voice_engine/main/test/auto_test/voe_unit_test.h
new file mode 100644
index 0000000..979a3f6
--- /dev/null
+++ b/voice_engine/main/test/auto_test/voe_unit_test.h
@@ -0,0 +1,67 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_UNIT_TEST_H
+#define WEBRTC_VOICE_ENGINE_VOE_UNIT_TEST_H
+
+#include "voe_standard_test.h"
+
+namespace voetest {
+
+class VoETestManager;
+
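+// Mixer unit test driver. Implements the external Encryption interface so
+// that outgoing RTP packets can be rewritten to emulate a stereo sender.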
+class VoEUnitTest : public Encryption
+{
+public:
+    VoEUnitTest(VoETestManager& mgr);
+    ~VoEUnitTest() {}
+    int DoTest();
+
+protected:
+    // Encryption
+    void encrypt(int channel_no, unsigned char * in_data,
+                 unsigned char * out_data, int bytes_in, int * bytes_out);
+    void decrypt(int channel_no, unsigned char * in_data,
+                 unsigned char * out_data, int bytes_in, int * bytes_out);
+    void encrypt_rtcp(int channel_no, unsigned char * in_data,
+                      unsigned char * out_data, int bytes_in, int * bytes_out);
+    void decrypt_rtcp(int channel_no, unsigned char * in_data,
+                      unsigned char * out_data, int bytes_in, int * bytes_out);
+
+private:
+    int MenuSelection();
+    int MixerTest();
+    void Sleep(unsigned int timeMillisec, bool addMarker = false);
+    void Wait();
+    int StartMedia(int channel, int rtpPort, bool listen, bool playout,
+                   bool send, bool fileAsMic, bool localFile);
+    int StopMedia(int channel);
+    void Test(const char* msg);
+    void SetStereoExternalEncryption(int channel,
+                                     bool onOff,
+                                     int bitsPerSample);
+
+private:
+    VoETestManager& _mgr;
+    static const char* _key;
+
+private:
+    bool _listening[32];
+    bool _playing[32];
+    bool _sending[32];
+
+private:
+    bool _extOnOff;
+    int _extBitsPerSample;
+    int _extChannel;
+};
+
+}  //  namespace voetest
+#endif // WEBRTC_VOICE_ENGINE_VOE_UNIT_TEST_H
diff --git a/voice_engine/main/test/auto_test/zip.exe b/voice_engine/main/test/auto_test/zip.exe
new file mode 100755
index 0000000..286227a
--- /dev/null
+++ b/voice_engine/main/test/auto_test/zip.exe
Binary files differ
diff --git a/voice_engine/main/test/ui_linux_test/ui_linux_test.cc b/voice_engine/main/test/ui_linux_test/ui_linux_test.cc
new file mode 100644
index 0000000..4a1b79f
--- /dev/null
+++ b/voice_engine/main/test/ui_linux_test/ui_linux_test.cc
@@ -0,0 +1,749 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "voe_errors.h"
+#include "voe_base.h"
+#include "voe_codec.h"
+#include "voe_volume_control.h"
+#include "voe_dtmf.h"
+#include "voe_rtp_rtcp.h"
+#include "voe_audio_processing.h"
+#include "voe_file.h"
+#include "voe_video_sync.h"
+#include "voe_encryption.h"
+#include "voe_hardware.h"
+#include "voe_external_media.h"
+#include "voe_network.h"
+#include "voe_neteq_stats.h"
+#include "engine_configurations.h"
+#ifdef DYLIB
+#include "dlfcn.h"
+#endif
+
+//#define DEBUG
+
+//#define EXTERNAL_TRANSPORT
+
+using namespace webrtc;
+
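+// Checks the result of the previous API call: on failure it prints the call
+// position and line number together with the last VoiceEngine error and sets
+// the error flag. It also increments the position counter, so it must follow
+// every call whose return value is stored in 'res'.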
+#define VALIDATE                                                        \
+    if (res != 0)                                                       \
+    {                                                                   \
+        printf("*** Error at position %i / line %i \n", cnt, __LINE__); \
+        printf("*** Error code = %i \n", base1->LastError());    \
+        error = 1;                                                      \
+    }                                                                   \
+    cnt++;
+
+VoiceEngine* m_voe = NULL;
+VoEBase* base1 = NULL;
+VoECodec* codec = NULL;
+VoEVolumeControl* volume = NULL;
+VoEDtmf* dtmf = NULL;
+VoERTP_RTCP* rtp_rtcp = NULL;
+VoEAudioProcessing* apm = NULL;
+VoENetwork* netw = NULL;
+VoEFile* file = NULL;
+VoEVideoSync* vsync = NULL;
+VoEEncryption* encr = NULL;
+VoEHardware* hardware = NULL;
+VoEExternalMedia* xmedia = NULL;
+VoENetEqStats* neteqst = NULL;
+
+void loopBack(int);
+
+#ifdef EXTERNAL_TRANSPORT
+
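+// Loopback transport: outgoing RTP/RTCP packets are fed straight back into
+// VoENetwork on the same channel, so the send path can be exercised without
+// opening any sockets.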
+class my_transportation : public Transport
+{
+    int SendPacket(int channel,const void *data,int len);
+    int SendRTCPPacket(int channel, const void *data, int len);
+};
+
+int my_transportation::SendPacket(int channel,const void *data,int len)
+{
+    netw->ReceivedRTPPacket(channel, data, len);
+    return 0;
+}
+
+int my_transportation::SendRTCPPacket(int channel, const void *data, int len)
+{
+    netw->ReceivedRTCPPacket(channel, data, len);
+    return 0;
+}
+
+my_transportation my_transport;
+
+#endif
+
+int main(int /*argc*/, char** /*argv*/)
+{
+    int res = 0;
+    int cnt = 0;
+    int error = 0;
+
+    printf("Test started \n");
+
+    m_voe = VoiceEngine::Create();
+    base1 = VoEBase::GetInterface(m_voe);
+    codec = VoECodec::GetInterface(m_voe);
+    apm = VoEAudioProcessing::GetInterface(m_voe);
+    volume = VoEVolumeControl::GetInterface(m_voe);
+    dtmf = VoEDtmf::GetInterface(m_voe);
+    rtp_rtcp = VoERTP_RTCP::GetInterface(m_voe);
+    netw = VoENetwork::GetInterface(m_voe);
+    file = VoEFile::GetInterface(m_voe);
+    vsync = VoEVideoSync::GetInterface(m_voe);
+    encr = VoEEncryption::GetInterface(m_voe);
+    hardware = VoEHardware::GetInterface(m_voe);
+    xmedia = VoEExternalMedia::GetInterface(m_voe);
+    neteqst = VoENetEqStats::GetInterface(m_voe);
+
+    int year = 0, month = 0, day = 0; // Set correct expiry values here when testing
+    printf("Set trace filenames (enable trace)\n");
+    //VoiceEngine::SetTraceFilter(kTraceAll);
+    // VoiceEngine::SetTraceFilter(kTraceAll);
+    // VoiceEngine::SetTraceFilter(kTraceStream |
+    //  kTraceStateInfo | kTraceWarning | kTraceError |
+    //  kTraceCritical | kTraceApiCall | kTraceModuleCall |
+    // kTraceMemory | kTraceDebug | kTraceInfo);
+    //VoiceEngine::SetTraceFilter(kTraceStateInfo | kTraceWarning |
+    // kTraceError | kTraceCritical | kTraceApiCall |
+    // kTraceModuleCall | kTraceMemory | kTraceInfo);
+
+    res = VoiceEngine::SetTraceFile("4_X_trace.txt");
+    VALIDATE;
+    //res = VoiceEngine::SetTraceCallback(NULL);
+    //VALIDATE;
+
+    int select = 1;
+#ifdef WEBRTC_LINUX
+    printf("Enter 0 for LINUX_AUDIO_ALSA\nEnter 1 for LINUX_AUDIO_PULSE\n");
+    int dummy(0);
+    dummy = scanf("%d", &select);
+#endif
+
+    printf("Init\n");
+    res = base1->Init();
+    if (res != 0)
+    {
+        printf("\nError calling Init: %d\n", base1->LastError());
+        fflush(NULL);
+        exit(0);
+    }
+
+    cnt++;
+    printf("Version\n");
+    char tmp[1024];
+    res = base1->GetVersion(tmp);
+    VALIDATE;
+    cnt++;
+    printf("%s\n", tmp);
+    loopBack(select);
+
+    printf("Terminate \n");
+
+    res = base1->Terminate();
+    VALIDATE;
+
+    if (base1)
+        base1->Release();
+
+    if (codec)
+        codec->Release();
+
+    if (volume)
+        volume->Release();
+
+    if (dtmf)
+        dtmf->Release();
+
+    if (rtp_rtcp)
+        rtp_rtcp->Release();
+
+    if (apm)
+        apm->Release();
+
+    if (netw)
+        netw->Release();
+
+    if (file)
+        file->Release();
+
+    if (vsync)
+        vsync->Release();
+
+    if (encr)
+        encr->Release();
+
+    if (hardware)
+        hardware->Release();
+
+    if (xmedia)
+        xmedia->Release();
+
+    if (neteqst)
+        neteqst->Release();
+
+    VoiceEngine::Delete(m_voe);
+
+    return 0;
+}
+
+void loopBack(int)
+{
+    int chan, cnt, error, res;
+    CodecInst cinst;
+    cnt = 0;
+    int i;
+    int dummy(0);
+    int codecinput;
+    bool AEC = false;
+    bool AGC = true;
+    bool AGC1 = false;
+    bool VAD = false;
+    bool NS = false;
+    bool NS1 = false;
+
+    chan = base1->CreateChannel();
+    if (chan < 0)
+    {
+        printf("Error at position %i\n", cnt);
+        printf("************ Error code = %i\n", base1->LastError());
+        fflush(NULL);
+        error = 1;
+    }
+    cnt++;
+
+#ifdef EXTERNAL_TRANSPORT
+    my_transportation ch0transport;
+    printf("Enabling external transport \n");
+    netw->SetExternalTransport(0, true, &ch0transport);
+#else
+    char ip[64];
+#ifdef DEBUG
+    strcpy(ip, "127.0.0.1");
+#else
+    char localip[64];
+    netw->GetLocalIP(localip, 64);
+    printf("local IP:%s\n", localip);
+
+    printf("1. 127.0.0.1 \n");
+    printf("2. Specify IP \n");
+    dummy = scanf("%i", &i);
+    if (1 == i)
+        strcpy(ip, "127.0.0.1");
+    else
+    {
+        printf("Specify remote IP: ");
+        dummy = scanf("%s", ip);
+    }
+#endif
+
+    int colons(0), j(0);
+    // Look for a ':' to detect an IPv6 address (bounds check before the read).
+    while (j < 64 && ip[j] != '\0' && !(colons = (ip[j++] == ':')))
+        ;
+    if (colons)
+    {
+        printf("Enabling IPv6\n");
+        res = netw->EnableIPv6(0);
+        VALIDATE;
+    }
+
+    int rPort;
+#ifdef DEBUG
+    rPort=8500;
+#else
+    printf("Specify remote port (1=1234): ");
+    dummy = scanf("%i", &rPort);
+    if (1 == rPort)
+        rPort = 1234;
+    printf("Set Send port \n");
+#endif
+
+    printf("Set Send IP \n");
+    res = base1->SetSendDestination(chan, rPort, ip);
+    VALIDATE
+
+    int lPort;
+#ifdef DEBUG
+    lPort=8500;
+#else
+    printf("Specify local port (1=1234): ");
+    dummy = scanf("%i", &lPort);
+    if (1 == lPort)
+        lPort = 1234;
+    printf("Set Rec Port \n");
+#endif
+    res = base1->SetLocalReceiver(chan, lPort);
+    VALIDATE
+#endif
+
+    printf("\n");
+    for (i = 0; i < codec->NumOfCodecs(); i++)
+    {
+        res = codec->GetCodec(i, cinst);
+        VALIDATE
+        if (strncmp(cinst.plname, "ISAC", 4) == 0 && cinst.plfreq == 32000)
+        {
+            printf("%i. ISAC-swb pltype:%i plfreqi:%i\n", i, cinst.pltype,
+                   cinst.plfreq);
+        } else
+        {
+            printf("%i. %s pltype:%i plfreq:%i\n", i, cinst.plname,
+                   cinst.pltype, cinst.plfreq);
+        }
+    }
+#ifdef DEBUG
+    codecinput=0;
+#else
+    printf("Select send codec: ");
+    dummy = scanf("%i", &codecinput);
+#endif
+    codec->GetCodec(codecinput, cinst);
+
+    printf("Set primary codec\n");
+    res = codec->SetSendCodec(chan, cinst);
+    VALIDATE
+
+    // Call loop
+    bool newcall = true;
+    while (newcall)
+    {
+
+#ifdef WEBRTC_LINUX
+        // ALSA
+
+        // NOTE: hw:0,0, is NOT the default device, it's a raw kernel device mapping.
+        //       "default" is the default device, which is used if no call is made
+        //        to SetRecordingDevice / SetPlayoutDevice.
+        //        This is what we shall use for normal testing.
+        int rd(-1), pd(-1);
+        res = hardware->GetNumOfRecordingDevices(rd);
+        VALIDATE;
+        res = hardware->GetNumOfPlayoutDevices(pd);
+        VALIDATE;
+
+        char dn[64] = { 0 };
+        char guid[128] = { 0 };
+        printf("\nPlayout devices (%d): \n", pd);
+        for (j=0; j<pd; ++j)
+        {
+            res = hardware->GetPlayoutDeviceName(j, dn, guid);
+            VALIDATE;
+            printf("  %d: %s \n", j, dn);
+        }
+
+        printf("Recording devices (%d): \n", rd);
+        for (j=0; j<rd; ++j)
+        {
+            res = hardware->GetRecordingDeviceName(j, dn, guid);
+            VALIDATE;
+            printf("  %d: %s \n", j, dn);
+        }
+
+        printf("Select playout device: ");
+        dummy = scanf("%d", &pd);
+        res = hardware->SetPlayoutDevice(pd); // Will use plughw for hardware devices
+        VALIDATE;
+        printf("Select recording device: ");
+        dummy = scanf("%d", &rd);
+        printf("Setting sound devices \n");
+        res = hardware->SetRecordingDevice(rd); // Will use plughw for hardware devices
+        VALIDATE;
+
+#endif // LINUX
+        res = codec->SetVADStatus(0, VAD);
+        VALIDATE
+
+        res = apm->SetAgcStatus(AGC);
+        VALIDATE
+
+        res = apm->SetEcStatus(AEC);
+        VALIDATE
+
+        res = apm->SetNsStatus(NS);
+        VALIDATE
+
+#ifdef DEBUG
+        i = 1;
+#else
+        printf("\n1. Send, listen and playout \n");
+        printf("2. Send only \n");
+        printf("3. Listen and playout only \n");
+        printf("Select transfer mode: ");
+        dummy = scanf("%i", &i);
+#endif
+        const bool send = !(3 == i);
+        const bool receive = !(2 == i);
+
+        if (receive)
+        {
+#ifndef EXTERNAL_TRANSPORT
+            printf("Start Listen \n");
+            res = base1->StartReceive(chan);
+            VALIDATE;
+#endif
+
+            printf("Start Playout \n");
+            res = base1->StartPlayout(chan);
+            VALIDATE;
+        }
+
+        if (send)
+        {
+            // GQoS is not supported on Linux; the only way to set the TOS
+            // value is this function. The useSetSockopt parameter is ignored
+            // since setsockopt is the only available mechanism.
+            //res=netw->SetSendTOS(chan, 12, false);
+            //VALIDATE;
+
+            printf("Start Send \n");
+            res = base1->StartSend(chan);
+            VALIDATE;
+        }
+
+        //        res=file->StartRecordingMicrophone("MicRecording.pcm");
+
+
+        printf("Getting mic volume \n");
+        unsigned int vol = 999;
+        res = volume->GetMicVolume(vol);
+        VALIDATE
+        if ((vol > 255) || (vol < 1))
+        {
+            printf("\n****ERROR in GetMicVolume");
+
+        }
+        int forever = 1;
+        while (forever)
+        {
+            printf("\nActions\n");
+
+            printf("Codec Changes\n");
+            for (i = 0; i < codec->NumOfCodecs(); i++)
+            {
+                res = codec->GetCodec(i, cinst);
+                VALIDATE
+                if (strncmp(cinst.plname, "ISAC", 4) == 0 && cinst.plfreq
+                    == 32000)
+                {
+                    printf("\t%i. ISAC-swb pltype:%i plfreq:%i\n", i,
+                           cinst.pltype, cinst.plfreq);
+                } else
+                {
+                    printf("\t%i. %s pltype:%i plfreq:%i\n", i, cinst.plname,
+                           cinst.pltype, cinst.plfreq);
+                }
+            }
+            printf("Other\n");
+            const int noCodecs = i - 1;
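+            // After the codec loop, i equals NumOfCodecs(); the action items
+            // below continue numbering from that index, so action n matches
+            // codecinput == noCodecs + n.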
+            printf("\t%i. Toggle VAD\n", i);
+            i++;
+            printf("\t%i. Toggle AGC\n", i);
+            i++;
+            printf("\t%i. Toggle NS\n", i);
+            i++;
+            printf("\t%i. Toggle echo control\n", i);
+            i++;
+            printf("\t%i. Select AEC\n", i);
+            i++;
+            printf("\t%i. Select AECM\n", i);
+            i++;
+            printf("\t%i. Get speaker volume\n", i);
+            i++;
+            printf("\t%i. Set speaker volume\n", i);
+            i++;
+            printf("\t%i. Get microphone volume\n", i);
+            i++;
+            printf("\t%i. Set microphone volume\n", i);
+            i++;
+            printf("\t%i. Stereo \n", i);
+            i++;
+            printf("\t%i. Play local file \n", i);
+            i++;
+            printf("\t%i. Change Playout Device \n", i);
+            i++;
+            printf("\t%i. Change Recording Device \n", i);
+            i++;
+            printf("\t%i. Toggle Remote AGC \n", i);
+            i++;
+            printf("\t%i. Toggle Remote NS \n", i);
+            i++;
+            printf("\t%i. agc status \n", i);
+            i++;
+            printf("\t%i. Stop call \n", i);
+
+            printf("Select action or %i to stop the call: ", i);
+            dummy = scanf("%i", &codecinput);
+
+            if (codecinput < codec->NumOfCodecs())
+            {
+                res = codec->GetCodec(codecinput, cinst);
+                VALIDATE
+
+                // Test "Super Wideband" SWB
+                //if (!strcmp(cinst.plname, "G7221"))
+                //{
+                //    cinst.plfreq = 32000;
+                //    cinst.pacsize = 640;
+                //    cinst.rate = 24000; 
+                //    cinst.rate = 32000; 
+                //    cinst.rate = 48000; 
+                //    base1->StopPlayout(0);
+                //    base1->StopReceive(0);
+                //    codec->SetRecPayloadType(0, cinst);
+                //    base1->StartReceive(0);
+                //    base1->StartPlayout(0);
+                //}
+
+                printf("Set primary codec\n");
+                res = codec->SetSendCodec(chan, cinst);
+                VALIDATE
+            } else if (codecinput == (noCodecs + 1))
+            {
+                VAD = !VAD;
+                res = codec->SetVADStatus(0, VAD);
+                VALIDATE
+                if (VAD)
+                    printf("\n VAD is now on! \n");
+                else
+                    printf("\n VAD is now off! \n");
+            } else if (codecinput == (noCodecs + 2))
+            {
+                AGC = !AGC;
+
+                res = apm->SetAgcStatus(AGC);
+                VALIDATE
+                if (AGC)
+                    printf("\n AGC is now on! \n");
+                else
+                    printf("\n AGC is now off! \n");
+            } else if (codecinput == (noCodecs + 3))
+            {
+                NS = !NS;
+                res = apm->SetNsStatus(NS);
+                VALIDATE
+                if (NS)
+                    printf("\n NS is now on! \n");
+                else
+                    printf("\n NS is now off! \n");
+            } else if (codecinput == (noCodecs + 4))
+            {
+                AEC = !AEC;
+                res = apm->SetEcStatus(AEC, kEcUnchanged);
+                VALIDATE
+                if (AEC)
+                    printf("\n Echo control is now on! \n");
+                else
+                    printf("\n Echo control is now off! \n");
+            } else if (codecinput == (noCodecs + 5))
+            {
+                res = apm->SetEcStatus(AEC, kEcAec);
+                VALIDATE
+                printf("\n AEC selected! \n");
+                if (AEC)
+                    printf(" (Echo control is on)\n");
+                else
+                    printf(" (Echo control is off)\n");
+            } else if (codecinput == (noCodecs + 6))
+            {
+                res = apm->SetEcStatus(AEC, kEcAecm);
+                VALIDATE
+                printf("\n AECM selected! \n");
+                if (AEC)
+                    printf(" (Echo control is on)\n");
+                else
+                    printf(" (Echo control is off)\n");
+            } else if (codecinput == (noCodecs + 7))
+            {
+                unsigned vol(0);
+                res = volume->GetSpeakerVolume(vol);
+                VALIDATE;
+                printf("\n Speaker Volume is %d \n", vol);
+            } else if (codecinput == (noCodecs + 8))
+            {
+                printf("Level: ");
+                dummy = scanf("%i", &i);
+                res = volume->SetSpeakerVolume(i);
+                VALIDATE;
+            } else if (codecinput == (noCodecs + 9))
+            {
+                unsigned vol(0);
+                res = volume->GetMicVolume(vol);
+                VALIDATE;
+                printf("\n Microphone Volume is %d \n", vol);
+            } else if (codecinput == (noCodecs + 10))
+            {
+                printf("Level: ");
+                dummy = scanf("%i", &i);
+                res = volume->SetMicVolume(i);
+                VALIDATE;
+            } else if (codecinput == (noCodecs + 11))
+            {
+                // Use a different (non-loopback) IP address for the remote
+                // side to test this; use RTP to send to port 1234.
+                // Here we are using channel 0 to test stereo.
+                // Refer to the Windows code stub for more info.
+                CodecInst c;
+                base1->DeleteChannel(chan);
+                res = base1->CreateChannel();
+                VALIDATE
+                base1->StopPlayout(chan);
+                base1->StopReceive(chan);
+                //G729 
+                //c.channels = 2; c.pacsize = 160; c.plfreq = 8000;
+                //strcpy(c.plname, "G729"); c.pltype = 18; c.rate = 8000;
+                //G722_1_C - 48k rate
+                //c.channels = 2; c.pacsize = 640; c.plfreq = 32000;
+                //strcpy(c.plname, "G7221"); c.pltype = 125; c.rate = 48000;
+                //L16 at 16Khz
+                //c.channels = 2; c.pacsize = 160; c.plfreq = 16000;
+                //strcpy(c.plname, "L16"); c.pltype = 125; c.rate = 256000;
+                //PCMU
+                c.channels = 2;
+                c.pacsize = 160;
+                c.plfreq = 8000;
+                strcpy(c.plname, "PCMU");
+                c.pltype = 125;
+                c.rate = 64000;
+                res = codec->SetRecPayloadType(chan, c);
+                VALIDATE
+                res = base1->SetLocalReceiver(chan, lPort);
+                VALIDATE
+                base1->StartReceive(chan);
+                res = base1->SetSendDestination(chan, lPort, ip);
+                VALIDATE
+                res = base1->StartSend(chan);
+                VALIDATE;
+                base1->StartPlayout(chan);
+            } else if (codecinput == (noCodecs + 12))
+            {
+                file->StartPlayingFileLocally(0, "singleUserDemo.pcm");
+            } else if (codecinput == (noCodecs + 13))
+            {
+                // change the playout device with current call
+                int num_pd(-1);
+                res = hardware->GetNumOfPlayoutDevices(num_pd);
+                VALIDATE;
+
+                char dn[64] = { 0 };
+                char guid[128] = { 0 };
+
+                printf("\nPlayout devices (%d): \n", num_pd);
+                for (j = 0; j < num_pd; ++j)
+                {
+                    res = hardware->GetPlayoutDeviceName(j, dn, guid);
+                    VALIDATE;
+                    printf("  %d: %s \n", j, dn);
+                }
+                printf("Select playout device: ");
+                dummy = scanf("%d", &num_pd);
+                // Will use plughw for hardware devices
+                res = hardware->SetPlayoutDevice(num_pd);
+                VALIDATE;
+
+            } else if (codecinput == (noCodecs + 14))
+            {
+                // change the recording device with current call
+                int num_rd(-1);
+
+                res = hardware->GetNumOfRecordingDevices(num_rd);
+                VALIDATE;
+
+                char dn[64] = { 0 };
+                char guid[128] = { 0 };
+
+                printf("Recording devices (%d): \n", num_rd);
+                for (j = 0; j < num_rd; ++j)
+                {
+                    res = hardware->GetRecordingDeviceName(j, dn, guid);
+                    VALIDATE;
+                    printf("  %d: %s \n", j, dn);
+                }
+
+                printf("Select recording device: ");
+                dummy = scanf("%d", &num_rd);
+                printf("Setting sound devices \n");
+                // Will use plughw for hardware devices
+                res = hardware->SetRecordingDevice(num_rd);
+                VALIDATE;
+            } else if (codecinput == (noCodecs + 15))
+            {
+                // Remote AGC
+                AGC1 = !AGC1;
+                res = apm->SetRxAgcStatus(chan, AGC1);
+                VALIDATE
+                if (AGC1)
+                    printf("\n Remote AGC is now on! \n");
+                else
+                    printf("\n Remote AGC is now off! \n");
+            } else if (codecinput == (noCodecs + 16))
+            {
+                // Remote NS
+                NS1 = !NS1;
+                res = apm->SetRxNsStatus(chan, NS1);
+                VALIDATE
+                if (NS1)
+                    printf("\n Remote NS is now on! \n");
+                else
+                    printf("\n Remote NS is now off! \n");
+            } else if (codecinput == (noCodecs + 17))
+            {
+                AgcModes agcmode;
+                bool enable;
+                res = apm->GetAgcStatus(enable, agcmode);
+                VALIDATE
+                printf("\n AGC enale is %d , mode is %d \n", enable, agcmode);
+            } else
+                break;
+        }
+
+        if (send)
+        {
+            printf("Stop Send \n");
+            res = base1->StopSend(chan);
+            VALIDATE;
+        }
+
+        if (receive)
+        {
+            printf("Stop Playout \n");
+            res = base1->StopPlayout(chan);
+            VALIDATE;
+
+#ifndef EXTERNAL_TRANSPORT
+            printf("Stop Listen \n");
+            res = base1->StopReceive(chan);
+            VALIDATE;
+#endif
+        }
+
+        printf("\n1. New call \n");
+        printf("2. Quit \n");
+        printf("Select action: ");
+        dummy = scanf("%i", &i);
+        newcall = (1 == i);
+
+        // Call loop
+    }
+
+    printf("Delete Channel \n");
+    res = base1->DeleteChannel(chan);
+    VALIDATE
+}
diff --git a/voice_engine/main/test/win_test/Resource.h b/voice_engine/main/test/win_test/Resource.h
new file mode 100644
index 0000000..5ae9c5f
--- /dev/null
+++ b/voice_engine/main/test/win_test/Resource.h
@@ -0,0 +1,241 @@
+//{{NO_DEPENDENCIES}}
+// Microsoft Visual C++ generated include file.
+// Used by WinTest.rc
+//
+#define IDM_ABOUTBOX                    0x0010
+#define IDD_ABOUTBOX                    100
+#define IDS_ABOUTBOX                    101
+#define IDD_WINTEST_DIALOG              102
+#define IDR_MAINFRAME                   128
+#define IDD_DTMF_DIALOG                 129
+#define IDC_BUTTON_CREATE_1             1000
+#define IDC_BUTTON_DELETE_1             1001
+#define IDC_EDIT_1                      1002
+#define IDC_BUTTON_CREATE_2             1003
+#define IDC_BUTTON_DELETE_2             1004
+#define IDC_EDIT_2                      1005
+#define IDC_EDIT_MESSAGE                1006
+#define IDC_BUTTON_START_LISTEN_1       1007
+#define IDC_COMBO_IP_1                  1008
+#define IDC_EDIT_TX_PORT_1              1009
+#define IDC_EDIT_RX_PORT_1              1010
+#define IDC_COMBO_CODEC_1               1011
+#define IDC_BUTTON_STOP_LISTEN_1        1012
+#define IDC_STATIC_LISTEN               1013
+#define IDC_BUTTON_START_PLAYOUT_1      1014
+#define IDC_BUTTON_STOP_PLAYOUT_1       1015
+#define IDC_STATIC_PLAY                 1016
+#define IDC_BUTTON_START_SEND_1         1017
+#define IDC_BUTTON_STOP_SEND_1          1018
+#define IDC_STATIC_SEND                 1019
+#define IDC_COMBO_IP_2                  1020
+#define IDC_STATIC_IP                   1021
+#define IDC_STATIC_PORTS                1022
+#define IDC_STATIC_CODEC                1023
+#define IDC_STATIC_CHANNEL              1024
+#define IDC_STATIC_ID                   1025
+#define IDC_EDIT_TX_PORT_2              1026
+#define IDC_EDIT_RX_PORT_2              1027
+#define IDC_COMBO_CODEC_2               1028
+#define IDC_BUTTON_START_LISTEN_2       1029
+#define IDC_BUTTON_STOP_LISTEN_2        1030
+#define IDC_BUTTON_START_PLAYOUT_2      1031
+#define IDC_BUTTON_STOP_PLAYOUT_2       1032
+#define IDC_BUTTON_START_SEND_2         1033
+#define IDC_BUTTON_STOP_SEND_2          1034
+#define IDC_BUTTON_START_SEND_3         1035
+#define IDC_BUTTON_TEST_1_1             1035
+#define IDC_BUTTON_TEST_1               1035
+#define IDC_EDIT_RESULT                 1036
+#define IDC_EDIT_N_FAILS                1037
+#define IDC_STATIC_ERROR                1038
+#define IDC_EDIT_LAST_ERROR             1039
+#define IDC_STATIC_LAST_ERROR           1040
+#define IDC_STATIC_PLAY_FILE            1041
+#define IDC_STATIC_EXTERNAL             1042
+#define IDC_CHECK_EXT_TRANS_1           1043
+#define IDC_CHECK2                      1044
+#define IDC_CHECK_PLAY_FILE_IN_1        1044
+#define IDC_CHECK_PLAY_FILE_OUT_1       1045
+#define IDC_CHECK_PLAY_FILE_IN_2        1046
+#define IDC_CHECK_PLAY_FILE_OUT_2       1047
+#define IDC_CHECK_EXT_TRANS_2           1048
+#define IDC_STATIC_ALL_CHANNELS         1049
+#define IDC_CHECK_PLAY_FILE_IN          1050
+#define IDC_CHECK_PLAY_FILE_OUT         1051
+#define IDC_CHECK_EXT_MEDIA_IN_1        1051
+#define IDC_COMBO_REC_DEVICE            1052
+#define IDC_STATIC_REC_DEVICE           1053
+#define IDC_COMBO_PLAY_DEVICE2          1054
+#define IDC_COMBO_PLAY_DEVICE           1054
+#define IDC_STATIC_PLAY_DEVICE          1055
+#define IDC_CHECK_EXT_MEDIA_PLAY_1      1056
+#define IDC_CHECK_EXT_MEDIA_OUT_1       1056
+#define IDC_STATIC_PLAY_FILE2           1057
+#define IDC_SLIDER_INPUT_VOLUME         1058
+#define IDC_STATIC_MIC_VOLUME           1059
+#define IDC_SLIDER_OUTPUT_VOLUME        1060
+#define IDC_STATIC_SPK_VOLUME2          1061
+#define IDC_STATIC_SPK_VOLUME           1061
+#define IDC_CHECK_PLAY_FILE_IN2         1062
+#define IDC_CHECK_AGC                   1062
+#define IDC_STATIC_MIC_VOLUME2          1063
+#define IDC_STATIC_AUDIO_LEVEL_IN       1063
+#define IDC_PROGRESS_AUDIO_LEVEL_IN     1064
+#define IDC_CHECK_AGC2                  1065
+#define IDC_CHECK_NS                    1065
+#define IDC_BUTTON_1                    1065
+#define IDC_CHECK_VAD                   1066
+#define IDC_CHECK_EXT_MEDIA_IN_2        1066
+#define IDC_BUTTON_2                    1066
+#define IDC_CHECK_VAD2                  1067
+#define IDC_CHECK_EC                    1067
+#define IDC_BUTTON_3                    1067
+#define IDC_CHECK_VAD_1                 1068
+#define IDC_BUTTON_4                    1068
+#define IDC_CHECK_VAD_2                 1069
+#define IDC_CHECK_EXT_MEDIA_OUT_2       1069
+#define IDC_BUTTON_5                    1069
+#define IDC_CHECK_VAD_3                 1070
+#define IDC_BUTTON_6                    1070
+#define IDC_CHECK_MUTE_IN               1071
+#define IDC_BUTTON_7                    1071
+#define IDC_CHECK_MUTE_IN_1             1072
+#define IDC_BUTTON_8                    1072
+#define IDC_CHECK_MUTE_IN_2             1073
+#define IDC_BUTTON_9                    1073
+#define IDC_CHECK_SRTP_TX_1             1074
+#define IDC_BUTTON_10                   1074
+#define IDC_CHECK_SRTP_RX_1             1075
+#define IDC_BUTTON_11                   1075
+#define IDC_STATIC_PLAY_FILE3           1076
+#define IDC_STATIC_SRTP                 1076
+#define IDC_BUTTON_12                   1076
+#define IDC_CHECK_SRTP_TX_2             1077
+#define IDC_BUTTON_13                   1077
+#define IDC_CHECK_SRTP_RX_2             1078
+#define IDC_BUTTON_14                   1078
+#define IDC_CHECK_EXT_ENCRYPTION_1      1079
+#define IDC_BUTTON_15                   1079
+#define IDC_STATIC_PLAY_FILE4           1080
+#define IDC_BUTTON_16                   1080
+#define IDC_CHECK_EXT_ENCRYPTION_2      1081
+#define IDC_BUTTON_17                   1081
+#define IDC_BUTTON_DTMF_1               1082
+#define IDC_BUTTON_18                   1082
+#define IDC_EDIT_DTMF_EVENT             1083
+#define IDC_CHECK_REC_                  1083
+#define IDC_CHECK_REC_MIC               1083
+#define IDC_STATIC_DTMF_EVENT           1084
+#define IDC_BUTTON_DTMF_2               1084
+#define IDC_STATIC_GROUP_DTMF           1085
+#define IDC_CHECK_CONFERENCE_1          1085
+#define IDC_BUTTON_19                   1086
+#define IDC_CHECK_CONFERENCE_2          1086
+#define IDC_BUTTON_20                   1087
+#define IDC_CHECK_ON_HOLD_1             1087
+#define IDC_BUTTON_21                   1088
+#define IDC_CHECK_ON_HOLD_2             1088
+#define IDC_BUTTON_22                   1089
+#define IDC_CHECK_DTMF_PLAYOUT_RX       1089
+#define IDC_CHECK_EXT_MEDIA_IN          1089
+#define IDC_STATIC_PLAYOUT_RX           1090
+#define IDC_EDIT_GET_OUTPUT             1090
+#define IDC_CHECK_DTMF_PLAY_TONE        1091
+#define IDC_STATIC_LAST_ERROR2          1091
+#define IDC_STATIC_GET                  1091
+#define IDC_STATIC_PLAY_TONE            1092
+#define IDC_CHECK_EXT_MEDIA_OUT         1092
+#define IDC_CHECK_START_STOP_MODE       1093
+#define IDC_BUTTON_SET_TX_TELEPHONE_PT  1093
+#define IDC_PROGRESS_AUDIO_LEVEL_IN2    1093
+#define IDC_PROGRESS_AUDIO_LEVEL_OUT    1093
+#define IDC_EDIT_EVENT_LENGTH           1094
+#define IDC_EDIT_RX_PORT_3              1094
+#define IDC_EDIT_DELAY_ESTIMATE_1       1094
+#define IDC_STATIC_EVENT_LENGTH         1095
+#define IDC_EDIT_PLAYOUT_BUFFER_SIZE    1095
+#define IDC_STATIC_START_STOP_MODE      1096
+#define IDC_EDIT_EVENT_RX_PT            1096
+#define IDC_CHECK_DELAY_ESTIMATE_1      1096
+#define IDC_EDIT_EVENT_ATTENUATION      1097
+#define IDC_CHECK_AGC_1                 1097
+#define IDC_CHECK_EVENT_INBAND          1098
+#define IDC_CHECK_NS_1                  1098
+#define IDC_STATIC_EVENT_ATTENUATION    1099
+#define IDC_STATIC_SRTP2                1099
+#define IDC_STATIC_RX_VQE               1099
+#define IDC_EDIT_EVENT_TX_PT            1100
+#define IDC_CHECK_REC_MIC2              1100
+#define IDC_CHECK_REC_CALL              1100
+#define IDC_CHECK_DTMF_FEEDBACK         1101
+#define IDC_CHECK_REC_CALL2             1101
+#define IDC_CHECK_TYPING_DETECTION      1101
+#define IDC_CHECK_START_STOP_MODE2      1102
+#define IDC_CHECK_DIRECT_FEEDBACK       1102
+#define IDC_CHECK_FEC                   1102
+#define IDC_BUTTON_SET_RX_TELEPHONE_PT_TYPE 1103
+#define IDC_BUTTON_SET_RX_TELEPHONE_PT  1103
+#define IDC_BUTTON_CLEAR_ERROR_CALLBACK 1103
+#define IDC_EDIT_EVENT_CODE             1104
+#define IDC_STATIC_DIRECT_FEEDBACK      1105
+#define IDC_RADIO_SINGLE                1106
+#define IDC_RADIO_MULTI                 1107
+#define IDC_RADIO_START_STOP            1108
+#define IDC_STATIC_MODE                 1109
+#define IDC_STATIC_EVENT_RX_PT          1110
+#define IDC_STATIC_EVENT_TX_PT          1111
+#define IDC_STATIC_PT                   1112
+#define IDC_BUTTON_SEND_TELEPHONE_EVENT 1113
+#define IDC_STATIC_EVENT_CODE           1114
+#define IDC_CHECK_EVENT_DETECTION       1115
+#define IDC_CHECK_DETECT_INBAND         1116
+#define IDC_CHECK_DETECT_OUT_OF_BAND    1117
+#define IDC_STATIC_INBAND_DETECTION     1118
+#define IDC_STATIC_OUT_OF_BAND_DETECTION 1119
+#define IDC_STATIC_EVENT_DETECTION      1120
+#define IDC_STATIC_TELEPHONE_EVENTS     1121
+#define IDC_EDIT_EVENT_CODE2            1122
+#define IDC_EDIT_ON_EVENT               1122
+#define IDC_EDIT_ON_EVENT_OUT_OF_BAND   1122
+#define IDC_STATIC_ON_EVENT             1123
+#define IDC_EDIT_ON_EVENT_INBAND        1123
+#define IDC_STATIC_EVEN                 1124
+#define IDC_STATIC_LINE                 1125
+#define IDC_LIST_CODEC_1                1128
+#define IDC_EDIT2                       1129
+#define IDC_EDIT_CODEC_1                1129
+#define IDC_STATIC_PANNING              1131
+#define IDC_SLIDER_PAN_LEFT             1132
+#define IDC_SLIDER_PAN_RIGHT            1133
+#define IDC_STATIC_LEFT                 1134
+#define IDC_STATIC_LEFT2                1135
+#define IDC_STATIC_RIGHT                1135
+#define IDC_BUTTON_VERSION              1136
+#define IDC_STATIC_PLAYOUT_BUFFER       1137
+#define IDC_CHECK_RXVAD                 1138
+#define IDC_EDIT1                       1139
+#define IDC_EDIT_RXVAD                  1139
+#define IDC_STATIC_RX_PORT              1140
+#define IDC_STATIC_RX_PORT2             1141
+#define IDC_EDIT3                       1142
+#define IDC_EDIT_AUDIO_LAYER            1142
+#define IDC_EDIT_AUDIO_LAYER2           1143
+#define IDC_EDIT_CPU_LOAD               1143
+#define IDC_STATIC_ERROR_CALLBACK       1144
+#define IDC_EDIT_ERROR_CALLBACK         1145
+#define IDC_EDIT_RX_CODEC_1             1146
+#define IDC_STATIC_BYTES_SENT_TEXT      1147
+#define IDC_EDIT_RTCP_STAT              1147
+#define IDC_EDIT_RTCP_STAT_1            1147
+
+// Next default values for new objects
+// 
+#ifdef APSTUDIO_INVOKED
+#ifndef APSTUDIO_READONLY_SYMBOLS
+#define _APS_NEXT_RESOURCE_VALUE        130
+#define _APS_NEXT_COMMAND_VALUE         32771
+#define _APS_NEXT_CONTROL_VALUE         1148
+#define _APS_NEXT_SYMED_VALUE           101
+#endif
+#endif
diff --git a/voice_engine/main/test/win_test/WinTest.aps b/voice_engine/main/test/win_test/WinTest.aps
new file mode 100644
index 0000000..499db5f
--- /dev/null
+++ b/voice_engine/main/test/win_test/WinTest.aps
Binary files differ
diff --git a/voice_engine/main/test/win_test/WinTest.cpp b/voice_engine/main/test/win_test/WinTest.cpp
new file mode 100644
index 0000000..e0e0248
--- /dev/null
+++ b/voice_engine/main/test/win_test/WinTest.cpp
@@ -0,0 +1,75 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "stdafx.h"
+#include "WinTest.h"
+#include "WinTestDlg.h"
+
+#ifdef _DEBUG
+#define new DEBUG_NEW
+#endif
+
+
+// CWinTestApp
+
+BEGIN_MESSAGE_MAP(CWinTestApp, CWinApp)
+	ON_COMMAND(ID_HELP, &CWinApp::OnHelp)
+END_MESSAGE_MAP()
+
+
+// CWinTestApp construction
+
+CWinTestApp::CWinTestApp()
+{
+}
+
+
+// The one and only CWinTestApp object
+
+CWinTestApp theApp;
+
+
+// CWinTestApp initialization
+
+BOOL CWinTestApp::InitInstance()
+{
+	// InitCommonControlsEx() is required on Windows XP if an application
+	// manifest specifies use of ComCtl32.dll version 6 or later to enable
+	// visual styles.  Otherwise, any window creation will fail.
+	INITCOMMONCONTROLSEX InitCtrls;
+	InitCtrls.dwSize = sizeof(InitCtrls);
+	// Set this to include all the common control classes you want to use
+	// in your application.
+	InitCtrls.dwICC = ICC_WIN95_CLASSES;
+	InitCommonControlsEx(&InitCtrls);
+
+	CWinApp::InitInstance();
+
+	// Standard initialization
+	// If you are not using these features and wish to reduce the size
+	// of your final executable, you should remove from the following
+	// the specific initialization routines you do not need
+	// Change the registry key under which our settings are stored
+	SetRegistryKey(_T("Local AppWizard-Generated Applications"));
+
+	CWinTestDlg dlg;
+	m_pMainWnd = &dlg;
+	INT_PTR nResponse = dlg.DoModal();
+	if (nResponse == IDOK)
+	{
+	}
+	else if (nResponse == IDCANCEL)
+	{
+	}
+
+	// Since the dialog has been closed, return FALSE so that we exit the
+	//  application, rather than start the application's message pump.
+	return FALSE;
+}
diff --git a/voice_engine/main/test/win_test/WinTest.h b/voice_engine/main/test/win_test/WinTest.h
new file mode 100644
index 0000000..d012ce6
--- /dev/null
+++ b/voice_engine/main/test/win_test/WinTest.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#pragma once
+
+#ifndef __AFXWIN_H__
+	#error "include 'stdafx.h' before including this file for PCH"
+#endif
+
+#include "resource.h"		// main symbols
+
+
+// CWinTestApp:
+// See WinTest.cpp for the implementation of this class
+//
+
+class CWinTestApp : public CWinApp
+{
+public:
+	CWinTestApp();
+
+// Overrides
+	public:
+	virtual BOOL InitInstance();
+
+// Implementation
+
+	DECLARE_MESSAGE_MAP()
+};
+
+extern CWinTestApp theApp;
diff --git a/voice_engine/main/test/win_test/WinTest.rc b/voice_engine/main/test/win_test/WinTest.rc
new file mode 100644
index 0000000..dfe503f
--- /dev/null
+++ b/voice_engine/main/test/win_test/WinTest.rc
@@ -0,0 +1,394 @@
+// Microsoft Visual C++ generated resource script.

+//

+#include "resource.h"

+

+#define APSTUDIO_READONLY_SYMBOLS

+/////////////////////////////////////////////////////////////////////////////

+//

+// Generated from the TEXTINCLUDE 2 resource.

+//

+#include "afxres.h"

+

+/////////////////////////////////////////////////////////////////////////////

+#undef APSTUDIO_READONLY_SYMBOLS

+

+/////////////////////////////////////////////////////////////////////////////

+// Swedish resources

+

+#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_SVE)

+#ifdef _WIN32

+LANGUAGE LANG_SWEDISH, SUBLANG_DEFAULT

+#pragma code_page(1252)

+#endif //_WIN32

+

+#ifdef APSTUDIO_INVOKED

+/////////////////////////////////////////////////////////////////////////////

+//

+// TEXTINCLUDE

+//

+

+1 TEXTINCLUDE 

+BEGIN

+    "resource.h\0"

+END

+

+2 TEXTINCLUDE 

+BEGIN

+    "#include ""afxres.h""\r\n"

+    "\0"

+END

+

+3 TEXTINCLUDE 

+BEGIN

+    "#define _AFX_NO_SPLITTER_RESOURCES\r\n"

+    "#define _AFX_NO_OLE_RESOURCES\r\n"

+    "#define _AFX_NO_TRACKER_RESOURCES\r\n"

+    "#define _AFX_NO_PROPERTY_RESOURCES\r\n"

+    "\r\n"

+    "#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_SVE)\r\n"

+    "LANGUAGE 29, 1\r\n"

+    "#pragma code_page(1252)\r\n"

+    "#include ""res\\WinTest.rc2""  // non-Microsoft Visual C++ edited resources\r\n"

+    "#include ""afxres.rc""     // Standard components\r\n"

+    "#endif\r\n"

+    "\0"

+END

+

+#endif    // APSTUDIO_INVOKED

+

+

+/////////////////////////////////////////////////////////////////////////////

+//

+// Icon

+//

+

+// Icon with lowest ID value placed first to ensure application icon

+// remains consistent on all systems.

+IDR_MAINFRAME           ICON                    "res\\WinTest.ico"

+

+/////////////////////////////////////////////////////////////////////////////

+//

+// Dialog

+//

+

+IDD_ABOUTBOX DIALOGEX 0, 0, 235, 55

+STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | WS_POPUP | WS_CAPTION | WS_SYSMENU

+CAPTION "About WinTest"

+FONT 8, "MS Shell Dlg", 0, 0, 0x1

+BEGIN

+    ICON            IDR_MAINFRAME,IDC_STATIC,11,17,20,20

+    LTEXT           "WinTest Version 1.0",IDC_STATIC,40,10,119,8,SS_NOPREFIX

+    LTEXT           "Copyright (C) 2010",IDC_STATIC,40,25,119,8

+    DEFPUSHBUTTON   "OK",IDOK,178,7,50,16,WS_GROUP

+END

+

+IDD_WINTEST_DIALOG DIALOGEX 0, 0, 796, 278

+STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | WS_MINIMIZEBOX | WS_POPUP | WS_VISIBLE | WS_CAPTION | WS_SYSMENU

+EXSTYLE WS_EX_APPWINDOW

+CAPTION "WinTest"

+FONT 8, "MS Shell Dlg", 0, 0, 0x1

+BEGIN

+    PUSHBUTTON      "Create",IDC_BUTTON_CREATE_1,28,24,32,14

+    PUSHBUTTON      "Delete",IDC_BUTTON_DELETE_1,28,40,32,14

+    EDITTEXT        IDC_EDIT_1,6,32,18,14,ES_AUTOHSCROLL | ES_READONLY

+    PUSHBUTTON      "Create",IDC_BUTTON_CREATE_2,28,72,32,14

+    PUSHBUTTON      "Delete",IDC_BUTTON_DELETE_2,28,88,32,14

+    EDITTEXT        IDC_EDIT_2,6,82,18,14,ES_AUTOHSCROLL | ES_READONLY

+    EDITTEXT        IDC_EDIT_MESSAGE,28,244,764,12,ES_AUTOHSCROLL

+    COMBOBOX        IDC_COMBO_IP_1,64,24,76,30,CBS_DROPDOWN | CBS_SORT | WS_VSCROLL | WS_TABSTOP

+    EDITTEXT        IDC_EDIT_TX_PORT_1,144,24,28,14,ES_AUTOHSCROLL

+    EDITTEXT        IDC_EDIT_RX_PORT_1,144,40,28,14,ES_AUTOHSCROLL

+    COMBOBOX        IDC_COMBO_CODEC_1,176,24,76,156,CBS_DROPDOWN | WS_VSCROLL | WS_TABSTOP

+    PUSHBUTTON      "Start",IDC_BUTTON_START_LISTEN_1,256,24,32,14

+    PUSHBUTTON      "Stop",IDC_BUTTON_STOP_LISTEN_1,256,40,32,14

+    LTEXT           "Receive",IDC_STATIC_LISTEN,262,8,26,8

+    PUSHBUTTON      "Start",IDC_BUTTON_START_PLAYOUT_1,292,24,32,14

+    PUSHBUTTON      "Stop",IDC_BUTTON_STOP_PLAYOUT_1,292,40,32,14

+    LTEXT           "Playout",IDC_STATIC_PLAY,295,8,25,8

+    PUSHBUTTON      "Start",IDC_BUTTON_START_SEND_1,328,24,32,14

+    PUSHBUTTON      "Stop",IDC_BUTTON_STOP_SEND_1,328,40,32,14

+    LTEXT           "Send",IDC_STATIC_SEND,335,8,17,8

+    COMBOBOX        IDC_COMBO_IP_2,64,72,76,30,CBS_DROPDOWN | CBS_SORT | WS_VSCROLL | WS_TABSTOP

+    LTEXT           "Destination IP address",IDC_STATIC_IP,64,8,73,8

+    LTEXT           "Ports",IDC_STATIC_PORTS,145,8,18,8

+    LTEXT           "Codec",IDC_STATIC_CODEC,177,8,21,8

+    LTEXT           "Channel",IDC_STATIC_CHANNEL,30,8,27,8

+    LTEXT           "ID",IDC_STATIC_ID,12,8,8,8

+    EDITTEXT        IDC_EDIT_TX_PORT_2,144,72,28,14,ES_AUTOHSCROLL

+    EDITTEXT        IDC_EDIT_RX_PORT_2,144,88,28,14,ES_AUTOHSCROLL

+    COMBOBOX        IDC_COMBO_CODEC_2,176,72,76,156,CBS_DROPDOWN | WS_VSCROLL | WS_TABSTOP

+    PUSHBUTTON      "Start",IDC_BUTTON_START_LISTEN_2,256,72,32,14

+    PUSHBUTTON      "Stop",IDC_BUTTON_STOP_LISTEN_2,256,88,32,14

+    PUSHBUTTON      "Start",IDC_BUTTON_START_PLAYOUT_2,292,72,32,14

+    PUSHBUTTON      "Stop",IDC_BUTTON_STOP_PLAYOUT_2,292,88,32,14

+    PUSHBUTTON      "Start",IDC_BUTTON_START_SEND_2,328,72,32,14

+    PUSHBUTTON      "Stop",IDC_BUTTON_STOP_SEND_2,328,88,32,14

+    PUSHBUTTON      "TEST 1",IDC_BUTTON_TEST_1,756,224,36,14

+    LTEXT           "API",IDC_STATIC,4,247,12,8

+    EDITTEXT        IDC_EDIT_RESULT,28,260,96,12,ES_AUTOHSCROLL

+    LTEXT           "Result",IDC_STATIC,3,263,21,8

+    EDITTEXT        IDC_EDIT_N_FAILS,156,260,30,12,ES_AUTOHSCROLL

+    LTEXT           "#Fails",IDC_STATIC_ERROR,132,263,20,8

+    EDITTEXT        IDC_EDIT_LAST_ERROR,228,260,36,12,ES_AUTOHSCROLL

+    LTEXT           "Last Error",IDC_STATIC_LAST_ERROR,192,262,32,8

+    LTEXT           "Ext. Trans.",IDC_STATIC_EXTERNAL,361,8,37,8

+    CONTROL         "",IDC_CHECK_EXT_TRANS_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,371,33,16,10

+    CONTROL         "In",IDC_CHECK_PLAY_FILE_IN_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,396,24,36,14,WS_EX_DLGMODALFRAME

+    LTEXT           "Play File",IDC_STATIC_PLAY_FILE,401,8,27,8

+    CONTROL         "Out",IDC_CHECK_PLAY_FILE_OUT_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,396,40,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "In",IDC_CHECK_PLAY_FILE_IN_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,396,72,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "Out",IDC_CHECK_PLAY_FILE_OUT_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,396,88,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "",IDC_CHECK_EXT_TRANS_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,371,82,16,10

+    GROUPBOX        "",IDC_STATIC_ALL_CHANNELS,6,107,662,113

+    CONTROL         "PlayFileAsMic",IDC_CHECK_PLAY_FILE_IN,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,396,122,60,14,WS_EX_DLGMODALFRAME

+    COMBOBOX        IDC_COMBO_REC_DEVICE,12,132,184,80,CBS_DROPDOWN | WS_VSCROLL | WS_TABSTOP

+    LTEXT           "Recording device",IDC_STATIC_REC_DEVICE,12,120,56,8

+    COMBOBOX        IDC_COMBO_PLAY_DEVICE,12,180,184,80,CBS_DROPDOWN | WS_VSCROLL | WS_TABSTOP

+    LTEXT           "Playout device",IDC_STATIC_PLAY_DEVICE,12,167,56,8

+    CONTROL         "In",IDC_CHECK_EXT_MEDIA_IN_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,436,24,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "Out",IDC_CHECK_EXT_MEDIA_OUT_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,436,40,36,14,WS_EX_DLGMODALFRAME

+    LTEXT           "Ext. Media",IDC_STATIC_PLAY_FILE2,437,8,35,8

+    CONTROL         "",IDC_SLIDER_INPUT_VOLUME,"msctls_trackbar32",TBS_BOTH | TBS_NOTICKS | WS_TABSTOP,196,130,72,15

+    LTEXT           "Microphone Volume",IDC_STATIC_MIC_VOLUME,202,120,62,8

+    CONTROL         "",IDC_SLIDER_OUTPUT_VOLUME,"msctls_trackbar32",TBS_BOTH | TBS_NOTICKS | WS_TABSTOP,196,179,72,15

+    LTEXT           "Speaker Volume",IDC_STATIC_SPK_VOLUME,202,167,52,8

+    CONTROL         "AGC",IDC_CHECK_AGC,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,316,122,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "",IDC_PROGRESS_AUDIO_LEVEL_IN,"msctls_progress32",WS_BORDER,268,135,42,6

+    LTEXT           "Audio Level",IDC_STATIC_AUDIO_LEVEL_IN,271,120,38,8,NOT WS_GROUP

+    CONTROL         "NS",IDC_CHECK_NS,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,316,142,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "EC",IDC_CHECK_EC,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,356,122,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "VAD",IDC_CHECK_VAD_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,476,24,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "In",IDC_CHECK_EXT_MEDIA_IN_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,436,72,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "Out",IDC_CHECK_EXT_MEDIA_OUT_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,436,88,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "VAD",IDC_CHECK_VAD_3,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,476,72,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "Mute",IDC_CHECK_MUTE_IN,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,356,142,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "Mute",IDC_CHECK_MUTE_IN_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,476,40,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "Mute",IDC_CHECK_MUTE_IN_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,476,88,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "TX",IDC_CHECK_SRTP_TX_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,516,24,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "RX",IDC_CHECK_SRTP_RX_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,516,40,36,14,WS_EX_DLGMODALFRAME

+    LTEXT           "SRTP",IDC_STATIC_SRTP,525,8,18,8

+    CONTROL         "TX",IDC_CHECK_SRTP_TX_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,516,72,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "RX",IDC_CHECK_SRTP_RX_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,516,88,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "",IDC_CHECK_EXT_ENCRYPTION_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,564,33,16,10

+    LTEXT           "Encrypt",IDC_STATIC_PLAY_FILE4,556,8,26,8

+    CONTROL         "",IDC_CHECK_EXT_ENCRYPTION_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,564,82,16,10

+    PUSHBUTTON      "DTMF>>",IDC_BUTTON_DTMF_1,584,24,36,14

+    CONTROL         "RecMicToFile",IDC_CHECK_REC_MIC,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,396,142,60,14,WS_EX_DLGMODALFRAME

+    PUSHBUTTON      "DTMF>>",IDC_BUTTON_DTMF_2,584,72,36,14

+    CONTROL         "Conf",IDC_CHECK_CONFERENCE_1,"Button",BS_AUTOCHECKBOX | NOT WS_VISIBLE | WS_TABSTOP,584,40,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "Conf",IDC_CHECK_CONFERENCE_2,"Button",BS_AUTOCHECKBOX | NOT WS_VISIBLE | WS_TABSTOP,584,88,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "Hold",IDC_CHECK_ON_HOLD_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,708,24,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "Hold",IDC_CHECK_ON_HOLD_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,708,72,36,14,WS_EX_DLGMODALFRAME

+    EDITTEXT        IDC_EDIT_GET_OUTPUT,292,260,500,12,ES_AUTOHSCROLL

+    LTEXT           "Get",IDC_STATIC_GET,276,262,12,8

+    CONTROL         "Ext. Media",IDC_CHECK_EXT_MEDIA_IN,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,460,122,52,14,WS_EX_DLGMODALFRAME

+    CONTROL         "Ext. Media",IDC_CHECK_EXT_MEDIA_OUT,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,460,180,52,14,WS_EX_DLGMODALFRAME

+    LISTBOX         IDC_LIST_CODEC_1,208,40,44,28,LBS_NOINTEGRALHEIGHT | NOT WS_BORDER | WS_VSCROLL | WS_TABSTOP,WS_EX_CLIENTEDGE

+    EDITTEXT        IDC_EDIT_CODEC_1,176,40,28,14,ES_AUTOHSCROLL

+    CONTROL         "",IDC_PROGRESS_AUDIO_LEVEL_OUT,"msctls_progress32",WS_BORDER,268,184,42,6

+    LTEXT           "Panning",IDC_STATIC_PANNING,328,167,26,8

+    CONTROL         "",IDC_SLIDER_PAN_LEFT,"msctls_trackbar32",TBS_VERT | TBS_BOTH | TBS_NOTICKS | WS_TABSTOP,328,175,12,28

+    CONTROL         "",IDC_SLIDER_PAN_RIGHT,"msctls_trackbar32",TBS_VERT | TBS_BOTH | TBS_NOTICKS | WS_TABSTOP,344,175,12,28

+    LTEXT           "L",IDC_STATIC_LEFT,332,200,8,8

+    LTEXT           "R",IDC_STATIC_RIGHT,347,201,8,8

+    PUSHBUTTON      "Version",IDC_BUTTON_VERSION,624,200,36,14

+    EDITTEXT        IDC_EDIT_PLAYOUT_BUFFER_SIZE,363,181,28,12,ES_CENTER | ES_AUTOHSCROLL | ES_READONLY | NOT WS_TABSTOP

+    LTEXT           "Buffer Size",IDC_STATIC_PLAYOUT_BUFFER,361,167,36,8

+    CONTROL         "Delay",IDC_CHECK_DELAY_ESTIMATE_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,624,24,36,14,WS_EX_DLGMODALFRAME

+    EDITTEXT        IDC_EDIT_DELAY_ESTIMATE_1,631,40,24,14,ES_CENTER | ES_AUTOHSCROLL | ES_READONLY | NOT WS_TABSTOP

+    CONTROL         "RxVAD",IDC_CHECK_RXVAD,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,664,24,40,14,WS_EX_DLGMODALFRAME

+    EDITTEXT        IDC_EDIT_RXVAD,671,40,24,14,ES_CENTER | ES_AUTOHSCROLL | ES_READONLY

+    CONTROL         "AGC",IDC_CHECK_AGC_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,748,24,36,14,WS_EX_DLGMODALFRAME

+    CONTROL         "NS",IDC_CHECK_NS_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,748,40,36,14,WS_EX_DLGMODALFRAME

+    LTEXT           "RX VQE",IDC_STATIC_RX_VQE,753,8,25,8

+    CONTROL         "RecordCall",IDC_CHECK_REC_CALL,"Button",BS_AUTOCHECKBOX | NOT WS_VISIBLE | WS_TABSTOP,517,156,52,14,WS_EX_DLGMODALFRAME

+    LTEXT           "RX",IDC_STATIC_RX_PORT,133,42,10,8

+    LTEXT           "RX",IDC_STATIC_RX_PORT2,133,91,10,8

+    CONTROL         "TypingDetect",IDC_CHECK_TYPING_DETECTION,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,572,156,60,14,WS_EX_DLGMODALFRAME

+    EDITTEXT        IDC_EDIT_AUDIO_LAYER,28,224,116,14,ES_AUTOHSCROLL | ES_READONLY

+    EDITTEXT        IDC_EDIT_CPU_LOAD,152,224,116,14,ES_AUTOHSCROLL | ES_READONLY

+    CONTROL         "FEC",IDC_CHECK_FEC,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,176,55,28,14,WS_EX_DLGMODALFRAME

+    LTEXT           "=> Callbacks",IDC_STATIC_ERROR_CALLBACK,283,226,43,8

+    EDITTEXT        IDC_EDIT_ERROR_CALLBACK,328,224,312,14,ES_AUTOHSCROLL

+    PUSHBUTTON      "Clear",IDC_BUTTON_CLEAR_ERROR_CALLBACK,644,224,24,14

+    EDITTEXT        IDC_EDIT_RX_CODEC_1,256,56,216,12,ES_AUTOHSCROLL | ES_READONLY

+    EDITTEXT        IDC_EDIT_RTCP_STAT_1,476,56,316,12,ES_AUTOHSCROLL | ES_READONLY

+END

+

+IDD_DTMF_DIALOG DIALOGEX 0, 0, 316, 212

+STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | WS_POPUP | WS_CAPTION | WS_SYSMENU

+CAPTION "Telehone Events"

+FONT 8, "MS Shell Dlg", 400, 0, 0x1

+BEGIN

+    DEFPUSHBUTTON   "OK",IDOK,260,192,50,14

+    PUSHBUTTON      "1",IDC_BUTTON_1,16,20,16,14

+    PUSHBUTTON      "2",IDC_BUTTON_2,36,20,16,14

+    PUSHBUTTON      "3",IDC_BUTTON_3,56,20,16,14

+    PUSHBUTTON      "4",IDC_BUTTON_4,16,36,16,14

+    PUSHBUTTON      "5",IDC_BUTTON_5,36,36,16,14

+    PUSHBUTTON      "6",IDC_BUTTON_6,56,36,16,14

+    PUSHBUTTON      "7",IDC_BUTTON_7,16,52,16,14

+    PUSHBUTTON      "8",IDC_BUTTON_8,36,52,16,14

+    PUSHBUTTON      "9",IDC_BUTTON_9,56,52,16,14

+    PUSHBUTTON      "*",IDC_BUTTON_10,16,68,16,14

+    PUSHBUTTON      "0",IDC_BUTTON_11,36,68,16,14

+    PUSHBUTTON      "#",IDC_BUTTON_12,56,68,16,14

+    PUSHBUTTON      "A",IDC_BUTTON_13,76,20,16,14

+    PUSHBUTTON      "B",IDC_BUTTON_14,76,36,16,14

+    PUSHBUTTON      "C",IDC_BUTTON_15,76,52,16,14

+    PUSHBUTTON      "D",IDC_BUTTON_16,76,68,16,14

+    EDITTEXT        IDC_EDIT_DTMF_EVENT,56,90,16,12,ES_AUTOHSCROLL | ES_READONLY

+    LTEXT           "Event code",IDC_STATIC_DTMF_EVENT,17,91,37,8

+    PUSHBUTTON      "1",IDC_BUTTON_17,16,20,16,14

+    PUSHBUTTON      "2",IDC_BUTTON_18,36,20,16,14

+    PUSHBUTTON      "3",IDC_BUTTON_19,56,20,16,14

+    PUSHBUTTON      "4",IDC_BUTTON_20,16,36,16,14

+    PUSHBUTTON      "A",IDC_BUTTON_21,76,20,16,14

+    GROUPBOX        "DTMF Events",IDC_STATIC_GROUP_DTMF,4,4,188,132

+    CONTROL         "",IDC_CHECK_DTMF_PLAYOUT_RX,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,160,21,12,14

+    LTEXT           "Play out-band RX",IDC_STATIC_PLAYOUT_RX,101,24,56,8

+    CONTROL         "",IDC_CHECK_DTMF_PLAY_TONE,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,160,39,12,14

+    LTEXT           "Play tone locally",IDC_STATIC_PLAY_TONE,101,41,52,8

+    EDITTEXT        IDC_EDIT_EVENT_LENGTH,44,163,28,14,ES_AUTOHSCROLL

+    LTEXT           "Duration",IDC_STATIC_EVENT_LENGTH,12,165,28,8

+    EDITTEXT        IDC_EDIT_EVENT_ATTENUATION,44,183,28,14,ES_AUTOHSCROLL

+    LTEXT           "Volume",IDC_STATIC_EVENT_ATTENUATION,12,186,24,8

+    CONTROL         "Inband",IDC_CHECK_EVENT_INBAND,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,84,163,40,14,WS_EX_DLGMODALFRAME

+    CONTROL         "Feedback",IDC_CHECK_DTMF_FEEDBACK,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,16,112,48,14,WS_EX_DLGMODALFRAME

+    CONTROL         "",IDC_CHECK_DIRECT_FEEDBACK,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,96,112,12,14

+    LTEXT           "Direct",IDC_STATIC_DIRECT_FEEDBACK,72,115,20,8

+    CONTROL         "Single",IDC_RADIO_SINGLE,"Button",BS_AUTORADIOBUTTON | WS_GROUP,112,68,35,10

+    CONTROL         "Sequence",IDC_RADIO_MULTI,"Button",BS_AUTORADIOBUTTON,112,80,47,10

+    CONTROL         "Start/Stop",IDC_RADIO_START_STOP,"Button",BS_AUTORADIOBUTTON,112,92,49,10

+    GROUPBOX        "Mode",IDC_STATIC_MODE,100,56,68,52

+    EDITTEXT        IDC_EDIT_EVENT_RX_PT,220,20,24,14,ES_AUTOHSCROLL

+    EDITTEXT        IDC_EDIT_EVENT_TX_PT,220,41,24,14,ES_AUTOHSCROLL

+    LTEXT           "RX",IDC_STATIC_EVENT_RX_PT,208,22,10,8

+    LTEXT           "TX",IDC_STATIC_EVENT_TX_PT,208,42,9,8

+    PUSHBUTTON      "Set",IDC_BUTTON_SET_TX_TELEPHONE_PT,248,41,24,14

+    PUSHBUTTON      "Set",IDC_BUTTON_SET_RX_TELEPHONE_PT,248,20,24,14

+    GROUPBOX        "Payload Type",IDC_STATIC_PT,200,4,80,56

+    EDITTEXT        IDC_EDIT_EVENT_CODE,128,163,28,14,ES_AUTOHSCROLL

+    LTEXT           "Event code",IDC_STATIC_EVENT_CODE,125,152,37,8

+    PUSHBUTTON      "Send",IDC_BUTTON_SEND_TELEPHONE_EVENT,160,163,24,14

+    CONTROL         "On/Off",IDC_CHECK_EVENT_DETECTION,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,208,80,40,14,WS_EX_DLGMODALFRAME

+    CONTROL         "",IDC_CHECK_DETECT_INBAND,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,208,100,12,14

+    CONTROL         "",IDC_CHECK_DETECT_OUT_OF_BAND,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,208,116,12,14

+    LTEXT           "Inband",IDC_STATIC_INBAND_DETECTION,220,103,24,8

+    LTEXT           "Outband",IDC_STATIC_OUT_OF_BAND_DETECTION,220,120,29,8

+    GROUPBOX        "Event Detection",IDC_STATIC_EVENT_DETECTION,200,68,108,68

+    GROUPBOX        "Telephone Events",IDC_STATIC_TELEPHONE_EVENTS,4,140,188,64

+    EDITTEXT        IDC_EDIT_ON_EVENT_OUT_OF_BAND,252,117,48,14,ES_AUTOHSCROLL

+    EDITTEXT        IDC_EDIT_ON_EVENT_INBAND,252,101,48,14,ES_AUTOHSCROLL

+    LTEXT           "=> Detections",IDC_STATIC_EVEN,253,90,48,8

+END

+

+

+/////////////////////////////////////////////////////////////////////////////

+//

+// Version

+//

+

+VS_VERSION_INFO VERSIONINFO

+ FILEVERSION 1,0,0,0

+ PRODUCTVERSION 1,0,0,0

+ FILEFLAGSMASK 0x3fL

+#ifdef _DEBUG

+ FILEFLAGS 0x1L

+#else

+ FILEFLAGS 0x0L

+#endif

+ FILEOS 0x4L

+ FILETYPE 0x1L

+ FILESUBTYPE 0x0L

+BEGIN

+    BLOCK "StringFileInfo"

+    BEGIN

+        BLOCK "040904e4"

+        BEGIN

+            VALUE "FileDescription", "WebRTC VoiceEngine Test"

+            VALUE "FileVersion", "1.0.0.0"

+            VALUE "InternalName", "WinTest.exe"

+            VALUE "LegalCopyright", "Copyright (c) 2011 The WebRTC project authors. All Rights Reserved."

+            VALUE "OriginalFilename", "WinTest.exe"

+            VALUE "ProductName", "WebRTC VoiceEngine"

+            VALUE "ProductVersion", "1.0.0.0"

+        END

+    END

+    BLOCK "VarFileInfo"

+    BEGIN

+        VALUE "Translation", 0x409, 1252

+    END

+END

+

+

+/////////////////////////////////////////////////////////////////////////////

+//

+// DESIGNINFO

+//

+

+#ifdef APSTUDIO_INVOKED

+GUIDELINES DESIGNINFO 

+BEGIN

+    IDD_ABOUTBOX, DIALOG

+    BEGIN

+        LEFTMARGIN, 7

+        RIGHTMARGIN, 228

+        TOPMARGIN, 7

+        BOTTOMMARGIN, 48

+    END

+

+    IDD_WINTEST_DIALOG, DIALOG

+    BEGIN

+        LEFTMARGIN, 7

+        RIGHTMARGIN, 789

+        TOPMARGIN, 7

+        BOTTOMMARGIN, 271

+    END

+

+    IDD_DTMF_DIALOG, DIALOG

+    BEGIN

+        LEFTMARGIN, 7

+        RIGHTMARGIN, 309

+        TOPMARGIN, 7

+        BOTTOMMARGIN, 205

+    END

+END

+#endif    // APSTUDIO_INVOKED

+

+

+/////////////////////////////////////////////////////////////////////////////

+//

+// String Table

+//

+

+STRINGTABLE 

+BEGIN

+    IDS_ABOUTBOX            "&About WinTest..."

+END

+

+#endif    // Swedish resources

+/////////////////////////////////////////////////////////////////////////////

+

+

+

+#ifndef APSTUDIO_INVOKED

+/////////////////////////////////////////////////////////////////////////////

+//

+// Generated from the TEXTINCLUDE 3 resource.

+//

+#define _AFX_NO_SPLITTER_RESOURCES

+#define _AFX_NO_OLE_RESOURCES

+#define _AFX_NO_TRACKER_RESOURCES

+#define _AFX_NO_PROPERTY_RESOURCES

+

+#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_SVE)

+LANGUAGE 29, 1

+#pragma code_page(1252)

+#include "res\WinTest.rc2"  // non-Microsoft Visual C++ edited resources

+#include "afxres.rc"     // Standard components

+#endif

+

+/////////////////////////////////////////////////////////////////////////////

+#endif    // not APSTUDIO_INVOKED

+

diff --git a/voice_engine/main/test/win_test/WinTestDlg.cpp b/voice_engine/main/test/win_test/WinTestDlg.cpp
new file mode 100644
index 0000000..4e1b91e
--- /dev/null
+++ b/voice_engine/main/test/win_test/WinTestDlg.cpp
@@ -0,0 +1,3561 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include "stdafx.h"
+#include "WinTest.h"
+#include "WinTestDlg.h"
+
+#ifdef _DEBUG
+#define new DEBUG_NEW
+#endif
+
+using namespace webrtc;
+
+unsigned char key[30] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+// Hack to convert char to TCHAR, using two buffers to be able to
+// call twice in the same statement
+TCHAR convertTemp1[256] = {0};
+TCHAR convertTemp2[256] = {0};
+bool convertBufferSwitch(false);
+TCHAR* CharToTchar(const char* str, int len)
+{
+#ifdef _UNICODE
+  TCHAR* temp = convertBufferSwitch ? convertTemp1 : convertTemp2;
+  convertBufferSwitch = !convertBufferSwitch;
+  memset(temp, 0, sizeof(convertTemp1));
+  MultiByteToWideChar(CP_UTF8, 0, str, len, temp, 256);
+  return temp;
+#else
+  return const_cast<TCHAR*>(str);  // TCHAR is char in ANSI builds; drop const for this test hack
+#endif
+}
+
+// Hack to convert TCHAR to char
+char convertTemp3[256] = {0};
+char* TcharToChar(TCHAR* str, int len)
+{
+#ifdef _UNICODE
+  memset(convertTemp3, 0, sizeof(convertTemp3));
+  WideCharToMultiByte(CP_UTF8, 0, str, len, convertTemp3, 256, 0, 0);
+  return convertTemp3;
+#else
+  return str;
+#endif
+}
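+
+// Illustrative use of the two-buffer trick above ('str', 'name' and 'guid'
+// are placeholders): because CharToTchar() alternates between convertTemp1
+// and convertTemp2, two conversions can appear in one statement, e.g.
+//   str.Format(_T("%s: %s"), CharToTchar(name, -1), CharToTchar(guid, -1));
+// A third conversion in the same statement would reuse the first buffer.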
+
+// ----------------------------------------------------------------------------
+//    VoEConnectionObserver
+// ----------------------------------------------------------------------------
+
+class ConnectionObserver : public  VoEConnectionObserver
+{
+public:
+    ConnectionObserver();
+    virtual void OnPeriodicDeadOrAlive(const int channel, const bool alive);
+};
+
+ConnectionObserver::ConnectionObserver()
+{
+}
+
+void ConnectionObserver::OnPeriodicDeadOrAlive(const int channel, const bool alive)
+{
+    CString str;
+    str.Format(_T("OnPeriodicDeadOrAlive(channel=%d) => alive=%d"), channel, alive);
+    OutputDebugString(str);
+}
+
+// ----------------------------------------------------------------------------
+//    VoiceEngineObserver
+// ----------------------------------------------------------------------------
+
+void CWinTestDlg::CallbackOnError(const int channel, const int errCode)
+{
+    _nErrorCallbacks++;
+
+    CString str;
+    str.Format(_T("[#%d] CallbackOnError(channel=%d) => errCode = %d"), _nErrorCallbacks, channel, errCode);
+    if (errCode == VE_RECEIVE_PACKET_TIMEOUT)
+    {
+        str += _T(" <=> VE_RECEIVE_PACKET_TIMEOUT");
+    }
+    else if (errCode == VE_PACKET_RECEIPT_RESTARTED)
+    {
+        str += _T(" <=> VE_PACKET_RECEIPT_RESTARTED");
+    }
+    else if (errCode == VE_RUNTIME_PLAY_WARNING)
+    {
+        str += _T(" <=> VE_RUNTIME_PLAY_WARNING");
+    }
+    else if (errCode == VE_RUNTIME_REC_WARNING)
+    {
+        str += _T(" <=> VE_RUNTIME_REC_WARNING");
+    }
+    else if (errCode == VE_RUNTIME_PLAY_ERROR)
+    {
+        str += _T(" <=> VE_RUNTIME_PLAY_ERROR");
+    }
+    else if (errCode == VE_RUNTIME_REC_ERROR)
+    {
+        str += _T(" <=> VE_RUNTIME_REC_ERROR");
+    }
+    else if (errCode == VE_SATURATION_WARNING)
+    {
+        str += _T(" <=> VE_SATURATION_WARNING");
+    }
+    else if (errCode == VE_TYPING_NOISE_WARNING)
+    {
+        str += _T(" <=> VE_TYPING_NOISE_WARNING");
+    }
+    else if (errCode == VE_REC_DEVICE_REMOVED)
+    {
+        str += _T(" <=> VE_REC_DEVICE_REMOVED");
+    }
+    // AfxMessageBox((LPCTSTR)str, MB_OK);
+    SetDlgItemText(IDC_EDIT_ERROR_CALLBACK, (LPCTSTR)str);
+}
+
+// ----------------------------------------------------------------------------
+//    VoERTPObserver
+// ----------------------------------------------------------------------------
+
+void CWinTestDlg::OnIncomingCSRCChanged(const int channel, const unsigned int CSRC, const bool added)
+{
+    CString str;
+    str.Format(_T("OnIncomingCSRCChanged(channel=%d) => CSRC=%u, added=%d"), channel, CSRC, added);
+    SetDlgItemText(IDC_EDIT_ERROR_CALLBACK, (LPCTSTR)str);
+}
+
+void CWinTestDlg::OnIncomingSSRCChanged(const int channel, const unsigned int SSRC)
+{
+    CString str;
+    str.Format(_T("OnIncomingSSRCChanged(channel=%d) => SSRC=%u"), channel, SSRC);
+    SetDlgItemText(IDC_EDIT_ERROR_CALLBACK, (LPCTSTR)str);
+}
+
+// ----------------------------------------------------------------------------
+//    Transport
+// ----------------------------------------------------------------------------
+
+class MyTransport : public Transport
+{
+public:
+    MyTransport(VoENetwork* veNetwork);
+    virtual int SendPacket(int channel, const void *data, int len);
+    virtual int SendRTCPPacket(int channel, const void *data, int len);
+private:
+    VoENetwork* _veNetworkPtr;
+};
+
+MyTransport::MyTransport(VoENetwork* veNetwork) :
+    _veNetworkPtr(veNetwork)
+{
+}
+
+int
+MyTransport::SendPacket(int channel, const void *data, int len)
+{
+    _veNetworkPtr->ReceivedRTPPacket(channel, data, len);
+    return len;
+}
+
+int
+MyTransport::SendRTCPPacket(int channel, const void *data, int len)
+{
+    _veNetworkPtr->ReceivedRTCPPacket(channel, data, len);
+    return len;
+}
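+
+// MyTransport short-circuits the network: anything "sent" on a channel is fed
+// straight back into VoENetwork as an incoming packet, giving a loopback call
+// without sockets. A sketch of how such a transport is typically attached
+// (assuming the external-transport calls declared in voe_network.h):
+//   VoENetwork* netw = VoENetwork::GetInterface(voe);
+//   MyTransport transport(netw);
+//   netw->RegisterExternalTransport(channel, transport);
+//   ...
+//   netw->DeRegisterExternalTransport(channel);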
+
+// ----------------------------------------------------------------------------
+//    VoEMediaProcess
+// ----------------------------------------------------------------------------
+
+class MediaProcessImpl : public VoEMediaProcess
+{
+public:
+    MediaProcessImpl();
+    virtual void Process(const int channel,
+                         const ProcessingTypes type,
+                         WebRtc_Word16 audio_10ms[],
+                         const int length,
+                         const int samplingFreqHz,
+                         const bool stereo);
+};
+
+MediaProcessImpl::MediaProcessImpl()
+{
+}
+
+void MediaProcessImpl::Process(const int channel,
+                               const ProcessingTypes type,
+                               WebRtc_Word16 audio_10ms[],
+                               const int length,
+                               const int samplingFreqHz,
+                               const bool stereo)
+{
+    int x = rand() % 100;
+
+    for (int i = 0; i < length; i++)
+    {
+        if (channel == -1)
+        {
+            if (type == kPlaybackAllChannelsMixed)
+            {
+                // playout: scale up
+                if (!stereo)
+                {
+                    audio_10ms[i] = (audio_10ms[i] << 2);
+                }
+                else
+                {
+                    audio_10ms[2*i] = (audio_10ms[2*i] << 2);
+                    audio_10ms[2*i+1] = (audio_10ms[2*i+1] << 2);
+                }
+            }
+            else
+            {
+                // recording: emulate packet loss by "dropping" 10% of the packets
+                if (x >= 0 && x < 10)
+                {
+                    if (!stereo)
+                    {
+                        audio_10ms[i] = 0;
+                    }
+                    else
+                    {
+                        audio_10ms[2*i] = 0;
+                        audio_10ms[2*i+1] = 0;
+                    }
+                }
+            }
+        }
+        else
+        {
+            if (type == kPlaybackPerChannel)
+            {
+                // playout: mute
+                if (!stereo)
+                {
+                    audio_10ms[i] = 0;
+                }
+                else
+                {
+                    audio_10ms[2*i] = 0;
+                    audio_10ms[2*i+1] = 0;
+                }
+            }
+            else
+            {
+                // recording: emulate packet loss by "dropping" 50% of the packets
+                if (x >= 0 && x < 50)
+                {
+                    if (!stereo)
+                    {
+                        audio_10ms[i] = 0;
+                    }
+                    else
+                    {
+                        audio_10ms[2*i] = 0;
+                        audio_10ms[2*i+1] = 0;
+                    }
+                }
+            }
+        }
+    }
+}
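+
+// Note: x is drawn once per call, i.e. once per 10 ms frame, so the
+// "packet loss" emulation above zeroes whole frames rather than individual
+// samples: roughly 10% of them on the mixed recording path (channel == -1)
+// and 50% on the per-channel recording path.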
+
+// ----------------------------------------------------------------------------
+//    Encryption
+// ----------------------------------------------------------------------------
+
+class MyEncryption : public Encryption
+{
+public:
+    void encrypt(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out);
+    void decrypt(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out);
+    void encrypt_rtcp(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out);
+    void decrypt_rtcp(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out);
+};
+
+void MyEncryption::encrypt(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out)
+{
+    // --- Stereo emulation (sample based, 2 bytes per sample)
+
+    const int nBytesPayload = bytes_in-12;
+
+    // RTP header (first 12 bytes)
+    memcpy(out_data, in_data, 12);
+
+    // skip RTP header
+    short* ptrIn = (short*) &in_data[12];
+    short* ptrOut = (short*) &out_data[12];
+
+    // network byte order
+    for (int i = 0; i < nBytesPayload/2; i++)
+    {
+        // produce two output samples for each input sample
+        *ptrOut++ = *ptrIn; // left sample
+        *ptrOut++ = *ptrIn; // right sample
+        ptrIn++;
+    }
+
+    *bytes_out = 12 + 2*nBytesPayload;
+
+    /*
+    for(int i = 0; i < bytes_in; i++)
+        out_data[i] =~ in_data[i];
+    *bytes_out = bytes_in;
+    */
+}
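+
+// Byte bookkeeping for the stereo emulation above: with a 172-byte input
+// (12-byte RTP header + 160 payload bytes) the loop runs 160/2 = 80 times
+// and writes two 16-bit samples per iteration, so *bytes_out becomes
+// 12 + 2*160 = 332.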
+
+void MyEncryption::decrypt(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out)
+{
+    // Do nothing (<=> memcpy)
+    for(int i = 0; i < bytes_in; i++)
+        out_data[i] = in_data[i];
+    *bytes_out = bytes_in;
+}
+
+void MyEncryption::encrypt_rtcp(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out)
+{
+    for(int i = 0; i < bytes_in; i++)
+        out_data[i] = ~in_data[i];
+    *bytes_out = bytes_in;
+}
+
+void MyEncryption::decrypt_rtcp(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out)
+{
+    for(int i = 0; i < bytes_in; i++)
+        out_data[i] = ~in_data[i];
+    *bytes_out = bytes_in;
+}
+
+// ----------------------------------------------------------------------------
+//    TelephoneEventObserver
+// ----------------------------------------------------------------------------
+
+class TelephoneEventObserver: public VoETelephoneEventObserver
+{
+public:
+    TelephoneEventObserver(CWnd* editControlOut, CWnd* editControlIn);
+    virtual void OnReceivedTelephoneEventInband(const int channel, const unsigned char eventCode, const bool endOfEvent);
+    virtual void OnReceivedTelephoneEventOutOfBand(const int channel, const unsigned char eventCode, const bool endOfEvent);
+private:
+    CWnd* _editControlOutPtr;
+    CWnd* _editControlInPtr;
+};
+
+TelephoneEventObserver::TelephoneEventObserver(CWnd* editControlOut, CWnd* editControlIn) :
+    _editControlOutPtr(editControlOut),
+    _editControlInPtr(editControlIn)
+{
+}
+
+void TelephoneEventObserver::OnReceivedTelephoneEventInband(const int channel, const unsigned char eventCode, const bool endOfEvent)
+{
+    CString msg;
+    if (endOfEvent)
+    {
+        msg.AppendFormat(_T("%d [END]"), eventCode);
+        _editControlInPtr->SetWindowText((LPCTSTR)msg);
+    }
+    else
+    {
+        msg.AppendFormat(_T("%d [START]"), eventCode);
+        _editControlInPtr->SetWindowText((LPCTSTR)msg);
+    }
+}
+
+void TelephoneEventObserver::OnReceivedTelephoneEventOutOfBand(const int channel, const unsigned char eventCode, const bool endOfEvent)
+{
+    CString msg;
+    if (endOfEvent)
+    {
+        msg.AppendFormat(_T("%d [END]"), eventCode);
+        _editControlOutPtr->SetWindowText((LPCTSTR)msg);
+    }
+    else
+    {
+        msg.AppendFormat(_T("%d [START]"), eventCode);
+        _editControlOutPtr->SetWindowText((LPCTSTR)msg);
+    }
+}
+
+// ----------------------------------------------------------------------------
+//    RxVadCallback
+// ----------------------------------------------------------------------------
+
+class RxCallback : public VoERxVadCallback
+{
+public:
+    RxCallback() : _vadDecision(-1) {};
+
+    virtual void OnRxVad(int , int vadDecision)
+    {
+        _vadDecision = vadDecision;
+    }
+
+    int _vadDecision;
+};
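+
+// RxCallback simply caches the latest receive-side VAD decision. A sketch of
+// how it is typically hooked up (assuming the RX VAD observer calls declared
+// in voe_audio_processing.h):
+//   RxCallback rxVad;
+//   apm->RegisterRxVadObserver(channel, rxVad);
+//   ...
+//   apm->DeRegisterRxVadObserver(channel);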
+
+// ----------------------------------------------------------------------------
+//                                 CAboutDlg dialog
+// ----------------------------------------------------------------------------
+
+class CAboutDlg : public CDialog
+{
+public:
+    CAboutDlg();
+
+// Dialog Data
+    enum { IDD = IDD_ABOUTBOX };
+
+    protected:
+    virtual void DoDataExchange(CDataExchange* pDX);    // DDX/DDV support
+
+// Implementation
+protected:
+    DECLARE_MESSAGE_MAP()
+};
+
+CAboutDlg::CAboutDlg() : CDialog(CAboutDlg::IDD)
+{
+}
+
+void CAboutDlg::DoDataExchange(CDataExchange* pDX)
+{
+    CDialog::DoDataExchange(pDX);
+}
+
+BEGIN_MESSAGE_MAP(CAboutDlg, CDialog)
+END_MESSAGE_MAP()
+
+// ----------------------------------------------------------------------------
+//                               CTelephonyEvent dialog
+// ----------------------------------------------------------------------------
+
+class CTelephonyEvent : public CDialog
+{
+    DECLARE_DYNAMIC(CTelephonyEvent)
+
+public:
+    CTelephonyEvent(VoiceEngine* voiceEngine, int channel, CDialog* pParentDialog, CWnd* pParent = NULL);   // standard constructor
+    virtual ~CTelephonyEvent();
+
+// Dialog Data
+    enum { IDD = IDD_DTMF_DIALOG };
+
+protected:
+    virtual void DoDataExchange(CDataExchange* pDX);    // DDX/DDV support
+    virtual BOOL OnInitDialog();
+
+    DECLARE_MESSAGE_MAP()
+public:
+    afx_msg void OnBnClickedButton1();
+    afx_msg void OnBnClickedButton2();
+    afx_msg void OnBnClickedButton3();
+    afx_msg void OnBnClickedButton4();
+    afx_msg void OnBnClickedButton5();
+    afx_msg void OnBnClickedButton6();
+    afx_msg void OnBnClickedButton7();
+    afx_msg void OnBnClickedButton8();
+    afx_msg void OnBnClickedButton9();
+    afx_msg void OnBnClickedButton10();
+    afx_msg void OnBnClickedButton11();
+    afx_msg void OnBnClickedButton12();
+    afx_msg void OnBnClickedButtonA();
+    afx_msg void OnBnClickedButtonB();
+    afx_msg void OnBnClickedButtonC();
+    afx_msg void OnBnClickedButtonD();
+    afx_msg void OnBnClickedCheckDtmfPlayoutRx();
+    afx_msg void OnBnClickedCheckDtmfPlayTone();
+    afx_msg void OnBnClickedCheckStartStopMode();
+    afx_msg void OnBnClickedCheckEventInband();
+    afx_msg void OnBnClickedCheckDtmfFeedback();
+    afx_msg void OnBnClickedCheckDirectFeedback();
+    afx_msg void OnBnClickedRadioSingle();
+    afx_msg void OnBnClickedRadioMulti();
+    afx_msg void OnBnClickedRadioStartStop();
+    afx_msg void OnBnClickedButtonSetRxTelephonePt();
+    afx_msg void OnBnClickedButtonSetTxTelephonePt();
+    afx_msg void OnBnClickedButtonSendTelephoneEvent();
+    afx_msg void OnBnClickedCheckDetectInband();
+    afx_msg void OnBnClickedCheckDetectOutOfBand();
+    afx_msg void OnBnClickedCheckEventDetection();
+
+private:
+    void SendTelephoneEvent(unsigned char eventCode);
+
+private:
+    VoiceEngine*                _vePtr;
+    VoEBase*                    _veBasePtr;
+    VoEDtmf*                    _veDTMFPtr;
+    VoECodec*                   _veCodecPtr;
+    int                         _channel;
+    CString                     _strMsg;
+    CDialog*                    _parentDialogPtr;
+    TelephoneEventObserver*     _telephoneEventObserverPtr;
+    bool                        _PlayDtmfToneLocally;
+    bool                        _modeStartStop;
+    bool                        _modeSingle;
+    bool                        _modeSequence;
+    bool                        _playingDTMFTone;
+    bool                        _outOfBandEventDetection;
+    bool                        _inbandEventDetection;
+};
+
+IMPLEMENT_DYNAMIC(CTelephonyEvent, CDialog)
+
+CTelephonyEvent::CTelephonyEvent(VoiceEngine* voiceEngine,
+                                 int channel,
+                                 CDialog* pParentDialog,
+                                 CWnd* pParent /*=NULL*/)
+    : _vePtr(voiceEngine),
+      _channel(channel),
+      _PlayDtmfToneLocally(false),
+      _modeStartStop(false),
+      _modeSingle(true),
+      _modeSequence(false),
+      _playingDTMFTone(false),
+      _outOfBandEventDetection(true),
+      _inbandEventDetection(false),
+      _parentDialogPtr(pParentDialog),
+      _telephoneEventObserverPtr(NULL),
+      CDialog(CTelephonyEvent::IDD, pParent)
+{
+    _veBasePtr = VoEBase::GetInterface(_vePtr);
+    _veDTMFPtr = VoEDtmf::GetInterface(_vePtr);
+    _veCodecPtr = VoECodec::GetInterface(_vePtr);
+}
+
+CTelephonyEvent::~CTelephonyEvent()
+{
+    _veDTMFPtr->Release();
+    _veCodecPtr->Release();
+    _veBasePtr->Release();
+
+    if (_telephoneEventObserverPtr)
+    {
+        _veDTMFPtr->DeRegisterTelephoneEventDetection(_channel);
+        delete _telephoneEventObserverPtr;
+        _telephoneEventObserverPtr = NULL;
+    }
+}
+
+void CTelephonyEvent::DoDataExchange(CDataExchange* pDX)
+{
+    CDialog::DoDataExchange(pDX);
+}
+
+
+BEGIN_MESSAGE_MAP(CTelephonyEvent, CDialog)
+    ON_BN_CLICKED(IDC_BUTTON_1, &CTelephonyEvent::OnBnClickedButton1)
+    ON_BN_CLICKED(IDC_BUTTON_2, &CTelephonyEvent::OnBnClickedButton2)
+    ON_BN_CLICKED(IDC_BUTTON_3, &CTelephonyEvent::OnBnClickedButton3)
+    ON_BN_CLICKED(IDC_BUTTON_4, &CTelephonyEvent::OnBnClickedButton4)
+    ON_BN_CLICKED(IDC_BUTTON_5, &CTelephonyEvent::OnBnClickedButton5)
+    ON_BN_CLICKED(IDC_BUTTON_6, &CTelephonyEvent::OnBnClickedButton6)
+    ON_BN_CLICKED(IDC_BUTTON_7, &CTelephonyEvent::OnBnClickedButton7)
+    ON_BN_CLICKED(IDC_BUTTON_8, &CTelephonyEvent::OnBnClickedButton8)
+    ON_BN_CLICKED(IDC_BUTTON_9, &CTelephonyEvent::OnBnClickedButton9)
+    ON_BN_CLICKED(IDC_BUTTON_10, &CTelephonyEvent::OnBnClickedButton10)
+    ON_BN_CLICKED(IDC_BUTTON_11, &CTelephonyEvent::OnBnClickedButton11)
+    ON_BN_CLICKED(IDC_BUTTON_12, &CTelephonyEvent::OnBnClickedButton12)
+    ON_BN_CLICKED(IDC_BUTTON_13, &CTelephonyEvent::OnBnClickedButtonA)
+    ON_BN_CLICKED(IDC_BUTTON_14, &CTelephonyEvent::OnBnClickedButtonB)
+    ON_BN_CLICKED(IDC_BUTTON_15, &CTelephonyEvent::OnBnClickedButtonC)
+    ON_BN_CLICKED(IDC_BUTTON_16, &CTelephonyEvent::OnBnClickedButtonD)
+    ON_BN_CLICKED(IDC_CHECK_DTMF_PLAYOUT_RX, &CTelephonyEvent::OnBnClickedCheckDtmfPlayoutRx)
+    ON_BN_CLICKED(IDC_CHECK_DTMF_PLAY_TONE, &CTelephonyEvent::OnBnClickedCheckDtmfPlayTone)
+    ON_BN_CLICKED(IDC_CHECK_EVENT_INBAND, &CTelephonyEvent::OnBnClickedCheckEventInband)
+    ON_BN_CLICKED(IDC_CHECK_DTMF_FEEDBACK, &CTelephonyEvent::OnBnClickedCheckDtmfFeedback)
+    ON_BN_CLICKED(IDC_CHECK_DIRECT_FEEDBACK, &CTelephonyEvent::OnBnClickedCheckDirectFeedback)
+    ON_BN_CLICKED(IDC_RADIO_SINGLE, &CTelephonyEvent::OnBnClickedRadioSingle)
+    ON_BN_CLICKED(IDC_RADIO_MULTI, &CTelephonyEvent::OnBnClickedRadioMulti)
+    ON_BN_CLICKED(IDC_RADIO_START_STOP, &CTelephonyEvent::OnBnClickedRadioStartStop)
+    ON_BN_CLICKED(IDC_BUTTON_SET_RX_TELEPHONE_PT, &CTelephonyEvent::OnBnClickedButtonSetRxTelephonePt)
+    ON_BN_CLICKED(IDC_BUTTON_SET_TX_TELEPHONE_PT, &CTelephonyEvent::OnBnClickedButtonSetTxTelephonePt)
+    ON_BN_CLICKED(IDC_BUTTON_SEND_TELEPHONE_EVENT, &CTelephonyEvent::OnBnClickedButtonSendTelephoneEvent)
+    ON_BN_CLICKED(IDC_CHECK_DETECT_INBAND, &CTelephonyEvent::OnBnClickedCheckDetectInband)
+    ON_BN_CLICKED(IDC_CHECK_DETECT_OUT_OF_BAND, &CTelephonyEvent::OnBnClickedCheckDetectOutOfBand)
+    ON_BN_CLICKED(IDC_CHECK_EVENT_DETECTION, &CTelephonyEvent::OnBnClickedCheckEventDetection)
+END_MESSAGE_MAP()
+
+
+// CTelephonyEvent message handlers
+
+BOOL CTelephonyEvent::OnInitDialog()
+{
+    CDialog::OnInitDialog();
+
+    CString str;
+    GetWindowText(str);
+    str.AppendFormat(_T(" [channel = %d]"), _channel);
+    SetWindowText(str);
+
+    // Update dialog with latest playout state
+    bool enabled(false);
+    _veDTMFPtr->GetDtmfPlayoutStatus(_channel, enabled);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_DTMF_PLAYOUT_RX);
+    button->SetCheck(enabled ? BST_CHECKED : BST_UNCHECKED);
+
+    // Update dialog with latest feedback state
+    bool directFeedback(false);
+    _veDTMFPtr->GetDtmfFeedbackStatus(enabled, directFeedback);
+    button = (CButton*)GetDlgItem(IDC_CHECK_DTMF_FEEDBACK);
+    button->SetCheck(enabled ? BST_CHECKED : BST_UNCHECKED);
+    button = (CButton*)GetDlgItem(IDC_CHECK_DIRECT_FEEDBACK);
+    button->SetCheck(directFeedback ? BST_CHECKED : BST_UNCHECKED);
+
+    // Default event length is 160 ms
+    SetDlgItemInt(IDC_EDIT_EVENT_LENGTH, 160);
+
+    // Default event attenuation is 10 (<-> -10dBm0)
+    SetDlgItemInt(IDC_EDIT_EVENT_ATTENUATION, 10);
+
+    // Current event-detection status
+    TelephoneEventDetectionMethods detectionMethod(kOutOfBand);
+    if (_veDTMFPtr->GetTelephoneEventDetectionStatus(_channel, enabled, detectionMethod) == 0)
+    {
+        // DTMF detection is supported
+        if (enabled)
+        {
+            button = (CButton*)GetDlgItem(IDC_CHECK_EVENT_DETECTION);
+            button->SetCheck(BST_CHECKED);
+        }
+        if (detectionMethod == kOutOfBand || detectionMethod == kInAndOutOfBand)
+        {
+            button = (CButton*)GetDlgItem(IDC_CHECK_DETECT_OUT_OF_BAND);
+            button->SetCheck(BST_CHECKED);
+        }
+        if (detectionMethod == kInBand || detectionMethod == kInAndOutOfBand)
+        {
+            button = (CButton*)GetDlgItem(IDC_CHECK_DETECT_INBAND);
+            button->SetCheck(BST_CHECKED);
+        }
+    }
+    else
+    {
+        // DTMF detection is not supported
+        GetDlgItem(IDC_CHECK_EVENT_DETECTION)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_DETECT_OUT_OF_BAND)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_DETECT_INBAND)->EnableWindow(FALSE);
+        GetDlgItem(IDC_EDIT_ON_EVENT_INBAND)->EnableWindow(FALSE);
+        GetDlgItem(IDC_EDIT_ON_EVENT_OUT_OF_BAND)->EnableWindow(FALSE);
+    }
+
+    // Telephone-event PTs
+    unsigned char pt(0);
+    _veDTMFPtr->GetSendTelephoneEventPayloadType(_channel, pt);
+    SetDlgItemInt(IDC_EDIT_EVENT_TX_PT, pt);
+
+    CodecInst codec;
+    strcpy_s(codec.plname, 32, "telephone-event");
+    codec.channels = 1;
+    codec.plfreq = 8000;
+    _veCodecPtr->GetRecPayloadType(_channel, codec);
+    SetDlgItemInt(IDC_EDIT_EVENT_RX_PT, codec.pltype);
+
+    if (_modeSingle)
+    {
+        ((CButton*)GetDlgItem(IDC_RADIO_SINGLE))->SetCheck(BST_CHECKED);
+    }
+    else if (_modeStartStop)
+    {
+        ((CButton*)GetDlgItem(IDC_RADIO_START_STOP))->SetCheck(BST_CHECKED);
+    }
+    else if (_modeSequence)
+    {
+        ((CButton*)GetDlgItem(IDC_RADIO_MULTI))->SetCheck(BST_CHECKED);
+    }
+
+    return TRUE;  // return TRUE  unless you set the focus to a control
+}
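+
+// Plays the selected DTMF event as a local tone or sends it as a telephone
+// event on the current channel, depending on the dialog settings and mode.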
+void CTelephonyEvent::SendTelephoneEvent(unsigned char eventCode)
+{
+    BOOL ret;
+    int lengthMs(0);
+    int attenuationDb(0);
+    bool outBand(false);
+    int res(0);
+
+    // tone length
+    if (!_modeStartStop)
+    {
+        lengthMs = GetDlgItemInt(IDC_EDIT_EVENT_LENGTH, &ret);
+        if (ret == FALSE)
+        {
+            // use default length if edit field is empty
+            lengthMs = 160;
+        }
+    }
+
+    // attenuation
+    attenuationDb = GetDlgItemInt(IDC_EDIT_EVENT_ATTENUATION, &ret);
+    if (ret == FALSE)
+    {
+        // use default attenuation if edit field is empty
+        attenuationDb = 10;
+    }
+
+    // out-band or in-band
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EVENT_INBAND);
+    int check = button->GetCheck();
+    outBand = (check == BST_UNCHECKED);
+
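+    // Only standard DTMF events (0-15) are echoed in the event-code edit box.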
+    if (eventCode < 16)
+        SetDlgItemInt(IDC_EDIT_DTMF_EVENT, eventCode);
+
+    if (_PlayDtmfToneLocally)
+    {
+        // --- PlayDtmfTone
+
+        if (_modeSingle)
+        {
+            TEST2(_veDTMFPtr->PlayDtmfTone(eventCode, lengthMs, attenuationDb) == 0,
+                _T("PlayDtmfTone(eventCode=%u, lengthMs=%d, attenuationDb=%d)"), eventCode, lengthMs, attenuationDb);
+        }
+        else if (_modeStartStop)
+        {
+            if (!_playingDTMFTone)
+            {
+                TEST2((res = _veDTMFPtr->StartPlayingDtmfTone(eventCode, attenuationDb)) == 0,
+                    _T("StartPlayingDtmfTone(eventCode=%u, attenuationDb=%d)"), eventCode, attenuationDb);
+            }
+            else
+            {
+                TEST2((res = _veDTMFPtr->StopPlayingDtmfTone()) == 0,
+                    _T("StopPlayingDtmfTone()"));
+            }
+            if (res == 0)
+                _playingDTMFTone = !_playingDTMFTone;
+        }
+        else if (_modeSequence)
+        {
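+            // Sequence mode: event codes 1-3 trigger short preset tone
+            // sequences with different repeat counts, pauses and durations.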
+            int nTones(1);
+            int sleepMs(0);
+            int lenMult(1);
+            if (eventCode == 1)
+            {
+                nTones = 2;
+                sleepMs = lengthMs;
+                lenMult = 1;
+            }
+            else if (eventCode == 2)
+            {
+                nTones = 2;
+                sleepMs = lengthMs/2;
+                lenMult = 2;
+            }
+            else if (eventCode == 3)
+            {
+                nTones = 3;
+                sleepMs = 0;
+                lenMult = 1;
+            }
+            for (int i = 0; i < nTones; i++)
+            {
+                TEST2(_veDTMFPtr->PlayDtmfTone(eventCode, lengthMs, attenuationDb) == 0,
+                    _T("PlayDtmfTone(eventCode=%u, lengthMs=%d, attenuationDb=%d)"), eventCode, lengthMs, attenuationDb);
+                Sleep(sleepMs);
+                lengthMs = lenMult*lengthMs;
+                eventCode++;
+            }
+        }
+    }
+    else
+    {
+        // --- SendTelephoneEvent
+
+        if (_modeSingle)
+        {
+            TEST2(_veDTMFPtr->SendTelephoneEvent(_channel, eventCode, outBand, lengthMs, attenuationDb) == 0,
+                _T("SendTelephoneEvent(channel=%d, eventCode=%u, outBand=%d, lengthMs=%d, attenuationDb=%d)"), _channel, eventCode, outBand, lengthMs, attenuationDb);
+        }
+        else if (_modeStartStop)
+        {
+            TEST2(false, _T("*** NOT IMPLEMENTED ***"));
+        }
+        else if (_modeSequence)
+        {
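+            // Sequence mode for sent telephone events; mirrors the local tone
+            // sequences above, but codes 2 and 3 restart the sequence from event code 1.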
+            int nTones(1);
+            int sleepMs(0);
+            int lenMult(1);
+            if (eventCode == 1)
+            {
+                nTones = 2;
+                sleepMs = lengthMs;
+                lenMult = 1;
+            }
+            else if (eventCode == 2)
+            {
+                eventCode = 1;
+                nTones = 2;
+                sleepMs = lengthMs/2;
+                lenMult = 2;
+            }
+            else if (eventCode == 3)
+            {
+                eventCode = 1;
+                nTones = 3;
+                sleepMs = 0;
+                lenMult = 1;
+            }
+            for (int i = 0; i < nTones; i++)
+            {
+                TEST2(_veDTMFPtr->SendTelephoneEvent(_channel, eventCode, outBand, lengthMs, attenuationDb) == 0,
+                    _T("SendTelephoneEvent(channel=%d, eventCode=%u, outBand=%d, lengthMs=%d, attenuationDb=%d)"), _channel, eventCode, outBand, lengthMs, attenuationDb);
+                Sleep(sleepMs);
+                lengthMs = lenMult*lengthMs;
+                eventCode++;
+            }
+        }
+    }
+}
+
+void CTelephonyEvent::OnBnClickedButtonSendTelephoneEvent()
+{
+    BOOL ret;
+    unsigned char eventCode(0);
+
+    eventCode = (unsigned char)GetDlgItemInt(IDC_EDIT_EVENT_CODE, &ret);
+    if (ret == FALSE)
+    {
+        return;
+    }
+    SendTelephoneEvent(eventCode);
+}
+
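+// Keypad button handlers. Event codes follow the RFC 4733/2833 DTMF numbering:
+// 0-9 are the digits, 10 is '*', 11 is '#' and 12-15 are A-D.
+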
+void CTelephonyEvent::OnBnClickedButton1()
+{
+    SendTelephoneEvent(1);
+}
+
+void CTelephonyEvent::OnBnClickedButton2()
+{
+    SendTelephoneEvent(2);
+}
+
+void CTelephonyEvent::OnBnClickedButton3()
+{
+    SendTelephoneEvent(3);
+}
+
+void CTelephonyEvent::OnBnClickedButton4()
+{
+    SendTelephoneEvent(4);
+}
+
+void CTelephonyEvent::OnBnClickedButton5()
+{
+    SendTelephoneEvent(5);
+}
+
+void CTelephonyEvent::OnBnClickedButton6()
+{
+    SendTelephoneEvent(6);
+}
+
+void CTelephonyEvent::OnBnClickedButton7()
+{
+    SendTelephoneEvent(7);
+}
+
+void CTelephonyEvent::OnBnClickedButton8()
+{
+    SendTelephoneEvent(8);
+}
+
+void CTelephonyEvent::OnBnClickedButton9()
+{
+    SendTelephoneEvent(9);
+}
+
+void CTelephonyEvent::OnBnClickedButton10()
+{
+    // *
+    SendTelephoneEvent(10);
+}
+
+void CTelephonyEvent::OnBnClickedButton11()
+{
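+    // 0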
+    SendTelephoneEvent(0);
+}
+
+void CTelephonyEvent::OnBnClickedButton12()
+{
+    // #
+    SendTelephoneEvent(11);
+}
+
+void CTelephonyEvent::OnBnClickedButtonA()
+{
+    SendTelephoneEvent(12);
+}
+
+void CTelephonyEvent::OnBnClickedButtonB()
+{
+    SendTelephoneEvent(13);
+}
+
+void CTelephonyEvent::OnBnClickedButtonC()
+{
+    SendTelephoneEvent(14);
+}
+
+void CTelephonyEvent::OnBnClickedButtonD()
+{
+    SendTelephoneEvent(15);
+}
+
+void CTelephonyEvent::OnBnClickedCheckDtmfPlayoutRx()
+{
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_DTMF_PLAYOUT_RX);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    TEST2(_veDTMFPtr->SetDtmfPlayoutStatus(_channel, enable) == 0, _T("SetDtmfPlayoutStatus(channel=%d, enable=%d)"), _channel, enable);
+}
+
+void CTelephonyEvent::OnBnClickedCheckDtmfPlayTone()
+{
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_DTMF_PLAY_TONE);
+    int check = button->GetCheck();
+    _PlayDtmfToneLocally = (check == BST_CHECKED);
+}
+
+void CTelephonyEvent::OnBnClickedRadioSingle()
+{
+    _modeStartStop = false;
+    _modeSingle = true;
+    _modeSequence = false;
+}
+
+void CTelephonyEvent::OnBnClickedRadioMulti()
+{
+    _modeStartStop = false;
+    _modeSingle = false;
+    _modeSequence = true;
+}
+
+void CTelephonyEvent::OnBnClickedRadioStartStop()
+{
+    // CButton* button = (CButton*)GetDlgItem(IDC_RADIO_START_STOP);
+    // int check = button->GetCheck();
+    _modeStartStop = true;
+    _modeSingle = false;
+    _modeSequence = false;
+    // GetDlgItem(IDC_EDIT_EVENT_LENGTH)->EnableWindow();
+}
+
+void CTelephonyEvent::OnBnClickedCheckEventInband()
+{
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EVENT_INBAND);
+    int check = button->GetCheck();
+    GetDlgItem(IDC_EDIT_EVENT_CODE)->EnableWindow(check ? FALSE : TRUE);
+    GetDlgItem(IDC_BUTTON_SEND_TELEPHONE_EVENT)->EnableWindow(check ? FALSE : TRUE);
+}
+
+void CTelephonyEvent::OnBnClickedCheckDtmfFeedback()
+{
+    CButton* button(NULL);
+
+    // Retrieve feedback state
+    button = (CButton*)GetDlgItem(IDC_CHECK_DTMF_FEEDBACK);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+
+    // Retrieve direct-feedback setting
+    button = (CButton*)GetDlgItem(IDC_CHECK_DIRECT_FEEDBACK);
+    check = button->GetCheck();
+    const bool directFeedback = (check == BST_CHECKED);
+
+    // GetDlgItem(IDC_CHECK_DIRECT_FEEDBACK)->EnableWindow(enable ? TRUE : FALSE);
+
+    TEST2(_veDTMFPtr->SetDtmfFeedbackStatus(enable, directFeedback) == 0,
+        _T("SetDtmfFeedbackStatus(enable=%d, directFeedback=%d)"), enable, directFeedback);
+}
+
+void CTelephonyEvent::OnBnClickedCheckDirectFeedback()
+{
+    CButton* button(NULL);
+
+    // Retrieve feedback state
+    button = (CButton*)GetDlgItem(IDC_CHECK_DTMF_FEEDBACK);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+
+    // Retrieve new direct-feedback setting
+    button = (CButton*)GetDlgItem(IDC_CHECK_DIRECT_FEEDBACK);
+    check = button->GetCheck();
+    const bool directFeedback = (check == BST_CHECKED);
+
+    TEST2(_veDTMFPtr->SetDtmfFeedbackStatus(enable, directFeedback) == 0,
+        _T("SetDtmfFeedbackStatus(enable=%d, directFeedback=%d)"), enable, directFeedback);
+}
+
+void CTelephonyEvent::OnBnClickedButtonSetRxTelephonePt()
+{
+    BOOL ret;
+    int pt = GetDlgItemInt(IDC_EDIT_EVENT_RX_PT, &ret);
+    if (ret == FALSE)
+        return;
+    CodecInst codec;
+    strcpy_s(codec.plname, 32, "telephone-event");
+    codec.pltype = pt;
+    codec.channels = 1;
+    codec.plfreq = 8000;
+    TEST2(_veCodecPtr->SetRecPayloadType(_channel, codec) == 0,
+        _T("SetRecPayloadType(channel=%d, codec.pltype=%u)"), _channel, codec.pltype);
+}
+
+void CTelephonyEvent::OnBnClickedButtonSetTxTelephonePt()
+{
+    BOOL ret;
+    int pt = GetDlgItemInt(IDC_EDIT_EVENT_TX_PT, &ret);
+    if (ret == FALSE)
+        return;
+    TEST2(_veDTMFPtr->SetSendTelephoneEventPayloadType(_channel, pt) == 0,
+        _T("SetSendTelephoneEventPayloadType(channel=%d, type=%u)"), _channel, pt);
+}
+
+void CTelephonyEvent::OnBnClickedCheckDetectInband()
+{
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_DETECT_INBAND);
+    int check = button->GetCheck();
+    _inbandEventDetection = (check == BST_CHECKED);
+
+    bool enabled(false);
+    TelephoneEventDetectionMethods detectionMethod;
+    _veDTMFPtr->GetTelephoneEventDetectionStatus(_channel, enabled, detectionMethod);
+    if (enabled)
+    {
+        // deregister
+        _veDTMFPtr->DeRegisterTelephoneEventDetection(_channel);
+        delete _telephoneEventObserverPtr;
+        _telephoneEventObserverPtr = NULL;
+        SetDlgItemText(IDC_EDIT_ON_EVENT_INBAND,_T(""));
+        SetDlgItemText(IDC_EDIT_ON_EVENT_OUT_OF_BAND,_T(""));
+    }
+    OnBnClickedCheckEventDetection();
+}
+
+void CTelephonyEvent::OnBnClickedCheckDetectOutOfBand()
+{
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_DETECT_OUT_OF_BAND);
+    int check = button->GetCheck();
+    _outOfBandEventDetection = (check == BST_CHECKED);
+
+    bool enabled(false);
+    TelephoneEventDetectionMethods detectionMethod;
+    _veDTMFPtr->GetTelephoneEventDetectionStatus(_channel, enabled, detectionMethod);
+    if (enabled)
+    {
+        // deregister
+        _veDTMFPtr->DeRegisterTelephoneEventDetection(_channel);
+        delete _telephoneEventObserverPtr;
+        _telephoneEventObserverPtr = NULL;
+        SetDlgItemText(IDC_EDIT_ON_EVENT_INBAND,_T(""));
+        SetDlgItemText(IDC_EDIT_ON_EVENT_OUT_OF_BAND,_T(""));
+    }
+    OnBnClickedCheckEventDetection();
+}
+
+void CTelephonyEvent::OnBnClickedCheckEventDetection()
+{
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EVENT_DETECTION);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+
+    if (enable)
+    {
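+        // Pick the detection method from the two "detect" check boxes and
+        // register an observer that writes received events to the edit boxes.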
+        TelephoneEventDetectionMethods method(kInBand);
+        if (_inbandEventDetection && !_outOfBandEventDetection)
+            method = kInBand;
+        else if (!_inbandEventDetection && _outOfBandEventDetection)
+            method = kOutOfBand;
+        else if (_inbandEventDetection && _outOfBandEventDetection)
+            method = kInAndOutOfBand;
+
+        CWnd* wndOut = GetDlgItem(IDC_EDIT_ON_EVENT_OUT_OF_BAND);
+        CWnd* wndIn = GetDlgItem(IDC_EDIT_ON_EVENT_INBAND);
+        _telephoneEventObserverPtr = new TelephoneEventObserver(wndOut, wndIn);
+
+        TEST2(_veDTMFPtr->RegisterTelephoneEventDetection(_channel, method, *_telephoneEventObserverPtr) == 0,
+            _T("RegisterTelephoneEventDetection(channel=%d, detectionMethod=%d)"), _channel, method);
+    }
+    else
+    {
+        TEST2(_veDTMFPtr->DeRegisterTelephoneEventDetection(_channel) == 0,
+            _T("DeRegisterTelephoneEventDetection(channel=%d)"), _channel);
+        delete _telephoneEventObserverPtr;
+        _telephoneEventObserverPtr = NULL;
+        SetDlgItemText(IDC_EDIT_ON_EVENT_INBAND,_T(""));
+        SetDlgItemText(IDC_EDIT_ON_EVENT_OUT_OF_BAND,_T(""));
+    }
+}
+
+// ============================================================================
+//                                 CWinTestDlg dialog
+// ============================================================================
+
+CWinTestDlg::CWinTestDlg(CWnd* pParent /*=NULL*/)
+    : CDialog(CWinTestDlg::IDD, pParent),
+    _failCount(0),
+    _vePtr(NULL),
+    _veBasePtr(NULL),
+    _veCodecPtr(NULL),
+    _veNetworkPtr(NULL),
+    _veFilePtr(NULL),
+    _veHardwarePtr(NULL),
+    _veExternalMediaPtr(NULL),
+    _veApmPtr(NULL),
+    _veEncryptionPtr(NULL),
+    _veRtpRtcpPtr(NULL),
+    _transportPtr(NULL),
+    _encryptionPtr(NULL),
+    _externalMediaPtr(NULL),
+    _externalTransport(false),
+    _externalTransportBuild(false),
+    _checkPlayFileIn(0),
+    _checkPlayFileIn1(0),
+    _checkPlayFileIn2(0),
+    _checkPlayFileOut1(0),
+    _checkPlayFileOut2(0),
+    _checkAGC(0),
+    _checkAGC1(0),
+    _checkNS(0),
+    _checkNS1(0),
+    _checkEC(0),
+    _checkVAD1(0),
+    _checkVAD2(0),
+    _checkSrtpTx1(0),
+    _checkSrtpTx2(0),
+    _checkSrtpRx1(0),
+    _checkSrtpRx2(0),
+    _checkConference1(0),
+    _checkConference2(0),
+    _checkOnHold1(0),
+    _checkOnHold2(0),
+    _strComboIp1(_T("")),
+    _strComboIp2(_T("")),
+    _delayEstimate1(false),
+    _delayEstimate2(false),
+    _rxVad(false),
+    _nErrorCallbacks(0),
+    _timerTicks(0)
+{
+    m_hIcon = AfxGetApp()->LoadIcon(IDR_MAINFRAME);
+
+    _vePtr = VoiceEngine::Create();
+
+    VoiceEngine::SetTraceFilter(kTraceNone);
+    // VoiceEngine::SetTraceFilter(kTraceAll);
+    // VoiceEngine::SetTraceFilter(kTraceStream | kTraceStateInfo | kTraceWarning | kTraceError | kTraceCritical | kTraceApiCall | kTraceModuleCall | kTraceMemory | kTraceDebug | kTraceInfo);
+    // VoiceEngine::SetTraceFilter(kTraceStateInfo | kTraceWarning | kTraceError | kTraceCritical | kTraceApiCall | kTraceModuleCall | kTraceMemory | kTraceInfo);
+
+    VoiceEngine::SetTraceFile("ve_win_test.txt");
+    VoiceEngine::SetTraceCallback(NULL);
+
+    if (_vePtr)
+    {
+        _veExternalMediaPtr = VoEExternalMedia::GetInterface(_vePtr);
+        _veVolumeControlPtr = VoEVolumeControl::GetInterface(_vePtr);
+        _veEncryptionPtr = VoEEncryption::GetInterface(_vePtr);
+        _veVideoSyncPtr = VoEVideoSync::GetInterface(_vePtr);
+        _veNetworkPtr = VoENetwork::GetInterface(_vePtr);
+        _veFilePtr = VoEFile::GetInterface(_vePtr);
+        _veApmPtr = VoEAudioProcessing::GetInterface(_vePtr);
+
+        _veBasePtr = VoEBase::GetInterface(_vePtr);
+        _veCodecPtr = VoECodec::GetInterface(_vePtr);
+        _veHardwarePtr = VoEHardware::GetInterface(_vePtr);
+        _veRtpRtcpPtr = VoERTP_RTCP::GetInterface(_vePtr);
+        _transportPtr = new MyTransport(_veNetworkPtr);
+        _encryptionPtr = new MyEncryption();
+        _externalMediaPtr = new MediaProcessImpl();
+        _connectionObserverPtr = new ConnectionObserver();
+        _rxVadObserverPtr = new RxCallback();
+    }
+
+    if (_veBasePtr)
+        _veBasePtr->RegisterVoiceEngineObserver(*this);
+}
+
+CWinTestDlg::~CWinTestDlg()
+{
+    if (_connectionObserverPtr) delete _connectionObserverPtr;
+    if (_externalMediaPtr) delete _externalMediaPtr;
+    if (_transportPtr) delete _transportPtr;
+    if (_encryptionPtr) delete _encryptionPtr;
+    if (_rxVadObserverPtr) delete _rxVadObserverPtr;
+
+    if (_veExternalMediaPtr) _veExternalMediaPtr->Release();
+    if (_veEncryptionPtr) _veEncryptionPtr->Release();
+    if (_veVideoSyncPtr) _veVideoSyncPtr->Release();
+    if (_veVolumeControlPtr) _veVolumeControlPtr->Release();
+
+    if (_veBasePtr) _veBasePtr->Terminate();
+    if (_veBasePtr) _veBasePtr->Release();
+
+    if (_veCodecPtr) _veCodecPtr->Release();
+    if (_veNetworkPtr) _veNetworkPtr->Release();
+    if (_veFilePtr) _veFilePtr->Release();
+    if (_veHardwarePtr) _veHardwarePtr->Release();
+    if (_veApmPtr) _veApmPtr->Release();
+    if (_veRtpRtcpPtr) _veRtpRtcpPtr->Release();
+    if (_vePtr)
+    {
+        bool ret = VoiceEngine::Delete(_vePtr);
+        ASSERT(ret == true);
+    }
+    VoiceEngine::SetTraceFilter(kTraceNone);
+}
+
+void CWinTestDlg::DoDataExchange(CDataExchange* pDX)
+{
+    CDialog::DoDataExchange(pDX);
+    DDX_CBString(pDX, IDC_COMBO_IP_1, _strComboIp1);
+    DDX_CBString(pDX, IDC_COMBO_IP_2, _strComboIp2);
+}
+
+BEGIN_MESSAGE_MAP(CWinTestDlg, CDialog)
+    ON_WM_SYSCOMMAND()
+    ON_WM_PAINT()
+    ON_WM_QUERYDRAGICON()
+    ON_WM_TIMER()
+    //}}AFX_MSG_MAP
+    ON_BN_CLICKED(IDC_BUTTON_CREATE_1, &CWinTestDlg::OnBnClickedButtonCreate1)
+    ON_BN_CLICKED(IDC_BUTTON_DELETE_1, &CWinTestDlg::OnBnClickedButtonDelete1)
+    ON_BN_CLICKED(IDC_BUTTON_CREATE_2, &CWinTestDlg::OnBnClickedButtonCreate2)
+    ON_BN_CLICKED(IDC_BUTTON_DELETE_2, &CWinTestDlg::OnBnClickedButtonDelete2)
+    ON_CBN_SELCHANGE(IDC_COMBO_CODEC_1, &CWinTestDlg::OnCbnSelchangeComboCodec1)
+    ON_BN_CLICKED(IDC_BUTTON_START_LISTEN_1, &CWinTestDlg::OnBnClickedButtonStartListen1)
+    ON_BN_CLICKED(IDC_BUTTON_STOP_LISTEN_1, &CWinTestDlg::OnBnClickedButtonStopListen1)
+    ON_BN_CLICKED(IDC_BUTTON_START_PLAYOUT_1, &CWinTestDlg::OnBnClickedButtonStartPlayout1)
+    ON_BN_CLICKED(IDC_BUTTON_STOP_PLAYOUT_1, &CWinTestDlg::OnBnClickedButtonStopPlayout1)
+    ON_BN_CLICKED(IDC_BUTTON_START_SEND_1, &CWinTestDlg::OnBnClickedButtonStartSend1)
+    ON_BN_CLICKED(IDC_BUTTON_STOP_SEND_1, &CWinTestDlg::OnBnClickedButtonStopSend1)
+    ON_CBN_SELCHANGE(IDC_COMBO_IP_2, &CWinTestDlg::OnCbnSelchangeComboIp2)
+    ON_CBN_SELCHANGE(IDC_COMBO_IP_1, &CWinTestDlg::OnCbnSelchangeComboIp1)
+    ON_CBN_SELCHANGE(IDC_COMBO_CODEC_2, &CWinTestDlg::OnCbnSelchangeComboCodec2)
+    ON_BN_CLICKED(IDC_BUTTON_START_LISTEN_2, &CWinTestDlg::OnBnClickedButtonStartListen2)
+    ON_BN_CLICKED(IDC_BUTTON_STOP_LISTEN_2, &CWinTestDlg::OnBnClickedButtonStopListen2)
+    ON_BN_CLICKED(IDC_BUTTON_START_PLAYOUT_2, &CWinTestDlg::OnBnClickedButtonStartPlayout2)
+    ON_BN_CLICKED(IDC_BUTTON_STOP_PLAYOUT_2, &CWinTestDlg::OnBnClickedButtonStopPlayout2)
+    ON_BN_CLICKED(IDC_BUTTON_START_SEND_2, &CWinTestDlg::OnBnClickedButtonStartSend2)
+    ON_BN_CLICKED(IDC_BUTTON_STOP_SEND_2, &CWinTestDlg::OnBnClickedButtonStopSend2)
+    ON_BN_CLICKED(IDC_CHECK_EXT_TRANS_1, &CWinTestDlg::OnBnClickedCheckExtTrans1)
+    ON_BN_CLICKED(IDC_CHECK_PLAY_FILE_IN_1, &CWinTestDlg::OnBnClickedCheckPlayFileIn1)
+    ON_BN_CLICKED(IDC_CHECK_PLAY_FILE_OUT_1, &CWinTestDlg::OnBnClickedCheckPlayFileOut1)
+    ON_BN_CLICKED(IDC_CHECK_EXT_TRANS_2, &CWinTestDlg::OnBnClickedCheckExtTrans2)
+    ON_BN_CLICKED(IDC_CHECK_PLAY_FILE_IN_2, &CWinTestDlg::OnBnClickedCheckPlayFileIn2)
+    ON_BN_CLICKED(IDC_CHECK_PLAY_FILE_OUT_2, &CWinTestDlg::OnBnClickedCheckPlayFileOut2)
+    ON_BN_CLICKED(IDC_CHECK_PLAY_FILE_IN, &CWinTestDlg::OnBnClickedCheckPlayFileIn)
+    ON_CBN_SELCHANGE(IDC_COMBO_REC_DEVICE, &CWinTestDlg::OnCbnSelchangeComboRecDevice)
+    ON_CBN_SELCHANGE(IDC_COMBO_PLAY_DEVICE, &CWinTestDlg::OnCbnSelchangeComboPlayDevice)
+    ON_BN_CLICKED(IDC_CHECK_EXT_MEDIA_IN_1, &CWinTestDlg::OnBnClickedCheckExtMediaIn1)
+    ON_BN_CLICKED(IDC_CHECK_EXT_MEDIA_OUT_1, &CWinTestDlg::OnBnClickedCheckExtMediaOut1)
+    ON_NOTIFY(NM_RELEASEDCAPTURE, IDC_SLIDER_INPUT_VOLUME, &CWinTestDlg::OnNMReleasedcaptureSliderInputVolume)
+    ON_NOTIFY(NM_RELEASEDCAPTURE, IDC_SLIDER_OUTPUT_VOLUME, &CWinTestDlg::OnNMReleasedcaptureSliderOutputVolume)
+    ON_BN_CLICKED(IDC_CHECK_AGC, &CWinTestDlg::OnBnClickedCheckAgc)
+    ON_BN_CLICKED(IDC_CHECK_NS, &CWinTestDlg::OnBnClickedCheckNs)
+    ON_BN_CLICKED(IDC_CHECK_EC, &CWinTestDlg::OnBnClickedCheckEc)
+    ON_BN_CLICKED(IDC_CHECK_VAD_1, &CWinTestDlg::OnBnClickedCheckVad1)
+    ON_BN_CLICKED(IDC_CHECK_VAD_3, &CWinTestDlg::OnBnClickedCheckVad2)
+    ON_BN_CLICKED(IDC_CHECK_EXT_MEDIA_IN_2, &CWinTestDlg::OnBnClickedCheckExtMediaIn2)
+    ON_BN_CLICKED(IDC_CHECK_EXT_MEDIA_OUT_2, &CWinTestDlg::OnBnClickedCheckExtMediaOut2)
+    ON_BN_CLICKED(IDC_CHECK_MUTE_IN, &CWinTestDlg::OnBnClickedCheckMuteIn)
+    ON_BN_CLICKED(IDC_CHECK_MUTE_IN_1, &CWinTestDlg::OnBnClickedCheckMuteIn1)
+    ON_BN_CLICKED(IDC_CHECK_MUTE_IN_2, &CWinTestDlg::OnBnClickedCheckMuteIn2)
+    ON_BN_CLICKED(IDC_CHECK_SRTP_TX_1, &CWinTestDlg::OnBnClickedCheckSrtpTx1)
+    ON_BN_CLICKED(IDC_CHECK_SRTP_RX_1, &CWinTestDlg::OnBnClickedCheckSrtpRx1)
+    ON_BN_CLICKED(IDC_CHECK_SRTP_TX_2, &CWinTestDlg::OnBnClickedCheckSrtpTx2)
+    ON_BN_CLICKED(IDC_CHECK_SRTP_RX_2, &CWinTestDlg::OnBnClickedCheckSrtpRx2)
+    ON_BN_CLICKED(IDC_CHECK_EXT_ENCRYPTION_1, &CWinTestDlg::OnBnClickedCheckExtEncryption1)
+    ON_BN_CLICKED(IDC_CHECK_EXT_ENCRYPTION_2, &CWinTestDlg::OnBnClickedCheckExtEncryption2)
+    ON_BN_CLICKED(IDC_BUTTON_DTMF_1, &CWinTestDlg::OnBnClickedButtonDtmf1)
+    ON_BN_CLICKED(IDC_CHECK_REC_MIC, &CWinTestDlg::OnBnClickedCheckRecMic)
+    ON_BN_CLICKED(IDC_BUTTON_DTMF_2, &CWinTestDlg::OnBnClickedButtonDtmf2)
+    ON_BN_CLICKED(IDC_BUTTON_TEST_1, &CWinTestDlg::OnBnClickedButtonTest1)
+    ON_BN_CLICKED(IDC_CHECK_CONFERENCE_1, &CWinTestDlg::OnBnClickedCheckConference1)
+    ON_BN_CLICKED(IDC_CHECK_CONFERENCE_2, &CWinTestDlg::OnBnClickedCheckConference2)
+    ON_BN_CLICKED(IDC_CHECK_ON_HOLD_1, &CWinTestDlg::OnBnClickedCheckOnHold1)
+    ON_BN_CLICKED(IDC_CHECK_ON_HOLD_2, &CWinTestDlg::OnBnClickedCheckOnHold2)
+    ON_BN_CLICKED(IDC_CHECK_EXT_MEDIA_IN, &CWinTestDlg::OnBnClickedCheckExtMediaIn)
+    ON_BN_CLICKED(IDC_CHECK_EXT_MEDIA_OUT, &CWinTestDlg::OnBnClickedCheckExtMediaOut)
+    ON_LBN_SELCHANGE(IDC_LIST_CODEC_1, &CWinTestDlg::OnLbnSelchangeListCodec1)
+    ON_NOTIFY(NM_RELEASEDCAPTURE, IDC_SLIDER_PAN_LEFT, &CWinTestDlg::OnNMReleasedcaptureSliderPanLeft)
+    ON_NOTIFY(NM_RELEASEDCAPTURE, IDC_SLIDER_PAN_RIGHT, &CWinTestDlg::OnNMReleasedcaptureSliderPanRight)
+    ON_BN_CLICKED(IDC_BUTTON_VERSION, &CWinTestDlg::OnBnClickedButtonVersion)
+    ON_BN_CLICKED(IDC_CHECK_DELAY_ESTIMATE_1, &CWinTestDlg::OnBnClickedCheckDelayEstimate1)
+    ON_BN_CLICKED(IDC_CHECK_RXVAD, &CWinTestDlg::OnBnClickedCheckRxvad)
+    ON_BN_CLICKED(IDC_CHECK_AGC_1, &CWinTestDlg::OnBnClickedCheckAgc1)
+    ON_BN_CLICKED(IDC_CHECK_NS_1, &CWinTestDlg::OnBnClickedCheckNs1)
+    ON_BN_CLICKED(IDC_CHECK_REC_CALL, &CWinTestDlg::OnBnClickedCheckRecCall)
+    ON_BN_CLICKED(IDC_CHECK_TYPING_DETECTION, &CWinTestDlg::OnBnClickedCheckTypingDetection)
+    ON_BN_CLICKED(IDC_CHECK_FEC, &CWinTestDlg::OnBnClickedCheckFEC)
+    ON_BN_CLICKED(IDC_BUTTON_CLEAR_ERROR_CALLBACK, &CWinTestDlg::OnBnClickedButtonClearErrorCallback)
+END_MESSAGE_MAP()
+
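+// Reports the outcome of the last API call in the dialog: the call string,
+// an OK/FAILED status and, on failure, the error code and running failure count.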
+BOOL CWinTestDlg::UpdateTest(bool failed, const CString& strMsg)
+{
+    if (failed)
+    {
+        SetDlgItemText(IDC_EDIT_MESSAGE, strMsg);
+        _strErr.Format(_T("FAILED (error=%d)"), _veBasePtr->LastError());
+        SetDlgItemText(IDC_EDIT_RESULT, _strErr);
+        _failCount++;
+        SetDlgItemInt(IDC_EDIT_N_FAILS, _failCount);
+        SetDlgItemInt(IDC_EDIT_LAST_ERROR, _veBasePtr->LastError());
+    }
+    else
+    {
+        SetDlgItemText(IDC_EDIT_MESSAGE, strMsg);
+        SetDlgItemText(IDC_EDIT_RESULT, _T("OK"));
+    }
+    return TRUE;
+}
+
+
+// CWinTestDlg message handlers
+
+BOOL CWinTestDlg::OnInitDialog()
+{
+    CDialog::OnInitDialog();
+
+    // Add "About..." menu item to system menu.
+
+    // IDM_ABOUTBOX must be in the system command range.
+    ASSERT((IDM_ABOUTBOX & 0xFFF0) == IDM_ABOUTBOX);
+    ASSERT(IDM_ABOUTBOX < 0xF000);
+
+    CMenu* pSysMenu = GetSystemMenu(FALSE);
+    if (pSysMenu != NULL)
+    {
+        CString strAboutMenu;
+        strAboutMenu.LoadString(IDS_ABOUTBOX);
+        if (!strAboutMenu.IsEmpty())
+        {
+            pSysMenu->AppendMenu(MF_SEPARATOR);
+            pSysMenu->AppendMenu(MF_STRING, IDM_ABOUTBOX, strAboutMenu);
+        }
+    }
+
+    // Set the icon for this dialog.  The framework does this automatically
+    //  when the application's main window is not a dialog
+    SetIcon(m_hIcon, TRUE);            // Set big icon
+    SetIcon(m_hIcon, FALSE);        // Set small icon
+
+    // char version[1024];
+    // _veBasePtr->GetVersion(version);
+    // AfxMessageBox(version, MB_OK);
+
+    if (_veBasePtr->Init() != 0)
+    {
+        AfxMessageBox(_T("Init() failed"), MB_OKCANCEL);
+    }
+
+    int ch = _veBasePtr->CreateChannel();
+    if (_veBasePtr->SetSendDestination(ch, 1234, "127.0.0.1") == -1)
+    {
+        if (_veBasePtr->LastError() == VE_EXTERNAL_TRANSPORT_ENABLED)
+        {
+            _strMsg.Format(_T("*** External transport build ***"));
+            SetDlgItemText(IDC_EDIT_MESSAGE, _strMsg);
+            _externalTransportBuild = true;
+        }
+    }
+    _veBasePtr->DeleteChannel(ch);
+
+    // --- Add (preferred) local IPv4 address in title
+
+    if (_veNetworkPtr)
+    {
+        char localIP[64];
+        _veNetworkPtr->GetLocalIP(localIP);
+        CString str;
+        GetWindowText(str);
+        str.AppendFormat(_T("  [Local IPv4 address: %s]"), CharToTchar(localIP, 64));
+        SetWindowText(str);
+    }
+
+    // --- Volume sliders
+
+    if (_veVolumeControlPtr)
+    {
+        unsigned int volume(0);
+        CSliderCtrl* slider(NULL);
+
+        slider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_INPUT_VOLUME);
+        slider->SetRangeMin(0);
+        slider->SetRangeMax(255);
+        _veVolumeControlPtr->GetMicVolume(volume);
+        slider->SetPos(volume);
+
+        slider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_OUTPUT_VOLUME);
+        slider->SetRangeMin(0);
+        slider->SetRangeMax(255);
+        _veVolumeControlPtr->GetSpeakerVolume(volume);
+        slider->SetPos(volume);
+    }
+
+    // --- Panning sliders
+
+    if (_veVolumeControlPtr)
+    {
+        float lVol(0.0);
+        float rVol(0.0);
+        int leftVol, rightVol;
+        unsigned int volumePan(0);
+        CSliderCtrl* slider(NULL);
+
+        _veVolumeControlPtr->GetOutputVolumePan(-1, lVol, rVol);
+
+        leftVol = (int)(lVol*10.0f);    // [0,10]
+        rightVol = (int)(rVol*10.0f);    // [0,10]
+
+        slider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_PAN_LEFT);
+        slider->SetRange(0,10);
+        slider->SetPos(10-leftVol);        // pos 0 <=> max pan 1.0 (top of slider)
+
+        slider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_PAN_RIGHT);
+        slider->SetRange(0,10);
+        slider->SetPos(10-rightVol);
+    }
+
+    // --- APM settings
+
+    bool enable(false);
+    CButton* button(NULL);
+
+    AgcModes agcMode(kAgcDefault);
+    if (_veApmPtr->GetAgcStatus(enable, agcMode) == 0)
+    {
+        button = (CButton*)GetDlgItem(IDC_CHECK_AGC);
+        enable ? button->SetCheck(BST_CHECKED) : button->SetCheck(BST_UNCHECKED);
+    }
+    else
+    {
+        // AGC is not supported
+        GetDlgItem(IDC_CHECK_AGC)->EnableWindow(FALSE);
+    }
+
+    NsModes nsMode(kNsDefault);
+    if (_veApmPtr->GetNsStatus(enable, nsMode) == 0)
+    {
+        button = (CButton*)GetDlgItem(IDC_CHECK_NS);
+        enable ? button->SetCheck(BST_CHECKED) : button->SetCheck(BST_UNCHECKED);
+    }
+    else
+    {
+        // NS is not supported
+        GetDlgItem(IDC_CHECK_NS)->EnableWindow(FALSE);
+    }
+
+    EcModes ecMode(kEcDefault);
+    if (_veApmPtr->GetEcStatus(enable, ecMode) == 0)
+    {
+        button = (CButton*)GetDlgItem(IDC_CHECK_EC);
+        enable ? button->SetCheck(BST_CHECKED) : button->SetCheck(BST_UNCHECKED);
+    }
+    else
+    {
+        // EC is not supported
+        GetDlgItem(IDC_CHECK_EC)->EnableWindow(FALSE);
+    }
+
+    // --- First channel section
+
+    GetDlgItem(IDC_COMBO_IP_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_EDIT_TX_PORT_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_EDIT_RX_PORT_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_COMBO_CODEC_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_LIST_CODEC_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_EDIT_CODEC_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_DELETE_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_START_LISTEN_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_STOP_LISTEN_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_START_PLAYOUT_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_START_SEND_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_STOP_SEND_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_EXT_TRANS_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_PLAY_FILE_IN_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_VAD_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_MUTE_IN_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_SRTP_TX_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_SRTP_RX_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_DTMF_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_CONFERENCE_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_ON_HOLD_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_DELAY_ESTIMATE_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_RXVAD)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_AGC_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_NS_1)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_FEC)->EnableWindow(FALSE);
+
+    CComboBox* comboIP(NULL);
+    comboIP = (CComboBox*)GetDlgItem(IDC_COMBO_IP_1);
+    comboIP->AddString(_T("127.0.0.1"));
+    comboIP->SetCurSel(0);
+
+    SetDlgItemInt(IDC_EDIT_TX_PORT_1, 1111);
+    SetDlgItemInt(IDC_EDIT_RX_PORT_1, 1111);
+
+    // --- Add supported codecs to the codec combo box
+
+    CComboBox* comboCodec(NULL);
+    comboCodec = (CComboBox*)GetDlgItem(IDC_COMBO_CODEC_1);
+    comboCodec->ResetContent();
+
+    int numCodecs = _veCodecPtr->NumOfCodecs();
+    for (int idx = 0; idx < numCodecs; idx++)
+    {
+        CodecInst codec;
+        _veCodecPtr->GetCodec(idx, codec);
+        if ((_stricmp(codec.plname, "CNNB") != 0) &&
+            (_stricmp(codec.plname, "CNWB") != 0))
+        {
+            CString strCodec;
+            if (_stricmp(codec.plname, "G7221") == 0)
+                strCodec.Format(_T("%s (%d/%d/%d)"), CharToTchar(codec.plname, 32), codec.pltype, codec.plfreq/1000, codec.rate/1000);
+            else
+                strCodec.Format(_T("%s (%d/%d)"), CharToTchar(codec.plname, 32), codec.pltype, codec.plfreq/1000);
+            comboCodec->AddString(strCodec);
+        }
+        if (idx == 0)
+        {
+            SetDlgItemInt(IDC_EDIT_CODEC_1, codec.pltype);
+        }
+    }
+    comboCodec->SetCurSel(0);
+
+    CListBox* list = (CListBox*)GetDlgItem(IDC_LIST_CODEC_1);
+    list->AddString(_T("pltype"));
+    list->AddString(_T("plfreq"));
+    list->AddString(_T("pacsize"));
+    list->AddString(_T("channels"));
+    list->AddString(_T("rate"));
+    list->SetCurSel(0);
+
+    // --- Add available audio devices to the combo boxes
+
+    CComboBox* comboRecDevice(NULL);
+    CComboBox* comboPlayDevice(NULL);
+    comboRecDevice = (CComboBox*)GetDlgItem(IDC_COMBO_REC_DEVICE);
+    comboPlayDevice = (CComboBox*)GetDlgItem(IDC_COMBO_PLAY_DEVICE);
+    comboRecDevice->ResetContent();
+    comboPlayDevice->ResetContent();
+
+    if (_veHardwarePtr)
+    {
+        int numPlayout(0);
+        int numRecording(0);
+        char nameStr[128];
+        char guidStr[128];
+        CString strDevice;
+        AudioLayers audioLayer;
+
+        _veHardwarePtr->GetAudioDeviceLayer(audioLayer);
+        if (kAudioWindowsWave == audioLayer)
+        {
+            strDevice.FormatMessage(_T("Audio Layer: Windows Wave API"));
+        }
+        else if (kAudioWindowsCore == audioLayer)
+        {
+            strDevice.FormatMessage(_T("Audio Layer: Windows Core API"));
+        }
+        else
+        {
+            strDevice.FormatMessage(_T("Audio Layer: ** UNKNOWN **"));
+        }
+        SetDlgItemText(IDC_EDIT_AUDIO_LAYER, (LPCTSTR)strDevice);
+
+        _veHardwarePtr->GetNumOfRecordingDevices(numRecording);
+
+        for (int idx = 0; idx < numRecording; idx++)
+        {
+            _veHardwarePtr->GetRecordingDeviceName(idx, nameStr, guidStr);
+            strDevice.Format(_T("%s"), CharToTchar(nameStr, 128));
+            comboRecDevice->AddString(strDevice);
+        }
+        // Select default (communication) device in the combo box
+        _veHardwarePtr->GetRecordingDeviceName(-1, nameStr, guidStr);
+        CString tmp = CString(nameStr);
+        int nIndex = comboRecDevice->SelectString(-1, tmp);
+        ASSERT(nIndex != CB_ERR);
+
+        _veHardwarePtr->GetNumOfPlayoutDevices(numPlayout);
+
+        for (int idx = 0; idx < numPlayout; idx++)
+        {
+            _veHardwarePtr->GetPlayoutDeviceName(idx, nameStr, guidStr);
+            strDevice.Format(_T("%s"), CharToTchar(nameStr, 128));
+            comboPlayDevice->AddString(strDevice);
+        }
+        // Select default (communication) device in the combo box
+        _veHardwarePtr->GetPlayoutDeviceName(-1, nameStr, guidStr);
+        nIndex = comboPlayDevice->SelectString(-1, CString(nameStr));
+        ASSERT(nIndex != CB_ERR);
+    }
+
+    // --- Second channel section
+
+    GetDlgItem(IDC_COMBO_IP_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_EDIT_TX_PORT_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_EDIT_RX_PORT_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_COMBO_CODEC_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_DELETE_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_START_LISTEN_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_STOP_LISTEN_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_START_PLAYOUT_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_START_SEND_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_STOP_SEND_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_EXT_TRANS_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_PLAY_FILE_IN_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_VAD_3)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_MUTE_IN_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_SRTP_TX_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_SRTP_RX_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_BUTTON_DTMF_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_CONFERENCE_2)->EnableWindow(FALSE);
+    GetDlgItem(IDC_CHECK_ON_HOLD_2)->EnableWindow(FALSE);
+
+    comboIP = (CComboBox*)GetDlgItem(IDC_COMBO_IP_2);
+    comboIP->AddString(_T("127.0.0.1"));
+    comboIP->SetCurSel(0);
+
+    SetDlgItemInt(IDC_EDIT_TX_PORT_2, 2222);
+    SetDlgItemInt(IDC_EDIT_RX_PORT_2, 2222);
+
+    comboCodec = (CComboBox*)GetDlgItem(IDC_COMBO_CODEC_2);
+    comboCodec->ResetContent();
+
+    if (_veCodecPtr)
+    {
+        numCodecs = _veCodecPtr->NumOfCodecs();
+        for (int idx = 0; idx < numCodecs; idx++)
+        {
+            CodecInst codec;
+            _veCodecPtr->GetCodec(idx, codec);
+            CString strCodec;
+            strCodec.Format(_T("%s (%d/%d)"), CharToTchar(codec.plname, 32), codec.pltype, codec.plfreq/1000);
+            comboCodec->AddString(strCodec);
+        }
+        comboCodec->SetCurSel(0);
+    }
+
+    // --- Start windows timer
+
+    SetTimer(0, 1000, NULL);
+
+    return TRUE;  // return TRUE  unless you set the focus to a control
+}
+
+void CWinTestDlg::OnSysCommand(UINT nID, LPARAM lParam)
+{
+    if ((nID & 0xFFF0) == IDM_ABOUTBOX)
+    {
+        CAboutDlg dlgAbout;
+        dlgAbout.DoModal();
+    }
+    else if (nID == SC_CLOSE)
+    {
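+        // Delete any channels that are still allocated before closing.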
+        BOOL ret;
+        int channel(0);
+        channel = GetDlgItemInt(IDC_EDIT_1, &ret);
+        if (ret == TRUE)
+        {
+            _veBasePtr->DeleteChannel(channel);
+        }
+        channel = GetDlgItemInt(IDC_EDIT_2, &ret);
+        if (ret == TRUE)
+        {
+            _veBasePtr->DeleteChannel(channel);
+        }
+
+        CDialog::OnSysCommand(nID, lParam);
+    }
+    else
+    {
+        CDialog::OnSysCommand(nID, lParam);
+    }
+
+}
+
+// If you add a minimize button to your dialog, you will need the code below
+//  to draw the icon.  For MFC applications using the document/view model,
+//  this is automatically done for you by the framework.
+
+void CWinTestDlg::OnPaint()
+{
+    if (IsIconic())
+    {
+        CPaintDC dc(this); // device context for painting
+
+        SendMessage(WM_ICONERASEBKGND, reinterpret_cast<WPARAM>(dc.GetSafeHdc()), 0);
+
+        // Center icon in client rectangle
+        int cxIcon = GetSystemMetrics(SM_CXICON);
+        int cyIcon = GetSystemMetrics(SM_CYICON);
+        CRect rect;
+        GetClientRect(&rect);
+        int x = (rect.Width() - cxIcon + 1) / 2;
+        int y = (rect.Height() - cyIcon + 1) / 2;
+
+        // Draw the icon
+        dc.DrawIcon(x, y, m_hIcon);
+    }
+    else
+    {
+        CDialog::OnPaint();
+    }
+}
+
+// The system calls this function to obtain the cursor to display while the user drags
+//  the minimized window.
+HCURSOR CWinTestDlg::OnQueryDragIcon()
+{
+    return static_cast<HCURSOR>(m_hIcon);
+}
+
+
+void CWinTestDlg::OnBnClickedButtonCreate1()
+{
+    int channel(0);
+    TEST((channel = _veBasePtr->CreateChannel()) >= 0, _T("CreateChannel(channel=%d)"), channel);
+    if (channel >= 0)
+    {
+        _veRtpRtcpPtr->RegisterRTPObserver(channel, *this);
+
+        SetDlgItemInt(IDC_EDIT_1, channel);
+        GetDlgItem(IDC_BUTTON_CREATE_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_DELETE_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_COMBO_IP_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_EDIT_TX_PORT_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_EDIT_RX_PORT_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_COMBO_CODEC_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_LIST_CODEC_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_EDIT_CODEC_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_START_LISTEN_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_START_SEND_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_TRANS_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_IN_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_VAD_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_MUTE_IN_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_SRTP_TX_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_SRTP_RX_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_DTMF_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_ON_HOLD_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_DELAY_ESTIMATE_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_RXVAD)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_AGC_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_NS_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_FEC)->EnableWindow(TRUE);
+
+        bool enabled(false);
+        bool includeCSRCs(false);
+
+        // Always set send codec to default codec <=> index 0.
+        CodecInst codec;
+        _veCodecPtr->GetCodec(0, codec);
+        _veCodecPtr->SetSendCodec(channel, codec);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonCreate2()
+{
+    int channel(0);
+    TEST((channel = _veBasePtr->CreateChannel()) >= 0, _T("CreateChannel(%d)"), channel);
+    if (channel >= 0)
+    {
+        _veRtpRtcpPtr->RegisterRTPObserver(channel, *this);
+
+        SetDlgItemInt(IDC_EDIT_2, channel);
+        GetDlgItem(IDC_BUTTON_CREATE_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_DELETE_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_COMBO_IP_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_EDIT_TX_PORT_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_EDIT_RX_PORT_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_COMBO_CODEC_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_START_LISTEN_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_START_SEND_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_TRANS_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_IN_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_VAD_3)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_MUTE_IN_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_SRTP_TX_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_SRTP_RX_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_DTMF_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_CONFERENCE_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_ON_HOLD_2)->EnableWindow(TRUE);
+
+        bool enabled(false);
+        bool includeCSRCs(false);
+
+
+        // Always set send codec to default codec <=> index 0.
+        CodecInst codec;
+        _veCodecPtr->GetCodec(0, codec);
+        _veCodecPtr->SetSendCodec(channel, codec);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonDelete1()
+{
+    BOOL ret;
+    int channel = GetDlgItemInt(IDC_EDIT_1, &ret);
+    if (ret == TRUE)
+    {
+        _delayEstimate1 = false;
+        _rxVad = false;
+        _veRtpRtcpPtr->DeRegisterRTPObserver(channel);
+        TEST(_veBasePtr->DeleteChannel(channel) == 0, _T("DeleteChannel(channel=%d)"), channel);
+        SetDlgItemText(IDC_EDIT_1, _T(""));
+        GetDlgItem(IDC_BUTTON_CREATE_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_DELETE_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_COMBO_IP_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_EDIT_TX_PORT_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_EDIT_RX_PORT_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_COMBO_CODEC_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_LIST_CODEC_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_EDIT_CODEC_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_START_LISTEN_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_START_SEND_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_LISTEN_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_SEND_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_DTMF_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_TRANS_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_IN_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_VAD_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_MUTE_IN_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_SRTP_TX_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_SRTP_RX_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_CONFERENCE_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_ON_HOLD_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_DELAY_ESTIMATE_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_AGC_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_NS_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_RXVAD)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_FEC)->EnableWindow(FALSE);
+        SetDlgItemText(IDC_EDIT_RXVAD, _T(""));
+        GetDlgItem(IDC_EDIT_RXVAD)->EnableWindow(FALSE);
+        CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EXT_TRANS_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_IN_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_VAD_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_MUTE_IN_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_TX_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_RX_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_CONFERENCE_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_ON_HOLD_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_DELAY_ESTIMATE_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_AGC_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_NS_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_RXVAD);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_FEC);
+        button->SetCheck(BST_UNCHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonDelete2()
+{
+    BOOL ret;
+    int channel = GetDlgItemInt(IDC_EDIT_2, &ret);
+    if (ret == TRUE)
+    {
+        _delayEstimate2 = false;
+        _veRtpRtcpPtr->DeRegisterRTPObserver(channel);
+        TEST(_veBasePtr->DeleteChannel(channel) == 0, _T("DeleteChannel(%d)"), channel);
+        SetDlgItemText(IDC_EDIT_2, _T(""));
+        GetDlgItem(IDC_BUTTON_CREATE_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_DELETE_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_COMBO_IP_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_EDIT_TX_PORT_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_EDIT_RX_PORT_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_COMBO_CODEC_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_START_LISTEN_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_START_SEND_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_LISTEN_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_SEND_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_TRANS_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_IN_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_MUTE_IN_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_VAD_3)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_SRTP_TX_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_SRTP_RX_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_CONFERENCE_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_DTMF_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_ON_HOLD_2)->EnableWindow(FALSE);
+        CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EXT_TRANS_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_IN_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_VAD_3);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_MUTE_IN_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_TX_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_RX_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_CONFERENCE_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_ON_HOLD_2);
+        button->SetCheck(BST_UNCHECKED);
+    }
+}
+
+void CWinTestDlg::OnCbnSelchangeComboIp1()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CString str;
+    int port = GetDlgItemInt(IDC_EDIT_TX_PORT_1);
+    CComboBox* comboIP = (CComboBox*)GetDlgItem(IDC_COMBO_IP_1);
+    int n = comboIP->GetLBTextLen(0);
+    comboIP->GetLBText(0, str.GetBuffer(n));
+    TEST(_veBasePtr->SetSendDestination(channel, port, TcharToChar(str.GetBuffer(n), -1)) == 0,
+        _T("SetSendDestination(channel=%d, port=%d, ip=%s)"), channel, port, str.GetBuffer(n));
+    str.ReleaseBuffer();
+}
+
+void CWinTestDlg::OnCbnSelchangeComboIp2()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CString str;
+    int port = GetDlgItemInt(IDC_EDIT_TX_PORT_2);
+    CComboBox* comboIP = (CComboBox*)GetDlgItem(IDC_COMBO_IP_2);
+    int n = comboIP->GetLBTextLen(0);
+    comboIP->GetLBText(0, str.GetBuffer(n));
+    TEST(_veBasePtr->SetSendDestination(channel, port, TcharToChar(str.GetBuffer(n), -1)) == 0,
+        _T("SetSendDestination(channel=%d, port=%d, ip=%s)"), channel, port, str.GetBuffer(n));
+    str.ReleaseBuffer();
+}
+
+void CWinTestDlg::OnCbnSelchangeComboCodec1()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+
+    CodecInst codec;
+    CComboBox* comboCodec(NULL);
+    comboCodec = (CComboBox*)GetDlgItem(IDC_COMBO_CODEC_1);
+    int index = comboCodec->GetCurSel();
+    _veCodecPtr->GetCodec(index, codec);
+    if (strncmp(codec.plname, "ISAC", 4) == 0)
+    {
+        // Set iSAC to adaptive mode by default.
+        codec.rate = -1;
+    }
+    TEST(_veCodecPtr->SetSendCodec(channel, codec) == 0,
+        _T("SetSendCodec(channel=%d, plname=%s, pltype=%d, plfreq=%d, rate=%d, pacsize=%d, channels=%d)"),
+        channel, CharToTchar(codec.plname, 32), codec.pltype, codec.plfreq, codec.rate, codec.pacsize, codec.channels);
+
+    CListBox* list = (CListBox*)GetDlgItem(IDC_LIST_CODEC_1);
+    list->SetCurSel(0);
+    SetDlgItemInt(IDC_EDIT_CODEC_1, codec.pltype);
+}
+
+void CWinTestDlg::OnLbnSelchangeListCodec1()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+
+    CListBox* list = (CListBox*)GetDlgItem(IDC_LIST_CODEC_1);
+    int listIdx = list->GetCurSel();
+    if (listIdx < 0)
+        return;
+    CString str;
+    list->GetText(listIdx, str);
+
+    CodecInst codec;
+    _veCodecPtr->GetSendCodec(channel, codec);
+
+    int value = GetDlgItemInt(IDC_EDIT_CODEC_1);
+    if (str == _T("pltype"))
+    {
+        codec.pltype = value;
+    }
+    else if (str == _T("plfreq"))
+    {
+        codec.plfreq = value;
+    }
+    else if (str == _T("pacsize"))
+    {
+        codec.pacsize = value;
+    }
+    else if (str == _T("channels"))
+    {
+        codec.channels = value;
+    }
+    else if (str == _T("rate"))
+    {
+        codec.rate = value;
+    }
+    TEST(_veCodecPtr->SetSendCodec(channel, codec) == 0,
+        _T("SetSendCodec(channel=%d, plname=%s, pltype=%d, plfreq=%d, rate=%d, pacsize=%d, channels=%d)"),
+        channel, CharToTchar(codec.plname, 32), codec.pltype, codec.plfreq, codec.rate, codec.pacsize, codec.channels);
+}
+
+void CWinTestDlg::OnCbnSelchangeComboCodec2()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+
+    CodecInst codec;
+    CComboBox* comboCodec(NULL);
+    comboCodec = (CComboBox*)GetDlgItem(IDC_COMBO_CODEC_2);
+    int index = comboCodec->GetCurSel();
+    _veCodecPtr->GetCodec(index, codec);
+    TEST(_veCodecPtr->SetSendCodec(channel, codec) == 0,
+        _T("SetSendCodec(channel=%d, plname=%s, pltype=%d, plfreq=%d, rate=%d, pacsize=%d, channels=%d)"),
+        channel, CharToTchar(codec.plname, 32), codec.pltype, codec.plfreq, codec.rate, codec.pacsize, codec.channels);
+}
+
+void CWinTestDlg::OnBnClickedButtonStartListen1()
+{
+    int ret1(0);
+    int ret2(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    int port = GetDlgItemInt(IDC_EDIT_RX_PORT_1);
+    TEST((ret1 = _veBasePtr->SetLocalReceiver(channel, port)) == 0, _T("SetLocalReceiver(channel=%d, port=%d)"), channel, port);
+    TEST((ret2 = _veBasePtr->StartReceive(channel)) == 0, _T("StartReceive(channel=%d)"), channel);
+    if (ret1 == 0 && ret2 == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_LISTEN_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_LISTEN_1)->EnableWindow(TRUE);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonStartListen2()
+{
+    int ret1(0);
+    int ret2(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    int port = GetDlgItemInt(IDC_EDIT_RX_PORT_2);
+    TEST((ret1 = _veBasePtr->SetLocalReceiver(channel, port)) == 0, _T("SetLocalReceiver(channel=%d, port=%d)"), channel, port);
+    TEST((ret2 = _veBasePtr->StartReceive(channel)) == 0, _T("StartReceive(channel=%d)"), channel);
+    if (ret1 == 0 && ret2 == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_LISTEN_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_LISTEN_2)->EnableWindow(TRUE);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonStopListen1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    TEST((ret = _veBasePtr->StopReceive(channel)) == 0, _T("StopReceive(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_LISTEN_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_STOP_LISTEN_1)->EnableWindow(FALSE);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonStopListen2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    TEST((ret = _veBasePtr->StopReceive(channel)) == 0, _T("StopReceive(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_LISTEN_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_STOP_LISTEN_2)->EnableWindow(FALSE);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonStartPlayout1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    TEST((ret = _veBasePtr->StartPlayout(channel)) == 0, _T("StartPlayout(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_1)->EnableWindow(TRUE);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonStartPlayout2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    TEST((ret = _veBasePtr->StartPlayout(channel)) == 0, _T("StartPlayout(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_2)->EnableWindow(TRUE);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonStopPlayout1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    TEST((ret = _veBasePtr->StopPlayout(channel)) == 0, _T("StopPlayout(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_1)->EnableWindow(FALSE);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonStopPlayout2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    TEST((ret = _veBasePtr->StopPlayout(channel)) == 0, _T("StopPlayout(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_2)->EnableWindow(FALSE);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonStartSend1()
+{
+    UpdateData(TRUE);  // update IP address
+
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    if (!_externalTransport)
+    {
+        int port = GetDlgItemInt(IDC_EDIT_TX_PORT_1);
+        TEST(_veBasePtr->SetSendDestination(channel, port, TcharToChar(_strComboIp1.GetBuffer(7), -1)) == 0,
+            _T("SetSendDestination(channel=%d, port=%d, ip=%s)"), channel, port, _strComboIp1.GetBuffer(7));
+        _strComboIp1.ReleaseBuffer();
+    }
+
+    // _veVideoSyncPtr->SetInitTimestamp(0, 0);
+    // OnCbnSelchangeComboCodec1();
+
+    TEST((ret = _veBasePtr->StartSend(channel)) == 0, _T("StartSend(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_SEND_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_SEND_1)->EnableWindow(TRUE);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonStartSend2()
+{
+    UpdateData(TRUE);  // update IP address
+
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    if (!_externalTransport)
+    {
+        int port = GetDlgItemInt(IDC_EDIT_TX_PORT_2);
+        TEST(_veBasePtr->SetSendDestination(channel, port, TcharToChar(_strComboIp2.GetBuffer(7), -1)) == 0,
+            _T("SetSendDestination(channel=%d, port=%d, ip=%s)"), channel, port, _strComboIp2.GetBuffer(7));
+        _strComboIp2.ReleaseBuffer();
+    }
+
+    // OnCbnSelchangeComboCodec2();
+
+    TEST((ret = _veBasePtr->StartSend(channel)) == 0, _T("StartSend(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_SEND_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_SEND_2)->EnableWindow(TRUE);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonStopSend1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    TEST((ret = _veBasePtr->StopSend(channel)) == 0, _T("StopSend(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_SEND_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_STOP_SEND_1)->EnableWindow(FALSE);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonStopSend2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    TEST((ret = _veBasePtr->StopSend(channel)) == 0, _T("StopSend(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_SEND_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_STOP_SEND_2)->EnableWindow(FALSE);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckExtTrans1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EXT_TRANS_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
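+    // When checked, RTP/RTCP for this channel is routed through the
+    // MyTransport test object instead of VoiceEngine's internal sockets;
+    // unchecking restores the internal transport.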
+    if (enable)
+    {
+        TEST((ret = _veNetworkPtr->RegisterExternalTransport(channel, *_transportPtr)) == 0,
+            _T("RegisterExternalTransport(channel=%d, transport=0x%x)"), channel, _transportPtr);
+    }
+    else
+    {
+        TEST((ret = _veNetworkPtr->DeRegisterExternalTransport(channel)) == 0,
+            _T("DeRegisterExternalTransport(channel=%d)"), channel);
+    }
+    if (ret == 0)
+    {
+        _externalTransport = enable;
+    }
+    else
+    {
+        // restore initial state since the API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckExtTrans2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EXT_TRANS_2);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        TEST((ret = _veNetworkPtr->RegisterExternalTransport(channel, *_transportPtr)) == 0,
+            _T("RegisterExternalTransport(channel=%d, transport=0x%x)"), channel, _transportPtr);
+    }
+    else
+    {
+        TEST((ret = _veNetworkPtr->DeRegisterExternalTransport(channel)) == 0,
+            _T("DeRegisterExternalTransport(channel=%d)"), channel);
+    }
+    if (ret == 0)
+    {
+        _externalTransport = enable;
+    }
+    else
+    {
+        // restore initial state since the API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckPlayFileIn1()
+{
+    const char micFile[] = "/tmp/audio_short16.pcm";
+
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_IN_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        bool mix;
+        const bool loop(true);
+        const FileFormats format = kFileFormatPcm16kHzFile;
+        const float scale(1.0);
+
+        // Toggle the mix flag on every click.
+        mix = (_checkPlayFileIn1 % 2 == 0);
+        TEST((ret = _veFilePtr->StartPlayingFileAsMicrophone(channel, micFile, loop, mix, format, scale)) == 0,
+            _T("StartPlayingFileAsMicrophone(channel=%d, file=%s, loop=%d, mix=%d, format=%d, scale=%2.1f)"),
+            channel, CharToTchar(micFile, -1), loop, mix, format, scale);
+        _checkPlayFileIn1++;
+    }
+    else
+    {
+        TEST((ret = _veFilePtr->StopPlayingFileAsMicrophone(channel)) == 0,
+            _T("StopPlayingFileAsMicrophone(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since the API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckPlayFileIn2()
+{
+    const char micFile[] = "/tmp/audio_long16.pcm";
+
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_IN_2);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        bool mix;
+        const bool loop(true);
+        const FileFormats format = kFileFormatPcm16kHzFile;
+        const float scale(1.0);
+
+        // Toggle the mix flag on every click.
+        mix = (_checkPlayFileIn2 % 2 == 0);
+        TEST((ret = _veFilePtr->StartPlayingFileAsMicrophone(channel, micFile, loop, mix, format, scale)) == 0,
+            _T("StartPlayingFileAsMicrophone(channel=%d, file=%s, loop=%d, mix=%d, format=%d, scale=%2.1f)"),
+            channel, CharToTchar(micFile, -1), loop, mix, format, scale);
+        _checkPlayFileIn2++;
+    }
+    else
+    {
+        TEST((ret = _veFilePtr->StopPlayingFileAsMicrophone(channel)) == 0,
+            _T("StopPlayingFileAsMicrophone(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since the API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckPlayFileOut1()
+{
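+    // Each time the box is checked, the next entry (modulo 7) in the
+    // format/file lists below is played out locally on the channel.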
+    const FileFormats formats[7]  = {{kFileFormatPcm16kHzFile},
+                                          {kFileFormatWavFile},
+                                          {kFileFormatWavFile},
+                                          {kFileFormatWavFile},
+                                          {kFileFormatWavFile},
+                                          {kFileFormatWavFile},
+                                          {kFileFormatWavFile}};
+    const char spkrFiles[8][32] = {{"/tmp/audio_short16.pcm"},
+                                   {"/tmp/audio_tiny8.wav"},
+                                   {"/tmp/audio_tiny11.wav"},
+                                   {"/tmp/audio_tiny16.wav"},
+                                   {"/tmp/audio_tiny22.wav"},
+                                   {"/tmp/audio_tiny32.wav"},
+                                   {"/tmp/audio_tiny44.wav"},
+                                   {"/tmp/audio_tiny48.wav"}};
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        const bool loop(true);
+        const float volumeScaling(1.0);
+        const int startPointMs(0);
+        const int stopPointMs(0);
+        const FileFormats format = formats[_checkPlayFileOut1 % 7];
+        const char* spkrFile = spkrFiles[_checkPlayFileOut1 % 7];
+
+        CString str;
+        if (_checkPlayFileOut1 % 7 == 0)
+        {
+            str = _T("kFileFormatPcm16kHzFile");
+        }
+        else
+        {
+            str = _T("kFileFormatWavFile");
+        }
+        // (_checkPlayFileOut1 %2 == 0) ? mix = true : mix = false;
+        TEST((ret = _veFilePtr->StartPlayingFileLocally(channel, spkrFile, loop, format, volumeScaling, startPointMs, stopPointMs)) == 0,
+            _T("StartPlayingFileLocally(channel=%d, file=%s, loop=%d, format=%s, scale=%2.1f, start=%d, stop=%d)"),
+            channel, CharToTchar(spkrFile, -1), loop, str, volumeScaling, startPointMs, stopPointMs);
+        _checkPlayFileOut1++;
+    }
+    else
+    {
+        TEST((ret = _veFilePtr->StopPlayingFileLocally(channel)) == 0,
+            _T("StopPlayingFileLocally(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since the API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckPlayFileOut2()
+{
+    const char spkrFile[] = "/tmp/audio_long16.pcm";
+
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_2);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        const bool loop(true);
+        const FileFormats format = kFileFormatPcm16kHzFile;
+        const float volumeScaling(1.0);
+        const int startPointMs(0);
+        const int stopPointMs(0);
+
+        // (_checkPlayFileOut2 %2 == 0) ? mix = true : mix = false;
+        TEST((ret = _veFilePtr->StartPlayingFileLocally(channel, spkrFile, loop, format, volumeScaling, startPointMs, stopPointMs)) == 0,
+            _T("StartPlayingFileLocally(channel=%d, file=%s, loop=%d, format=%d, scale=%2.1f, start=%d, stop=%d)"),
+            channel, CharToTchar(spkrFile, -1), loop, format, volumeScaling, startPointMs, stopPointMs);
+        // _checkPlayFileIn2++;
+    }
+    else
+    {
+        TEST((ret = _veFilePtr->StopPlayingFileLocally(channel)) == 0,
+            _T("StopPlayingFileLocally(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since the API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckExtMediaIn1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* buttonExtTrans = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_1);
+    int check = buttonExtTrans->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        TEST(_veExternalMediaPtr->RegisterExternalMediaProcessing(channel, kRecordingPerChannel, *_externalMediaPtr) == 0,
+            _T("RegisterExternalMediaProcessing(channel=%d, kRecordingPerChannel, processObject=0x%x)"), channel, _externalMediaPtr);
+    }
+    else
+    {
+        TEST(_veExternalMediaPtr->DeRegisterExternalMediaProcessing(channel, kRecordingPerChannel) == 0,
+            _T("DeRegisterExternalMediaProcessing(channel=%d, kRecordingPerChannel)"), channel);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckExtMediaIn2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* buttonExtTrans = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_2);
+    int check = buttonExtTrans->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        TEST(_veExternalMediaPtr->RegisterExternalMediaProcessing(channel, kRecordingPerChannel, *_externalMediaPtr) == 0,
+            _T("RegisterExternalMediaProcessing(channel=%d, kRecordingPerChannel, processObject=0x%x)"), channel, _externalMediaPtr);
+    }
+    else
+    {
+        TEST(_veExternalMediaPtr->DeRegisterExternalMediaProcessing(channel, kRecordingPerChannel) == 0,
+            _T("DeRegisterExternalMediaProcessing(channel=%d, kRecordingPerChannel)"), channel);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckExtMediaOut1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* buttonExtTrans = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_1);
+    int check = buttonExtTrans->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        TEST(_veExternalMediaPtr->RegisterExternalMediaProcessing(channel, kPlaybackPerChannel, *_externalMediaPtr) == 0,
+            _T("RegisterExternalMediaProcessing(channel=%d, kPlaybackPerChannel, processObject=0x%x)"), channel, _externalMediaPtr);
+    }
+    else
+    {
+        TEST(_veExternalMediaPtr->DeRegisterExternalMediaProcessing(channel, kPlaybackPerChannel) == 0,
+            _T("DeRegisterExternalMediaProcessing(channel=%d, kPlaybackPerChannel)"), channel);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckExtMediaOut2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* buttonExtTrans = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_2);
+    int check = buttonExtTrans->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        TEST(_veExternalMediaPtr->RegisterExternalMediaProcessing(channel, kPlaybackPerChannel, *_externalMediaPtr) == 0,
+            _T("RegisterExternalMediaProcessing(channel=%d, kPlaybackPerChannel, processObject=0x%x)"), channel, _externalMediaPtr);
+    }
+    else
+    {
+        TEST(_veExternalMediaPtr->DeRegisterExternalMediaProcessing(channel, kPlaybackPerChannel) == 0,
+            _T("DeRegisterExternalMediaProcessing(channel=%d, kPlaybackPerChannel)"), channel);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckVad1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_VAD_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
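+        // Cycle through the four VAD modes; each click while enabling picks
+        // the next mode (wraps around via _checkVAD1).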
+        CString str;
+        VadModes mode(kVadConventional);
+        if (_checkVAD1 % 4 == 0)
+        {
+            mode = kVadConventional;
+            str = _T("kVadConventional");
+        }
+        else if (_checkVAD1 % 4 == 1)
+        {
+            mode = kVadAggressiveLow;
+            str = _T("kVadAggressiveLow");
+        }
+        else if (_checkVAD1 % 4 == 2)
+        {
+            mode = kVadAggressiveMid;
+            str = _T("kVadAggressiveMid");
+        }
+        else if (_checkVAD1 % 4 == 3)
+        {
+            mode = kVadAggressiveHigh;
+            str = _T("kVadAggressiveHigh");
+        }
+        const bool disableDTX(false);
+        TEST((ret = _veCodecPtr->SetVADStatus(channel, true, mode, disableDTX)) == 0,
+            _T("SetVADStatus(channel=%d, enable=%d, mode=%s, disableDTX=%d)"), channel, enable, str, disableDTX);
+        _checkVAD1++;
+    }
+    else
+    {
+        TEST((ret = _veCodecPtr->SetVADStatus(channel, false)) == 0, _T("SetVADStatus(channel=%d, enable=%d)"), channel, false);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since the API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckVad2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_VAD_2);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        CString str;
+        VadModes mode(kVadConventional);
+        if (_checkVAD2 % 4 == 0)
+        {
+            mode = kVadConventional;
+            str = _T("kVadConventional");
+        }
+        else if (_checkVAD2 % 4 == 1)
+        {
+            mode = kVadAggressiveLow;
+            str = _T("kVadAggressiveLow");
+        }
+        else if (_checkVAD2 % 4 == 2)
+        {
+            mode = kVadAggressiveMid;
+            str = _T("kVadAggressiveMid");
+        }
+        else if (_checkVAD2 % 4 == 3)
+        {
+            mode = kVadAggressiveHigh;
+            str = _T("kVadAggressiveHigh");
+        }
+        const bool disableDTX(false);
+        TEST((ret = _veCodecPtr->SetVADStatus(channel, true, mode, disableDTX)) == 0,
+            _T("SetVADStatus(channel=%d, enable=%d, mode=%s, disableDTX=%d)"), channel, enable, str, disableDTX);
+        _checkVAD2++;
+    }
+    else
+    {
+        TEST((ret = _veCodecPtr->SetVADStatus(channel, false)) == 0, _T("SetVADStatus(channel=%d, enable=%d)"), channel, false);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since the API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckMuteIn1()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* buttonMute = (CButton*)GetDlgItem(IDC_CHECK_MUTE_IN_1);
+    int check = buttonMute->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    TEST(_veVolumeControlPtr->SetInputMute(channel, enable) == 0,
+        _T("SetInputMute(channel=%d, enable=%d)"), channel, enable);
+}
+
+void CWinTestDlg::OnBnClickedCheckMuteIn2()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* buttonMute = (CButton*)GetDlgItem(IDC_CHECK_MUTE_IN_2);
+    int check = buttonMute->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    TEST(_veVolumeControlPtr->SetInputMute(channel, enable) == 0,
+        _T("SetInputMute(channel=%d, enable=%d)"), channel, enable);
+}
+
+void CWinTestDlg::OnBnClickedCheckSrtpTx1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_TX_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    bool useForRTCP = false;
+    if (enable)
+    {
+        useForRTCP = (_checkSrtpTx1++ % 2 != 0);
+        TEST((ret = _veEncryptionPtr->EnableSRTPSend(channel,
+            kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP)) == 0,
+            _T("EnableSRTPSend(channel=%d, kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP=%d)"),
+            channel, useForRTCP);
+    }
+    else
+    {
+        TEST((ret = _veEncryptionPtr->DisableSRTPSend(channel)) == 0, _T("DisableSRTPSend(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since the API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckSrtpTx2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_TX_2);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    bool useForRTCP = false;
+    if (enable)
+    {
+        useForRTCP = (_checkSrtpTx2++ % 2 != 0);
+        TEST((ret = _veEncryptionPtr->EnableSRTPSend(channel,
+            kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP)) == 0,
+            _T("EnableSRTPSend(channel=%d, kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP=%d)"),
+            channel, useForRTCP);
+    }
+    else
+    {
+        TEST((ret = _veEncryptionPtr->DisableSRTPSend(channel)) == 0, _T("DisableSRTPSend(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since the API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckSrtpRx1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_RX_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    bool useForRTCP(false);
+    if (enable)
+    {
+        useForRTCP = (_checkSrtpRx1++ % 2 != 0);
+        TEST((ret = _veEncryptionPtr->EnableSRTPReceive(channel,
+            kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP)) == 0,
+            _T("EnableSRTPReceive(channel=%d, kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP=%d)"),
+            channel, useForRTCP);
+    }
+    else
+    {
+        TEST((ret = _veEncryptionPtr->DisableSRTPReceive(channel)) == 0, _T("DisableSRTPReceive(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since the API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckSrtpRx2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_RX_2);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    bool useForRTCP(false);
+    if (enable)
+    {
+        useForRTCP = (_checkSrtpRx2++ % 2 != 0);
+        TEST((ret = _veEncryptionPtr->EnableSRTPReceive(channel,
+            kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP)) == 0,
+            _T("EnableSRTPReceive(channel=%d, kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP=%d)"),
+            channel, useForRTCP);
+    }
+    else
+    {
+        TEST((ret = _veEncryptionPtr->DisableSRTPReceive(channel)) == 0, _T("DisableSRTPReceive(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since the API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckExtEncryption1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        TEST((ret = _veEncryptionPtr->RegisterExternalEncryption(channel, *_encryptionPtr)) == 0,
+            _T("RegisterExternalEncryption(channel=%d, encryption=0x%x)"), channel, _encryptionPtr);
+    }
+    else
+    {
+        TEST((ret = _veEncryptionPtr->DeRegisterExternalEncryption(channel)) == 0,
+            _T("DeRegisterExternalEncryption(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since the API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckExtEncryption2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_2);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        TEST((ret = _veEncryptionPtr->RegisterExternalEncryption(channel, *_encryptionPtr)) == 0,
+            _T("RegisterExternalEncryption(channel=%d, encryption=0x%x)"), channel, _encryptionPtr);
+    }
+    else
+    {
+        TEST((ret = _veEncryptionPtr->DeRegisterExternalEncryption(channel)) == 0,
+            _T("DeRegisterExternalEncryption(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since the API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonDtmf1()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CTelephonyEvent dlgTelephoneEvent(_vePtr, channel, this);
+    dlgTelephoneEvent.DoModal();
+}
+
+void CWinTestDlg::OnBnClickedButtonDtmf2()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CTelephonyEvent dlgTelephoneEvent(_vePtr, channel, this);
+    dlgTelephoneEvent.DoModal();
+}
+
+void CWinTestDlg::OnBnClickedCheckConference1()
+{
+    // Not supported yet
+}
+
+void CWinTestDlg::OnBnClickedCheckConference2()
+{
+    // Not supported yet
+}
+
+void CWinTestDlg::OnBnClickedCheckOnHold1()
+{
+    SHORT shiftKeyIsPressed = ::GetAsyncKeyState(VK_SHIFT);
+
+    CString str;
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_ON_HOLD_1);
+    int check = button->GetCheck();
+
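+    // Clicking while holding SHIFT reads back the current on-hold status
+    // instead of changing it; the check-box state is restored afterwards.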
+    if (shiftKeyIsPressed)
+    {
+        bool enabled(false);
+        OnHoldModes mode(kHoldSendAndPlay);
+        TEST(_veBasePtr->GetOnHoldStatus(channel, enabled, mode) == 0,
+            _T("GetOnHoldStatus(channel=%d, enabled=?, mode=?)"), channel);
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+
+        switch (mode)
+        {
+        case kHoldSendAndPlay:
+            str = _T("kHoldSendAndPlay");
+            break;
+        case kHoldSendOnly:
+            str = _T("kHoldSendOnly");
+            break;
+        case kHoldPlayOnly:
+            str = _T("kHoldPlayOnly");
+            break;
+        default:
+            break;
+        }
+        PRINT_GET_RESULT(_T("enabled=%d, mode=%s"), enabled, str);
+        return;
+    }
+
+    int ret(0);
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        OnHoldModes mode(kHoldSendAndPlay);
+        if (_checkOnHold1 % 3 == 0)
+        {
+            mode = kHoldSendAndPlay;
+            str = _T("kHoldSendAndPlay");
+        }
+        else if (_checkOnHold1 % 3 == 1)
+        {
+            mode = kHoldSendOnly;
+            str = _T("kHoldSendOnly");
+        }
+        else if (_checkOnHold1 % 3 == 2)
+        {
+            mode = kHoldPlayOnly;
+            str = _T("kHoldPlayOnly");
+        }
+        TEST((ret = _veBasePtr->SetOnHoldStatus(channel, enable, mode)) == 0,
+            _T("SetOnHoldStatus(channel=%d, enable=%d, mode=%s)"), channel, enable, str);
+        _checkOnHold1++;
+    }
+    else
+    {
+        TEST((ret = _veBasePtr->SetOnHoldStatus(channel, enable)) == 0,
+            _T("SetOnHoldStatus(channel=%d, enable=%d)"), channel, enable);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckOnHold2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_ON_HOLD_2);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        CString str;
+        OnHoldModes mode(kHoldSendAndPlay);
+        if (_checkOnHold2 % 3 == 0)
+        {
+            mode = kHoldSendAndPlay;
+            str = _T("kHoldSendAndPlay");
+        }
+        else if (_checkOnHold2 % 3 == 1)
+        {
+            mode = kHoldSendOnly;
+            str = _T("kHoldSendOnly");
+        }
+        else if (_checkOnHold2 % 3 == 2)
+        {
+            mode = kHoldPlayOnly;
+            str = _T("kHoldPlayOnly");
+        }
+        TEST((ret = _veBasePtr->SetOnHoldStatus(channel, enable, mode)) == 0,
+            _T("SetOnHoldStatus(channel=%d, enable=%d, mode=%s)"), channel, enable, str);
+        _checkOnHold2++;
+    }
+    else
+    {
+        TEST((ret = _veBasePtr->SetOnHoldStatus(channel, enable)) == 0,
+            _T("SetOnHoldStatus(channel=%d, enable=%d)"), channel, enable);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckDelayEstimate1()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_DELAY_ESTIMATE_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+
+    if (enable)
+    {
+        _delayEstimate1 = true;
+        SetDlgItemInt(IDC_EDIT_DELAY_ESTIMATE_1, 0);
+    }
+    else
+    {
+        _delayEstimate1 = false;
+        SetDlgItemText(IDC_EDIT_DELAY_ESTIMATE_1, _T(""));
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckRxvad()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_RXVAD);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+
+    if (enable)
+    {
+        _rxVad = true;
+        _veApmPtr->RegisterRxVadObserver(channel, *_rxVadObserverPtr);
+        SetDlgItemInt(IDC_EDIT_RXVAD, 0);
+    }
+    else
+    {
+        _rxVad = false;
+        _veApmPtr->DeRegisterRxVadObserver(channel);
+        SetDlgItemText(IDC_EDIT_RXVAD, _T(""));
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckAgc1()
+{
+    SHORT shiftKeyIsPressed = ::GetAsyncKeyState(VK_SHIFT);
+
+    CString str;
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_AGC_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+
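+    // SHIFT+click reads back the current receive-side AGC status without
+    // changing it; the check-box state is restored below.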
+    if (shiftKeyIsPressed)
+    {
+        bool enabled(false);
+        AgcModes mode(kAgcAdaptiveDigital);
+        TEST(_veApmPtr->GetRxAgcStatus(channel, enabled, mode) == 0,
+            _T("GetRxAgcStatus(channel=%d, enabled=?, mode=?)"), channel);
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+
+        switch (mode)
+        {
+        case kAgcAdaptiveAnalog:
+            str = _T("kAgcAdaptiveAnalog");
+            break;
+        case kAgcAdaptiveDigital:
+            str = _T("kAgcAdaptiveDigital");
+            break;
+        case kAgcFixedDigital:
+            str = _T("kAgcFixedDigital");
+            break;
+        default:
+            break;
+        }
+        PRINT_GET_RESULT(_T("enabled=%d, mode=%s"), enabled, str);
+        return;
+    }
+
+    if (enable)
+    {
+        AgcModes mode(kAgcDefault);
+        if (_checkAGC1 % 3 == 0)
+        {
+            mode = kAgcDefault;
+            str = _T("kAgcDefault");
+        }
+        else if (_checkAGC1 % 3 == 1)
+        {
+            mode = kAgcAdaptiveDigital;
+            str = _T("kAgcAdaptiveDigital");
+        }
+        else if (_checkAGC1 % 3 == 2)
+        {
+            mode = kAgcFixedDigital;
+            str = _T("kAgcFixedDigital");
+        }
+        TEST(_veApmPtr->SetRxAgcStatus(channel, true, mode) == 0, _T("SetRxAgcStatus(channel=%d, enable=%d, %s)"), channel, enable, str);
+        _checkAGC1++;
+    }
+    else
+    {
+        TEST(_veApmPtr->SetRxAgcStatus(channel, false, kAgcUnchanged) == 0, _T("SetRxAgcStatus(channel=%d, enable=%d)"), channel, enable);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckNs1()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* buttonNS = (CButton*)GetDlgItem(IDC_CHECK_NS_1);
+    int check = buttonNS->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        CString str;
+        NsModes mode(kNsDefault);
+        if (_checkNS1 % 6 == 0)
+        {
+            mode = kNsDefault;
+            str = _T("kNsDefault");
+        }
+        else if (_checkNS1 % 6 == 1)
+        {
+            mode = kNsConference;
+            str = _T("kNsConference");
+        }
+        else if (_checkNS1 % 6 == 2)
+        {
+            mode = kNsLowSuppression;
+            str = _T("kNsLowSuppression");
+        }
+        else if (_checkNS1 % 6 == 3)
+        {
+            mode = kNsModerateSuppression;
+            str = _T("kNsModerateSuppression");
+        }
+        else if (_checkNS1 % 6 == 4)
+        {
+            mode = kNsHighSuppression;
+            str = _T("kNsHighSuppression");
+        }
+        else if (_checkNS1 % 6 == 5)
+        {
+            mode = kNsVeryHighSuppression;
+            str = _T("kNsVeryHighSuppression");
+        }
+        TEST(_veApmPtr->SetRxNsStatus(channel, true, mode) == 0, _T("SetRxNsStatus(channel=%d, enable=%d, %s)"), channel, enable, str);
+        _checkNS1++;
+    }
+    else
+    {
+        TEST(_veApmPtr->SetRxNsStatus(channel, false, kNsUnchanged) == 0, _T("SetRxNsStatus(channel=%d, enable=%d)"), channel, enable);
+    }
+}
+
+// ----------------------------------------------------------------------------
+//                         Channel-independent Operations
+// ----------------------------------------------------------------------------
+
+void CWinTestDlg::OnBnClickedCheckPlayFileIn()
+{
+    const char micFile[] = "/tmp/audio_short16.pcm";
+    // const char micFile[] = "/tmp/audio_long16noise.pcm";
+
+    int ret(0);
+    int channel(-1);
+    CButton* buttonExtTrans = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_IN);
+    int check = buttonExtTrans->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        bool mix;
+        const bool loop(true);
+        const FileFormats format = kFileFormatPcm16kHzFile;
+        const float scale(1.0);
+
+        mix = (_checkPlayFileIn % 2 == 0);
+        TEST(_veFilePtr->StartPlayingFileAsMicrophone(channel, micFile, loop, mix, format, scale) == 0,
+            _T("StartPlayingFileAsMicrophone(channel=%d, file=%s, loop=%d, mix=%d, format=%d, scale=%2.1f)"),
+            channel, CharToTchar(micFile, -1), loop, mix, format, scale);
+        _checkPlayFileIn++;
+    }
+    else
+    {
+        TEST(_veFilePtr->StopPlayingFileAsMicrophone(channel) == 0,
+            _T("StopPlayingFileAsMicrophone(channel=%d)"), channel);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckRecMic()
+{
+    const char micFile[] = "/tmp/rec_mic_mono_16kHz.pcm";
+
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_REC_MIC);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        TEST(_veFilePtr->StartRecordingMicrophone(micFile, NULL) == 0, _T("StartRecordingMicrophone(file=%s)"), CharToTchar(micFile, -1));
+    }
+    else
+    {
+        TEST(_veFilePtr->StopRecordingMicrophone() == 0, _T("StopRecordingMicrophone()"));
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckAgc()
+{
+    CButton* buttonAGC = (CButton*)GetDlgItem(IDC_CHECK_AGC);
+    int check = buttonAGC->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        CString str;
+        AgcModes mode(kAgcDefault);
+        if (_checkAGC % 4 == 0)
+        {
+            mode = kAgcDefault;
+            str = _T("kAgcDefault");
+        }
+        else if (_checkAGC % 4 == 1)
+        {
+            mode = kAgcAdaptiveAnalog;
+            str = _T("kAgcAdaptiveAnalog");
+        }
+        else if (_checkAGC % 4 == 2)
+        {
+            mode = kAgcAdaptiveDigital;
+            str = _T("kAgcAdaptiveDigital");
+        }
+        else if (_checkAGC % 4 == 3)
+        {
+            mode = kAgcFixedDigital;
+            str = _T("kAgcFixedDigital");
+        }
+        TEST(_veApmPtr->SetAgcStatus(true, mode) == 0, _T("SetAgcStatus(enable=%d, %s)"), enable, str);
+        _checkAGC++;
+    }
+    else
+    {
+        TEST(_veApmPtr->SetAgcStatus(false, kAgcUnchanged) == 0, _T("SetAgcStatus(enable=%d)"), enable);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckNs()
+{
+    CButton* buttonNS = (CButton*)GetDlgItem(IDC_CHECK_NS);
+    int check = buttonNS->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        CString str;
+        NsModes mode(kNsDefault);
+        if (_checkNS % 6 == 0)
+        {
+            mode = kNsDefault;
+            str = _T("kNsDefault");
+        }
+        else if (_checkNS % 6 == 1)
+        {
+            mode = kNsConference;
+            str = _T("kNsConference");
+        }
+        else if (_checkNS % 6 == 2)
+        {
+            mode = kNsLowSuppression;
+            str = _T("kNsLowSuppression");
+        }
+        else if (_checkNS % 6 == 3)
+        {
+            mode = kNsModerateSuppression;
+            str = _T("kNsModerateSuppression");
+        }
+        else if (_checkNS % 6 == 4)
+        {
+            mode = kNsHighSuppression;
+            str = _T("kNsHighSuppression");
+        }
+        else if (_checkNS % 6 == 5)
+        {
+            mode = kNsVeryHighSuppression;
+            str = _T("kNsVeryHighSuppression");
+        }
+        TEST(_veApmPtr->SetNsStatus(true, mode) == 0, _T("SetNsStatus(enable=%d, %s)"), enable, str);
+        _checkNS++;
+    }
+    else
+    {
+        TEST(_veApmPtr->SetNsStatus(false, kNsUnchanged) == 0, _T("SetNsStatus(enable=%d)"), enable);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckEc()
+{
+    CButton* buttonEC = (CButton*)GetDlgItem(IDC_CHECK_EC);
+    int check = buttonEC->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        CString str;
+        EcModes mode(kEcDefault);
+        if (_checkEC % 4 == 0)
+        {
+            mode = kEcDefault;
+            str = _T("kEcDefault");
+        }
+        else if (_checkEC % 4 == 1)
+        {
+            mode = kEcConference;
+            str = _T("kEcConference");
+        }
+        else if (_checkEC % 4 == 2)
+        {
+            mode = kEcAec;
+            str = _T("kEcAec");
+        }
+        else if (_checkEC % 4 == 3)
+        {
+            mode = kEcAecm;
+            str = _T("kEcAecm");
+        }
+        TEST(_veApmPtr->SetEcStatus(true, mode) == 0, _T("SetEcStatus(enable=%d, %s)"), enable, str);
+        _checkEC++;
+    }
+    else
+    {
+        TEST(_veApmPtr->SetEcStatus(false, kEcUnchanged) == 0, _T("SetEcStatus(enable=%d)"), enable);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckMuteIn()
+{
+    CButton* buttonMute = (CButton*)GetDlgItem(IDC_CHECK_MUTE_IN);
+    int check = buttonMute->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    const int channel(-1);
+    TEST(_veVolumeControlPtr->SetInputMute(channel, enable) == 0,
+        _T("SetInputMute(channel=%d, enable=%d)"), channel, enable);
+}
+
+void CWinTestDlg::OnBnClickedCheckExtMediaIn()
+{
+    const int channel(-1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_IN);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        TEST(_veExternalMediaPtr->RegisterExternalMediaProcessing(channel, kRecordingAllChannelsMixed, *_externalMediaPtr) == 0,
+            _T("RegisterExternalMediaProcessing(channel=%d, kRecordingAllChannelsMixed, processObject=0x%x)"), channel, _externalMediaPtr);
+    }
+    else
+    {
+        TEST(_veExternalMediaPtr->DeRegisterExternalMediaProcessing(channel, kRecordingAllChannelsMixed) == 0,
+            _T("DeRegisterExternalMediaProcessing(channel=%d, kRecordingAllChannelsMixed)"), channel);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckExtMediaOut()
+{
+    const int channel(-1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        TEST(_veExternalMediaPtr->RegisterExternalMediaProcessing(channel, kPlaybackAllChannelsMixed, *_externalMediaPtr) == 0,
+            _T("RegisterExternalMediaProcessing(channel=%d, kPlaybackAllChannelsMixed, processObject=0x%x)"), channel, _externalMediaPtr);
+    }
+    else
+    {
+        TEST(_veExternalMediaPtr->DeRegisterExternalMediaProcessing(channel, kPlaybackAllChannelsMixed) == 0,
+            _T("DeRegisterExternalMediaProcessing(channel=%d, kPlaybackAllChannelsMixed)"), channel);
+    }
+}
+
+void CWinTestDlg::OnCbnSelchangeComboRecDevice()
+{
+    CComboBox* comboCodec(NULL);
+    comboCodec = (CComboBox*)GetDlgItem(IDC_COMBO_REC_DEVICE);
+    int index = comboCodec->GetCurSel();
+    TEST(_veHardwarePtr->SetRecordingDevice(index) == 0,
+        _T("SetRecordingDevice(index=%d)"), index);
+}
+
+void CWinTestDlg::OnCbnSelchangeComboPlayDevice()
+{
+    CComboBox* comboCodec(NULL);
+    comboCodec = (CComboBox*)GetDlgItem(IDC_COMBO_PLAY_DEVICE);
+    int index = comboCodec->GetCurSel();
+    TEST(_veHardwarePtr->SetPlayoutDevice(index) == 0,
+        _T("SetPlayoutDevice(index=%d)"), index);
+}
+
+void CWinTestDlg::OnNMReleasedcaptureSliderInputVolume(NMHDR *pNMHDR, LRESULT *pResult)
+{
+    CSliderCtrl* slider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_INPUT_VOLUME);
+    slider->SetRangeMin(0);
+    slider->SetRangeMax(255);
+    int pos = slider->GetPos();
+
+    TEST(_veVolumeControlPtr->SetMicVolume(pos) == 0, _T("SetMicVolume(volume=%d)"), pos);
+
+    *pResult = 0;
+}
+
+void CWinTestDlg::OnNMReleasedcaptureSliderOutputVolume(NMHDR *pNMHDR, LRESULT *pResult)
+{
+    CSliderCtrl* slider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_OUTPUT_VOLUME);
+    slider->SetRangeMin(0);
+    slider->SetRangeMax(255);
+    int pos = slider->GetPos();
+
+    TEST(_veVolumeControlPtr->SetSpeakerVolume(pos) == 0, _T("SetSpeakerVolume(volume=%d)"), pos);
+
+    *pResult = 0;
+}
+
+void CWinTestDlg::OnNMReleasedcaptureSliderPanLeft(NMHDR *pNMHDR, LRESULT *pResult)
+{
+    CSliderCtrl* slider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_PAN_LEFT);
+    slider->SetRange(0,10);
+    int pos = 10 - slider->GetPos();    // 0 <=> lower end, 10 <=> upper end
+
+    float left(0.0);
+    float right(0.0);
+    const int channel(-1);
+
+    // Only left channel will be modified
+    _veVolumeControlPtr->GetOutputVolumePan(channel, left, right);
+
+    left = (float)((float)pos/10.0f);
+
+    TEST(_veVolumeControlPtr->SetOutputVolumePan(channel, left, right) == 0,
+        _T("SetOutputVolumePan(channel=%d, left=%2.1f, right=%2.1f)"), channel, left, right);
+
+    *pResult = 0;
+}
+
+void CWinTestDlg::OnNMReleasedcaptureSliderPanRight(NMHDR *pNMHDR, LRESULT *pResult)
+{
+    CSliderCtrl* slider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_PAN_RIGHT);
+    slider->SetRange(0,10);
+    int pos = 10 - slider->GetPos();    // 0 <=> lower end, 10 <=> upper end
+
+    float left(0.0);
+    float right(0.0);
+    const int channel(-1);
+
+    // Only right channel will be modified
+    _veVolumeControlPtr->GetOutputVolumePan(channel, left, right);
+
+    right = (float)((float)pos/10.0f);
+
+    TEST(_veVolumeControlPtr->SetOutputVolumePan(channel, left, right) == 0,
+        _T("SetOutputVolumePan(channel=%d, left=%2.1f, right=%2.1f)"), channel, left, right);
+
+    *pResult = 0;
+}
+
+void CWinTestDlg::OnBnClickedButtonVersion()
+{
+    if (_veBasePtr)
+    {
+        char version[1024];
+        if (_veBasePtr->GetVersion(version) == 0)
+        {
+            AfxMessageBox(CString(version), MB_OK);
+        }
+        else
+        {
+            AfxMessageBox(_T("FAILED!"), MB_OK);
+        }
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckRecCall()
+{
+    // Not supported
+}
+
+void CWinTestDlg::OnBnClickedCheckTypingDetection()
+{
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_TYPING_DETECTION);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    TEST(_veApmPtr->SetTypingDetectionStatus(enable) == 0, _T("SetTypingDetectionStatus(enable=%d)"), enable);
+}
+
+void CWinTestDlg::OnBnClickedCheckFEC()
+{
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_FEC);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    TEST(_veRtpRtcpPtr->SetFECStatus(channel, enable) == 0, _T("SetFECStatus(channel=%d, enable=%d)"), channel, enable);
+}
+
+// ----------------------------------------------------------------------------
+//                                   Message Handlers
+// ----------------------------------------------------------------------------
+
+void CWinTestDlg::OnTimer(UINT_PTR nIDEvent)
+{
+    CString str;
+
+    unsigned int svol(0);
+    unsigned int mvol(0);
+
+    _timerTicks++;
+
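+    // Runs about once per second (re-armed by SetTimer at the end) and
+    // refreshes the volume sliders, level meters, playout-buffer/delay
+    // readouts, CPU load and RTCP statistics.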
+    // Get speaker and microphone volumes
+    _veVolumeControlPtr->GetSpeakerVolume(svol);
+    _veVolumeControlPtr->GetMicVolume(mvol);
+
+    // Update speaker volume slider
+    CSliderCtrl* sliderSpkr = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_OUTPUT_VOLUME);
+    sliderSpkr->SetRangeMin(0);
+    sliderSpkr->SetRangeMax(255);
+    sliderSpkr->SetPos(svol);
+
+    // Update microphone volume slider
+    CSliderCtrl* sliderMic = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_INPUT_VOLUME);
+    sliderMic->SetRangeMin(0);
+    sliderMic->SetRangeMax(255);
+    sliderMic->SetPos(mvol);
+
+    unsigned int micLevel;
+    unsigned int combinedOutputLevel;
+
+    // Get audio levels
+    _veVolumeControlPtr->GetSpeechInputLevel(micLevel);
+    _veVolumeControlPtr->GetSpeechOutputLevel(-1, combinedOutputLevel);
+
+    // Update audio level controls
+    CProgressCtrl* progressMic = (CProgressCtrl*)GetDlgItem(IDC_PROGRESS_AUDIO_LEVEL_IN);
+    progressMic->SetRange(0,9);
+    progressMic->SetStep(1);
+    progressMic->SetPos(micLevel);
+    CProgressCtrl* progressOut = (CProgressCtrl*)GetDlgItem(IDC_PROGRESS_AUDIO_LEVEL_OUT);
+    progressOut->SetRange(0,9);
+    progressOut->SetStep(1);
+    progressOut->SetPos(combinedOutputLevel);
+
+    // Update playout delay (buffer size)
+    if (_veVideoSyncPtr)
+    {
+        int bufferMs(0);
+        _veVideoSyncPtr->GetPlayoutBufferSize(bufferMs);
+        SetDlgItemInt(IDC_EDIT_PLAYOUT_BUFFER_SIZE, bufferMs);
+    }
+
+    if (_delayEstimate1 && _veVideoSyncPtr)
+    {
+        const int channel = GetDlgItemInt(IDC_EDIT_1);
+        int delayMs(0);
+        _veVideoSyncPtr->GetDelayEstimate(channel, delayMs);
+        SetDlgItemInt(IDC_EDIT_DELAY_ESTIMATE_1, delayMs);
+    }
+
+    if (_rxVad && _veApmPtr && _rxVadObserverPtr)
+    {
+        SetDlgItemInt(IDC_EDIT_RXVAD, _rxVadObserverPtr->_vadDecision);
+    }
+
+    if (_veHardwarePtr)
+    {
+        int load1, load2;
+        _veHardwarePtr->GetSystemCPULoad(load1);
+        _veHardwarePtr->GetCPULoad(load2);
+        str.Format(_T("CPU load (system/VoE): %d/%d [%%]"), load1, load2);
+        SetDlgItemText(IDC_EDIT_CPU_LOAD, (LPCTSTR)str);
+    }
+
+    BOOL ret;
+    int channel = GetDlgItemInt(IDC_EDIT_1, &ret);
+
+    if (_veCodecPtr)
+    {
+        if (ret == TRUE)
+        {
+            CodecInst codec;
+            if (_veCodecPtr->GetRecCodec(channel, codec) == 0)
+            {
+                str.Format(_T("RX codec: %s, freq=%d, pt=%d, rate=%d, size=%d"), CharToTchar(codec.plname, 32), codec.plfreq, codec.pltype, codec.rate, codec.pacsize);
+                SetDlgItemText(IDC_EDIT_RX_CODEC_1, (LPCTSTR)str);
+            }
+        }
+    }
+
+    if (_veRtpRtcpPtr)
+    {
+        if (ret == TRUE)
+        {
+            CallStatistics stats;
+            if (_veRtpRtcpPtr->GetRTCPStatistics(channel, stats) == 0)
+            {
+                str.Format(_T("RTCP | RTP: cum=%u, ext=%d, frac=%u, jitter=%u | TX=%d, RX=%d, RTT=%d"),
+                    stats.cumulativeLost, stats.extendedMax, stats.fractionLost, stats.jitterSamples, stats.packetsSent, stats.packetsReceived, stats.rttMs);
+                SetDlgItemText(IDC_EDIT_RTCP_STAT_1, (LPCTSTR)str);
+            }
+        }
+    }
+
+    SetTimer(0, 1000, NULL);
+    CDialog::OnTimer(nIDEvent);
+}
+
+void CWinTestDlg::OnBnClickedButtonClearErrorCallback()
+{
+    _nErrorCallbacks = 0;
+    SetDlgItemText(IDC_EDIT_ERROR_CALLBACK, _T(""));
+}
+
+// ----------------------------------------------------------------------------
+//                                       TEST
+// ----------------------------------------------------------------------------
+
+void CWinTestDlg::OnBnClickedButtonTest1()
+{
+    // add tests here...
+}
+
diff --git a/voice_engine/main/test/win_test/WinTestDlg.h b/voice_engine/main/test/win_test/WinTestDlg.h
new file mode 100644
index 0000000..6452775
--- /dev/null
+++ b/voice_engine/main/test/win_test/WinTestDlg.h
@@ -0,0 +1,278 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#pragma once
+
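+// TEST runs an API call, shows the formatted call description in the dialog
+// and prints either "OK" or the last VoiceEngine error (also bumping the fail
+// counter); TEST2 reports the result to the parent dialog via UpdateTest().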
+#if (_MSC_VER >= 1400)
+#define PRINT_GET_RESULT(...) \
+    { \
+        _strMsg.Format(__VA_ARGS__); \
+        SetDlgItemText(IDC_EDIT_GET_OUTPUT, _strMsg); \
+    }
+
+#define TEST(x, ...) \
+    if (!(x)) \
+    { \
+        _strMsg.Format(__VA_ARGS__); \
+        SetDlgItemText(IDC_EDIT_MESSAGE, _strMsg); \
+        _strErr.Format(_T("FAILED (error=%d)"), _veBasePtr->LastError()); \
+        SetDlgItemText(IDC_EDIT_RESULT, _strErr); \
+        _failCount++; \
+        SetDlgItemInt(IDC_EDIT_N_FAILS, _failCount); \
+        SetDlgItemInt(IDC_EDIT_LAST_ERROR, _veBasePtr->LastError()); \
+    } \
+    else \
+    { \
+        _strMsg.Format(__VA_ARGS__); \
+        SetDlgItemText(IDC_EDIT_MESSAGE, _strMsg); \
+        SetDlgItemText(IDC_EDIT_RESULT, _T("OK")); \
+    }
+
+#define TEST2(x, ...) \
+    if (!(x)) \
+    { \
+        _strMsg.Format(__VA_ARGS__); \
+        ((CWinTestDlg*)_parentDialogPtr)->UpdateTest(true, _strMsg); \
+    } \
+    else \
+    { \
+        _strMsg.Format(__VA_ARGS__); \
+        ((CWinTestDlg*)_parentDialogPtr)->UpdateTest(false, _strMsg); \
+    }
+#else
+#define TEST(x, exp) \
+    if (!(x)) \
+    { \
+        _strMsg.Format(exp); \
+        SetDlgItemText(IDC_EDIT_MESSAGE, _strMsg); \
+        _strErr.Format("FAILED (error=%d)", _veBasePtr->LastError()); \
+        SetDlgItemText(IDC_EDIT_RESULT, _strErr); \
+        _failCount++; \
+        SetDlgItemInt(IDC_EDIT_N_FAILS, _failCount); \
+        SetDlgItemInt(IDC_EDIT_LAST_ERROR, _veBasePtr->LastError()); \
+    } \
+    else \
+    { \
+        _strMsg.Format(exp); \
+        SetDlgItemText(IDC_EDIT_MESSAGE, _strMsg); \
+        SetDlgItemText(IDC_EDIT_RESULT, _T("OK")); \
+    }
+
+#define TEST2(x, exp) \
+    if (!(x)) \
+    { \
+        _strMsg.Format(exp); \
+        ((CWinTestDlg*)_parentDialogPtr)->UpdateTest(true, _strMsg); \
+    } \
+    else \
+    { \
+        _strMsg.Format(exp); \
+        ((CWinTestDlg*)_parentDialogPtr)->UpdateTest(false, _strMsg); \
+    }
+#endif
+
+#include "voe_base.h"
+#include "voe_rtp_rtcp.h"
+#include "voe_codec.h"
+#include "voe_dtmf.h"
+#include "voe_encryption.h"
+#include "voe_external_media.h"
+#include "voe_file.h"
+#include "voe_hardware.h"
+#include "voe_network.h"
+#include "voe_video_sync.h"
+#include "voe_volume_control.h"
+
+#include "voe_audio_processing.h"
+#include "voe_errors.h"
+
+class MediaProcessImpl;
+class ConnectionObserver;
+class MyEncryption;
+class RxCallback;
+class MyTransport;
+
+using namespace webrtc;
+
+#define MAX_NUM_OF_CHANNELS    10
+
+// CWinTestDlg dialog
+class CWinTestDlg : public CDialog,
+                    public VoiceEngineObserver,
+                    public VoERTPObserver
+{
+// Construction
+public:
+    CWinTestDlg(CWnd* pParent = NULL);    // standard constructor
+    virtual ~CWinTestDlg();
+
+// Dialog Data
+    enum { IDD = IDD_WINTEST_DIALOG };
+
+    BOOL UpdateTest(bool failed, const CString& strMsg);
+
+protected:
+    virtual void DoDataExchange(CDataExchange* pDX);    // DDX/DDV support
+
+protected:  // VoiceEngineObserver
+    virtual void CallbackOnError(const int channel, const int errCode);
+
+protected:    // VoERTPObserver
+    virtual void OnIncomingCSRCChanged(
+        const int channel, const unsigned int CSRC, const bool added);
+    virtual void OnIncomingSSRCChanged(
+        const int channel, const unsigned int SSRC);
+
+// Implementation
+protected:
+    HICON m_hIcon;
+
+    // Generated message map functions
+    virtual BOOL OnInitDialog();
+    afx_msg void OnSysCommand(UINT nID, LPARAM lParam);
+    afx_msg void OnPaint();
+    afx_msg HCURSOR OnQueryDragIcon();
+    DECLARE_MESSAGE_MAP()
+public:
+    afx_msg void OnBnClickedButtonCreate1();
+    afx_msg void OnBnClickedButtonDelete1();
+
+private:
+    VoiceEngine*    _vePtr;
+
+    VoECodec*               _veCodecPtr;
+    VoEExternalMedia*       _veExternalMediaPtr;
+    VoEVolumeControl*       _veVolumeControlPtr;
+    VoEEncryption*          _veEncryptionPtr;
+    VoEHardware*            _veHardwarePtr;
+    VoEVideoSync*           _veVideoSyncPtr;
+    VoENetwork*             _veNetworkPtr;
+    VoEFile*                _veFilePtr;
+    VoEAudioProcessing*     _veApmPtr;
+    VoEBase*                _veBasePtr;
+    VoERTP_RTCP*            _veRtpRtcpPtr;
+
+    MyTransport*            _transportPtr;
+    MediaProcessImpl*       _externalMediaPtr;
+    ConnectionObserver*     _connectionObserverPtr;
+    MyEncryption*           _encryptionPtr;
+    RxCallback*             _rxVadObserverPtr;
+
+private:
+    int                     _failCount;
+    CString                 _strMsg;
+    CString                 _strErr;
+    bool                    _externalTransport;
+    bool                    _externalTransportBuild;
+    int                     _checkPlayFileIn;
+    int                     _checkPlayFileIn1;
+    int                     _checkPlayFileIn2;
+    int                     _checkPlayFileOut1;
+    int                     _checkPlayFileOut2;
+    int                     _checkAGC;
+    int                     _checkAGC1;
+    int                     _checkNS;
+    int                     _checkNS1;
+    int                     _checkEC;
+    int                     _checkVAD1;
+    int                     _checkVAD2;
+    int                     _checkSrtpTx1;
+    int                     _checkSrtpTx2;
+    int                     _checkSrtpRx1;
+    int                     _checkSrtpRx2;
+    int                     _checkConference1;
+    int                     _checkConference2;
+    int                     _checkOnHold1;
+    int                     _checkOnHold2;
+    bool                    _delayEstimate1;
+    bool                    _delayEstimate2;
+    bool                    _rxVad;
+    int                     _nErrorCallbacks;
+    int                     _timerTicks;
+
+public:
+    afx_msg void OnBnClickedButtonCreate2();
+    afx_msg void OnBnClickedButtonDelete2();
+    afx_msg void OnCbnSelchangeComboCodec1();
+    afx_msg void OnBnClickedButtonStartListen1();
+    afx_msg void OnBnClickedButtonStopListen1();
+    afx_msg void OnBnClickedButtonStartPlayout1();
+    afx_msg void OnBnClickedButtonStopPlayout1();
+    afx_msg void OnBnClickedButtonStartSend1();
+    afx_msg void OnBnClickedButtonStopSend1();
+    afx_msg void OnCbnSelchangeComboIp2();
+    afx_msg void OnCbnSelchangeComboIp1();
+    afx_msg void OnCbnSelchangeComboCodec2();
+    afx_msg void OnBnClickedButtonStartListen2();
+    afx_msg void OnBnClickedButtonStopListen2();
+    afx_msg void OnBnClickedButtonStartPlayout2();
+    afx_msg void OnBnClickedButtonStopPlayout2();
+    afx_msg void OnBnClickedButtonStartSend2();
+    afx_msg void OnBnClickedButtonStopSend2();
+    afx_msg void OnBnClickedButtonTest11();
+    afx_msg void OnBnClickedCheckExtTrans1();
+    afx_msg void OnBnClickedCheckPlayFileIn1();
+    afx_msg void OnBnClickedCheckPlayFileOut1();
+    afx_msg void OnBnClickedCheckExtTrans2();
+    afx_msg void OnBnClickedCheckPlayFileIn2();
+    afx_msg void OnBnClickedCheckPlayFileOut2();
+    afx_msg void OnBnClickedCheckPlayFileIn();
+    afx_msg void OnBnClickedCheckPlayFileOut();
+    afx_msg void OnCbnSelchangeComboRecDevice();
+    afx_msg void OnCbnSelchangeComboPlayDevice();
+    afx_msg void OnBnClickedCheckExtMediaIn1();
+    afx_msg void OnBnClickedCheckExtMediaOut1();
+    afx_msg void OnNMReleasedcaptureSliderInputVolume(NMHDR *pNMHDR, LRESULT *pResult);
+    afx_msg void OnNMReleasedcaptureSliderOutputVolume(NMHDR *pNMHDR, LRESULT *pResult);
+    afx_msg void OnTimer(UINT_PTR nIDEvent);
+    afx_msg void OnBnClickedCheckAgc();
+    CString _strComboIp1;
+    CString _strComboIp2;
+    afx_msg void OnBnClickedCheckNs();
+    afx_msg void OnBnClickedCheckEc();
+    afx_msg void OnBnClickedCheckVad1();
+    afx_msg void OnBnClickedCheckVad2();
+    afx_msg void OnBnClickedCheckExtMediaIn2();
+    afx_msg void OnBnClickedCheckExtMediaOut2();
+    afx_msg void OnBnClickedCheckMuteIn();
+    afx_msg void OnBnClickedCheckMuteIn1();
+    afx_msg void OnBnClickedCheckMuteIn2();
+    afx_msg void OnBnClickedCheckSrtpTx1();
+    afx_msg void OnBnClickedCheckSrtpRx1();
+    afx_msg void OnBnClickedCheckSrtpTx2();
+    afx_msg void OnBnClickedCheckSrtpRx2();
+    afx_msg void OnBnClickedCheckExtEncryption1();
+    afx_msg void OnBnClickedCheckExtEncryption2();
+    afx_msg void OnBnClickedButtonDtmf1();
+    afx_msg void OnBnClickedCheckRecMic();
+    afx_msg void OnBnClickedButtonDtmf2();
+    afx_msg void OnBnClickedButtonTest1();
+    afx_msg void OnBnClickedCheckConference1();
+    afx_msg void OnBnClickedCheckConference2();
+    afx_msg void OnBnClickedCheckOnHold1();
+    afx_msg void OnBnClickedCheckOnHold2();
+    afx_msg void OnBnClickedCheckExtMediaIn();
+    afx_msg void OnBnClickedCheckExtMediaOut();
+    afx_msg void OnLbnSelchangeListCodec1();
+    afx_msg void OnNMReleasedcaptureSliderPanLeft(NMHDR *pNMHDR, LRESULT *pResult);
+    afx_msg void OnNMReleasedcaptureSliderPanRight(NMHDR *pNMHDR, LRESULT *pResult);
+    afx_msg void OnBnClickedButtonVersion();
+    afx_msg void OnBnClickedCheckDelayEstimate1();
+    afx_msg void OnBnClickedCheckRxvad();
+    afx_msg void OnBnClickedCheckAgc1();
+    afx_msg void OnBnClickedCheckNs1();
+    afx_msg void OnBnClickedCheckRecCall();
+    afx_msg void OnBnClickedCheckTypingDetection();
+    afx_msg void OnBnClickedCheckFEC();
+    afx_msg void OnBnClickedButtonClearErrorCallback();
+    afx_msg void OnBnClickedCheckBwe1();
+};
+#pragma once
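
A minimal sketch (not part of the patch itself) of how CWinTestDlg could obtain the sub-API pointers declared above and register itself for the observer callbacks it implements. The exact sequence, the single-channel handling, and the RegisterVoiceEngineObserver()/RegisterRTPObserver() calls are assumptions based on the 2011-era VoiceEngine API.

    // Sketch only: engine setup in OnInitDialog(), using the members declared above.
    BOOL CWinTestDlg::OnInitDialog()
    {
        CDialog::OnInitDialog();

        _vePtr = VoiceEngine::Create();
        _veBasePtr = VoEBase::GetInterface(_vePtr);
        _veRtpRtcpPtr = VoERTP_RTCP::GetInterface(_vePtr);
        _veApmPtr = VoEAudioProcessing::GetInterface(_vePtr);

        _veBasePtr->Init();
        _veBasePtr->RegisterVoiceEngineObserver(*this);       // -> CallbackOnError()

        int channel = _veBasePtr->CreateChannel();             // assumed single channel
        _veRtpRtcpPtr->RegisterRTPObserver(channel, *this);    // -> OnIncomingSSRCChanged()

        return TRUE;
    }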
diff --git a/voice_engine/main/test/win_test/audio_long16.pcm b/voice_engine/main/test/win_test/audio_long16.pcm
new file mode 100644
index 0000000..853e0df
--- /dev/null
+++ b/voice_engine/main/test/win_test/audio_long16.pcm
Binary files differ
diff --git a/voice_engine/main/test/win_test/audio_long16noise.pcm b/voice_engine/main/test/win_test/audio_long16noise.pcm
new file mode 100644
index 0000000..a7be537
--- /dev/null
+++ b/voice_engine/main/test/win_test/audio_long16noise.pcm
Binary files differ
diff --git a/voice_engine/main/test/win_test/audio_short16.pcm b/voice_engine/main/test/win_test/audio_short16.pcm
new file mode 100644
index 0000000..15a0f18
--- /dev/null
+++ b/voice_engine/main/test/win_test/audio_short16.pcm
Binary files differ
diff --git a/voice_engine/main/test/win_test/audio_tiny11.wav b/voice_engine/main/test/win_test/audio_tiny11.wav
new file mode 100644
index 0000000..6db80d5
--- /dev/null
+++ b/voice_engine/main/test/win_test/audio_tiny11.wav
Binary files differ
diff --git a/voice_engine/main/test/win_test/audio_tiny16.wav b/voice_engine/main/test/win_test/audio_tiny16.wav
new file mode 100644
index 0000000..baab0ac
--- /dev/null
+++ b/voice_engine/main/test/win_test/audio_tiny16.wav
Binary files differ
diff --git a/voice_engine/main/test/win_test/audio_tiny22.wav b/voice_engine/main/test/win_test/audio_tiny22.wav
new file mode 100644
index 0000000..b421867
--- /dev/null
+++ b/voice_engine/main/test/win_test/audio_tiny22.wav
Binary files differ
diff --git a/voice_engine/main/test/win_test/audio_tiny32.wav b/voice_engine/main/test/win_test/audio_tiny32.wav
new file mode 100644
index 0000000..773ac23
--- /dev/null
+++ b/voice_engine/main/test/win_test/audio_tiny32.wav
Binary files differ
diff --git a/voice_engine/main/test/win_test/audio_tiny44.wav b/voice_engine/main/test/win_test/audio_tiny44.wav
new file mode 100644
index 0000000..c9faa45
--- /dev/null
+++ b/voice_engine/main/test/win_test/audio_tiny44.wav
Binary files differ
diff --git a/voice_engine/main/test/win_test/audio_tiny48.wav b/voice_engine/main/test/win_test/audio_tiny48.wav
new file mode 100644
index 0000000..8ebf11a
--- /dev/null
+++ b/voice_engine/main/test/win_test/audio_tiny48.wav
Binary files differ
diff --git a/voice_engine/main/test/win_test/audio_tiny8.wav b/voice_engine/main/test/win_test/audio_tiny8.wav
new file mode 100644
index 0000000..d71c65e
--- /dev/null
+++ b/voice_engine/main/test/win_test/audio_tiny8.wav
Binary files differ
diff --git a/voice_engine/main/test/win_test/res/WinTest.ico b/voice_engine/main/test/win_test/res/WinTest.ico
new file mode 100644
index 0000000..8a84ca3
--- /dev/null
+++ b/voice_engine/main/test/win_test/res/WinTest.ico
Binary files differ
diff --git a/voice_engine/main/test/win_test/res/WinTest.rc2 b/voice_engine/main/test/win_test/res/WinTest.rc2
new file mode 100644
index 0000000..044bf7e
--- /dev/null
+++ b/voice_engine/main/test/win_test/res/WinTest.rc2
@@ -0,0 +1,13 @@
+//
+// WinTest.RC2 - resources Microsoft Visual C++ does not edit directly
+//
+
+#ifdef APSTUDIO_INVOKED
+#error this file is not editable by Microsoft Visual C++
+#endif //APSTUDIO_INVOKED
+
+
+/////////////////////////////////////////////////////////////////////////////
+// Add manually edited resources here...
+
+/////////////////////////////////////////////////////////////////////////////
diff --git a/voice_engine/main/test/win_test/stdafx.cpp b/voice_engine/main/test/win_test/stdafx.cpp
new file mode 100644
index 0000000..6cdb906
--- /dev/null
+++ b/voice_engine/main/test/win_test/stdafx.cpp
@@ -0,0 +1,17 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// stdafx.cpp : source file that includes just the standard includes
+// WinTest.pch will be the pre-compiled header
+// stdafx.obj will contain the pre-compiled type information
+
+#include "stdafx.h"
+
+
diff --git a/voice_engine/main/test/win_test/stdafx.h b/voice_engine/main/test/win_test/stdafx.h
new file mode 100644
index 0000000..b4d875c
--- /dev/null
+++ b/voice_engine/main/test/win_test/stdafx.h
@@ -0,0 +1,82 @@
+/*
+ *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// stdafx.h : include file for standard system include files,
+// or project specific include files that are used frequently,
+// but are changed infrequently
+
+#pragma once
+
+#ifndef _SECURE_ATL
+#define _SECURE_ATL 1
+#endif
+
+#ifndef VC_EXTRALEAN
+#define VC_EXTRALEAN		// Exclude rarely-used stuff from Windows headers
+#endif
+
+// Modify the following defines if you have to target a platform prior to the ones specified below.
+// Refer to MSDN for the latest info on corresponding values for different platforms.
+#ifndef WINVER				// Allow use of features specific to Windows XP or later.
+#define WINVER 0x0501		// Change this to the appropriate value to target other versions of Windows.
+#endif
+
+#ifndef _WIN32_WINNT		// Allow use of features specific to Windows XP or later.                   
+#define _WIN32_WINNT 0x0501	// Change this to the appropriate value to target other versions of Windows.
+#endif						
+
+#ifndef _WIN32_WINDOWS		// Allow use of features specific to Windows 98 or later.
+#define _WIN32_WINDOWS 0x0410 // Change this to the appropriate value to target Windows Me or later.
+#endif
+
+#ifndef _WIN32_IE			// Allow use of features specific to IE 6.0 or later.
+#define _WIN32_IE 0x0600	// Change this to the appropriate value to target other versions of IE.
+#endif
+
+#define _ATL_CSTRING_EXPLICIT_CONSTRUCTORS	// some CString constructors will be explicit
+
+// turns off MFC's hiding of some common and often safely ignored warning messages
+#define _AFX_ALL_WARNINGS
+
+#include <afxwin.h>         // MFC core and standard components
+#include <afxext.h>         // MFC extensions
+
+
+
+
+
+#ifndef _AFX_NO_OLE_SUPPORT
+#include <afxdtctl.h>		// MFC support for Internet Explorer 4 Common Controls
+#endif
+#ifndef _AFX_NO_AFXCMN_SUPPORT
+#include <afxcmn.h>			// MFC support for Windows Common Controls
+#endif // _AFX_NO_AFXCMN_SUPPORT
+
+
+
+
+
+
+
+
+
+#ifdef _UNICODE
+#if defined _M_IX86
+#pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='x86' publicKeyToken='6595b64144ccf1df' language='*'\"")
+#elif defined _M_IA64
+#pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='ia64' publicKeyToken='6595b64144ccf1df' language='*'\"")
+#elif defined _M_X64
+#pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='amd64' publicKeyToken='6595b64144ccf1df' language='*'\"")
+#else
+#pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='*' publicKeyToken='6595b64144ccf1df' language='*'\"")
+#endif
+#endif
+
+
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/HRTF_pcm16wb.rtp b/voice_engine/main/test/win_test/stereo_rtp_files/HRTF_pcm16wb.rtp
new file mode 100644
index 0000000..02abbc2
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/HRTF_pcm16wb.rtp
Binary files differ
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/HRTF_pcm16wb_jitter.rtp b/voice_engine/main/test/win_test/stereo_rtp_files/HRTF_pcm16wb_jitter.rtp
new file mode 100644
index 0000000..4ed110b
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/HRTF_pcm16wb_jitter.rtp
Binary files differ
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/README.txt b/voice_engine/main/test/win_test/stereo_rtp_files/README.txt
new file mode 100644
index 0000000..976ac56
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/README.txt
@@ -0,0 +1,4 @@
+Use the RTP Play tool with the command 'rtpplay.exe -v -T -f <path>\<file.rtp> 127.0.0.1/1236'
+Example: rtpplay.exe -v -T -f hrtf_g722_1C_48.rtp 127.0.0.1/1236
+This streams the stereo RTP file to local port 1236.
+You should hear the voice being panned between left, right, and center.
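
To complement the README (a sketch, not part of the patch itself), this is how the receiving side of the test dialog might be set up so that the file streamed by rtpplay.exe is heard; SetLocalReceiver()/StartReceive()/StartPlayout() are the 2011-era VoEBase calls, and the member name mirrors the WinTest header above.

    // Sketch only: listen on the port that rtpplay.exe streams to.
    int channel = _veBasePtr->CreateChannel();
    _veBasePtr->SetLocalReceiver(channel, 1236);   // same port as in the command above
    _veBasePtr->StartReceive(channel);
    _veBasePtr->StartPlayout(channel);             // the left/right/center panning should be audible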
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/hrtf_g722_1C_48.rtp b/voice_engine/main/test/win_test/stereo_rtp_files/hrtf_g722_1C_48.rtp
new file mode 100644
index 0000000..b96d59b
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/hrtf_g722_1C_48.rtp
Binary files differ
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/hrtf_g722_1C_48_jitterT2.rtp b/voice_engine/main/test/win_test/stereo_rtp_files/hrtf_g722_1C_48_jitterT2.rtp
new file mode 100644
index 0000000..527a50a
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/hrtf_g722_1C_48_jitterT2.rtp
Binary files differ
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/rtpplay.exe b/voice_engine/main/test/win_test/stereo_rtp_files/rtpplay.exe
new file mode 100755
index 0000000..6f938c8
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/rtpplay.exe
Binary files differ
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/stereo_g729.rtp b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_g729.rtp
new file mode 100644
index 0000000..3c36e30
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_g729.rtp
Binary files differ
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/stereo_g729_jitter.rtp b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_g729_jitter.rtp
new file mode 100644
index 0000000..913226c
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_g729_jitter.rtp
Binary files differ
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcm16wb.rtp b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcm16wb.rtp
new file mode 100644
index 0000000..729b565
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcm16wb.rtp
Binary files differ
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcm16wb_jitter.rtp b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcm16wb_jitter.rtp
new file mode 100644
index 0000000..efa2800
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcm16wb_jitter.rtp
Binary files differ
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcmu.rtp b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcmu.rtp
new file mode 100644
index 0000000..bb2d93c
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcmu.rtp
Binary files differ
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcmu_jitter.rtp b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcmu_jitter.rtp
new file mode 100644
index 0000000..fb79378
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcmu_jitter.rtp
Binary files differ
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcmu_vad.rtp b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcmu_vad.rtp
new file mode 100644
index 0000000..eebcf34
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcmu_vad.rtp
Binary files differ
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcmu_vad_jitter.rtp b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcmu_vad_jitter.rtp
new file mode 100644
index 0000000..5c368b4
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/stereo_pcmu_vad_jitter.rtp
Binary files differ
diff --git a/voice_engine/main/test/win_test/stereo_rtp_files/toggling_stereo_g729_pt18_pt125.rtp b/voice_engine/main/test/win_test/stereo_rtp_files/toggling_stereo_g729_pt18_pt125.rtp
new file mode 100644
index 0000000..1f713f6
--- /dev/null
+++ b/voice_engine/main/test/win_test/stereo_rtp_files/toggling_stereo_g729_pt18_pt125.rtp
Binary files differ